How to remove the floor surface from PointCloud2 data with pcl_ros - ROS

First, thanks for reading.
I am trying to remove the floor from my point cloud data.
This is the code I wrote to remove the floor points:
#include <iostream>
#include <cmath>
#include <vector>
#include <ros/ros.h>
#include <sensor_msgs/PointCloud2.h>
#include <pcl_ros/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl_conversions/pcl_conversions.h>
#include <velodyne_pointcloud/point_types.h>
#include <pcl/common/common.h>
#include <pcl/common/centroid.h>
#include <pcl/common/transforms.h>
#include <pcl/console/parse.h>
#include <set>
#include <pcl/io/pcd_io.h>
#include <boost/format.hpp>
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/filters/passthrough.h>
#include <pcl/sample_consensus/model_types.h>
#include <pcl/sample_consensus/method_types.h>
#include <pcl/segmentation/sac_segmentation.h>
struct VelodynePointXYZIRT
{
PCL_ADD_POINT4D
PCL_ADD_INTENSITY;
uint16_t ring;
float time;
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
} EIGEN_ALIGN16;
POINT_CLOUD_REGISTER_POINT_STRUCT (VelodynePointXYZIRT,
(float, x, x) (float, y, y) (float, z, z) (float, intensity, intensity)
(uint16_t, ring, ring) (float, time, time)
)
ros::Publisher pub1;
using PointXYZIRT = VelodynePointXYZIRT;
void help (const sensor_msgs::PointCloud2ConstPtr& scan)
{
// Convert the sensor_msgs/PointCloud2 data to pcl/PointCloud
pcl::PointCloud<VelodynePointXYZIRT>::Ptr cloud(new pcl::PointCloud<VelodynePointXYZIRT>());
pcl::fromROSMsg (*scan, *cloud);
pcl::ModelCoefficients coefficients;
pcl::PointIndices inliers;
// Create the segmentation object
pcl::SACSegmentation< pcl::PointCloud<PointXYZIRT>> seg;
// Optional
seg.setOptimizeCoefficients (true);
// Mandatory
seg.setModelType (pcl::SACMODEL_PLANE);
seg.setMethodType (pcl::SAC_RANSAC);
seg.setDistanceThreshold (0.01);
seg.setInputCloud (cloud.makeShared ());
seg.segment (inliers, coefficients);
// Publish the model coefficients
pcl_msgs::ModelCoefficients ros_coefficients;
pcl_conversions::fromPCL(coefficients, ros_coefficients);
pub1.publish (ros_coefficients);
}
int main (int argc, char** argv)
{
// Initialize ROS
ros::init (argc, argv, "my_pcl_tutorial");
ros::NodeHandle nh;
// Create a ROS subscriber for the input point cloud
ros::Subscriber sub = nh.subscribe ("input", 1, help);
// Create a ROS publisher for the output point cloud
//#pub1 = nh.advertise<sensor_msgs::PointCloud2> ("output", 1);
pub1 = nh.advertise<pcl_msgs::ModelCoefficients> ("output", 1);
// Spin
ros::spin ();
}
Since I have to use the ring data in the code, I made the struct that defines the Velodyne lidar point type,
but when I run catkin_make in ROS,
this error comes up:
error: ‘pcl::PointCloud<VelodynePointXYZIRT>::Ptr {aka class boost::shared_ptr<pcl::PointCloud<VelodynePointXYZIRT> >}’ has no member named ‘makeShared’
seg.setInputCloud (cloud.makeShared ());
Also, is there a way to visualize the data?
My reference for writing the code is this site: https://adioshun.gitbooks.io/pcl-tutorial/content/part-1/part01-chapter05/part01-chapter05-practice.html
Below is my previous code to transform the lidar data; I used it as the base for my own code.
#include <iostream>
#include <cmath>
#include <vector>
#include <ros/ros.h>
#include <sensor_msgs/PointCloud2.h>
#include <pcl_ros/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl_conversions/pcl_conversions.h>
#include <velodyne_pointcloud/point_types.h>
#include <pcl/common/common.h>
#include <pcl/common/centroid.h>
#include <pcl/common/transforms.h>
#include <pcl/console/parse.h>
#include <set>
#include <pcl/io/pcd_io.h>
#include <boost/format.hpp>
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/filters/passthrough.h>
#define PI 3.14159265359
using namespace std;
struct VelodynePointXYZIRT
{
PCL_ADD_POINT4D
PCL_ADD_INTENSITY;
uint16_t ring;
float time;
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
} EIGEN_ALIGN16;
POINT_CLOUD_REGISTER_POINT_STRUCT (VelodynePointXYZIRT,
(float, x, x) (float, y, y) (float, z, z) (float, intensity, intensity)
(uint16_t, ring, ring) (float, time, time)
)
ros::Publisher pub1;
float theta_r = 45* M_PI/ 180; // rotation angle in radians (45-degree rotation)
using PointXYZIRT = VelodynePointXYZIRT;
void input(const sensor_msgs::PointCloud2ConstPtr& scan)
{
// Msg to pointcloud
pcl::PointCloud<VelodynePointXYZIRT>::Ptr cloud(new pcl::PointCloud<VelodynePointXYZIRT>());
pcl::fromROSMsg(*scan,*cloud); // convert the ROS msg into PCL cloud data
// rotation transformation matrix
Eigen::Matrix4f transform_1 = Eigen::Matrix4f::Identity();
// Define a rotation matrix (see https://en.wikipedia.org/wiki/Rotation_matrix)
transform_1 (0,0) = std::cos (theta_r);
transform_1 (0,2) = std::sin(theta_r);
transform_1 (2,0) = -sin (theta_r);
transform_1 (2,2) = std::cos (theta_r);
// (row, column)
// Executing the transformation
pcl::PointCloud<VelodynePointXYZIRT>::Ptr transformed_cloud (new pcl::PointCloud<PointXYZIRT>());
pcl::transformPointCloud (*cloud, *transformed_cloud, transform_1);
pcl::PCLPointCloud2 cloud_p;
pcl::toPCLPointCloud2(*transformed_cloud, cloud_p);
sensor_msgs::PointCloud2 output;
pcl_conversions::fromPCL(cloud_p, output);
output.header.frame_id = "velodyne";
pub1.publish(output);
}
int main(int argc, char** argv)
{
ros::init(argc, argv, "input");
ros::NodeHandle nh;
ros::Subscriber sub = nh.subscribe<sensor_msgs::PointCloud2> ("vlp202", 100, input);
pub1 = nh.advertise<sensor_msgs::PointCloud2> ("vlp203", 100);
ros::spin();
}
// If you have to make your own custom point type in PCL, you can look at this to see how to code it.
// Also, the ring data you have to use comes from LIO-SAM's imageProjection.cpp.
To summarize: I want to remove the floor point cloud data from a recorded bag, so I wrote the code above to remove the floor data.

The "cloud" object is already a shared pointer. Remove makeShared call:
seg.setInputCloud (cloud.makeShared ());
to
seg.setInputCloud (cloud);
For visualization you could publish over a topic the result as ros message and visualize it with rviz.
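As a fuller illustration, here is a minimal sketch of the callback after that fix, which also removes the detected floor inliers with pcl::ExtractIndices and publishes the remaining points as a sensor_msgs/PointCloud2 so they can be viewed in rviz. The extra publisher pub_cloud and the ExtractIndices step are assumptions added for illustration, not part of your original code:
// Sketch only: assumes the VelodynePointXYZIRT struct and includes from the question,
// plus #include <pcl/filters/extract_indices.h>
ros::Publisher pub_cloud; // hypothetical: advertise sensor_msgs::PointCloud2, e.g. on "cloud_no_floor"
void help (const sensor_msgs::PointCloud2ConstPtr& scan)
{
  pcl::PointCloud<PointXYZIRT>::Ptr cloud(new pcl::PointCloud<PointXYZIRT>());
  pcl::fromROSMsg(*scan, *cloud);
  pcl::ModelCoefficients::Ptr coefficients(new pcl::ModelCoefficients);
  pcl::PointIndices::Ptr inliers(new pcl::PointIndices);
  // Note: SACSegmentation is templated on the point type, not on the cloud type
  pcl::SACSegmentation<PointXYZIRT> seg;
  seg.setOptimizeCoefficients(true);
  seg.setModelType(pcl::SACMODEL_PLANE);
  seg.setMethodType(pcl::SAC_RANSAC);
  seg.setDistanceThreshold(0.01);
  seg.setInputCloud(cloud);        // cloud is already a shared pointer
  seg.segment(*inliers, *coefficients);
  // Keep everything that is NOT on the detected plane (the floor)
  pcl::PointCloud<PointXYZIRT>::Ptr cloud_no_floor(new pcl::PointCloud<PointXYZIRT>());
  pcl::ExtractIndices<PointXYZIRT> extract;
  extract.setInputCloud(cloud);
  extract.setIndices(inliers);
  extract.setNegative(true);
  extract.filter(*cloud_no_floor);
  sensor_msgs::PointCloud2 output;
  pcl::toROSMsg(*cloud_no_floor, output);
  output.header = scan->header;    // keep the original frame_id and stamp for rviz
  pub_cloud.publish(output);
}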

Related

readlink() error while reading /proc/self/exefile on QNX

I am working on the QNX platform, where I need to get the path of the executable that is currently running.
I wrote a small piece of code, which always returns -1:
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
extern int errno;
int main( int argc, char** argv )
{
char buf[512] = {0};
const char mypath[100] = "/proc/self/exefile";
errno = 0;
printf("The value readlink:: %d %s\n",readlink(mypath, buf, 512 ), strerror( errno ));
return( 0 );
}
When I run the above code, I get the following output:
The value readlink:: -1 No such file or directory
Am I missing anything?
What needs to be done to get my current exe path in QNX?
In QNX, /proc/self/exefile is not a symbolic link; it's a regular file.
Try:
#include <fstream>
#include <iostream>
#include <string>
int main(int argc, char **argv) {
std::ifstream file("/proc/self/exefile");
std::string path;
std::getline(file, path);
std::cout << path << "\n";
return 0;
}

Run a TensorFlow model to achieve super resolution with OpenCV dnn.hpp

#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/imgproc.hpp"
#include<opencv2/opencv.hpp>
#include<opencv2/dnn.hpp>
#include <iostream>
using namespace std;
using namespace cv;
const size_t inWidth = 300;
const size_t inHeight = 300;
const float WHRatio = inWidth / (float)inHeight;
const char* classNames[] = { "background","face" };
void saveMat(cv::Mat inputMat,char* filename);
int main() {
String model = "opt_srcnn_vgg_test.pb";
dnn::Net net = cv::dnn::readNetFromTensorflow(model);
Mat img = imread("test.bmp");
Mat input;
img.copyTo(input);
cvtColor(input,input,CV_BGR2GRAY);//turn the rgb to gray
cout<<"load finish!"<<endl;
Mat inputBlob = dnn::blobFromImage(input, 1./255, Size(img.cols, img.rows));
imshow("input",input);
waitKey(0);
cout<<inputBlob.size;
net.setInput(inputBlob, "input");//set the network input,
cout<<"feed finish!"<<endl;
Mat pred = net.forward();
resize(pred,pred,Size(img.cols,img.rows));
imshow("result",pred);
waitKey(0);
return 0;
}
We tried to make OpenCV dnn use a TensorFlow-trained .pb file to achieve super resolution. Our expected result is Figure 1, but we get the result in Figure 2 (note: we have verified that the .pb file is error-free). Why is this? Thank you!

Why is the OpenCV GPU module performing faster than VisionWorks?

I have tried several functions of the OpenCV GPU module and compared the same behavior with VisionWorks immediate-mode code. Surprisingly, in all circumstances the OpenCV GPU module performs significantly faster than VisionWorks.
For example, a level-4 Gaussian pyramid implemented manually using OpenCV:
#include <iostream>
#include <stdio.h>
#include <stdio.h>
#include <queue>
/* OPENCV RELATED */
#include <cv.h>
#include <highgui.h>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/stitching/detail/util.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"
#include <opencv2/gpu/gpu.hpp>
#include "opencv2/opencv_modules.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/util.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
using namespace gpu;
using namespace cv::detail;
int main()
{
Mat m = imread("br1.png");
GpuMat d_m = GpuMat (m);
GpuMat d_m2;
GpuMat l1,l2,l3,l4;
int iter = 100;
int64 e = getTickCount();
float sum = 0;
sum = 0;
for(int i = 0 ; i < iter; i++)
{
e = getTickCount();
gpu::pyrDown(d_m,l1);
gpu::pyrDown(l1,l2);
gpu::pyrDown(l2,l3);
gpu::pyrDown(l3,l4);
sum+= (getTickCount() - e) / getTickFrequency();
}
cout <<"Time taken by Gussian Pyramid Level 4 \t\t\t"<<sum/iter<<" sec"<<endl;
//imwrite("cv_res.jpg",res);
return 0;
}
takes 2.5 ms on average over 100 iterations, whereas the following VisionWorks version
#include <VX/vx.h>
#include <VX/vxu.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <stdio.h>
#include <stdio.h>
#include <queue>
/* OPENCV RELATED */
#include <cv.h>
#include <highgui.h>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/stitching/detail/util.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"
#include <opencv2/gpu/gpu.hpp>
#include "opencv2/opencv_modules.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/util.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
using namespace gpu;
using namespace cv::detail;
vx_image createImageFromMat(vx_context& context, cv::Mat& mat);
vx_status createMatFromImage(vx_image& image, cv::Mat& mat);
/* Entry point. */
int main(int argc,char* argv[])
{
Mat cv_src1 = imread("br1.png", IMREAD_GRAYSCALE);
int width = 1280;
int height = 720;
int half_width = width/2;
int half_height = height/2;
Mat dstMat(cv_src1.size(), cv_src1.type());
Mat half_dstMat(Size(width/16,height/16),cv_src1.type());
/* Image data. */
if (cv_src1.empty() )
{
std::cerr << "Can't load input images" << std::endl;
return -1;
}
/* Create our context. */
vx_context context = vxCreateContext();
/* Image to process. */
vx_image image = createImageFromMat(context, cv_src1);
//NVXIO_CHECK_REFERENCE(image);
/* Intermediate images. */
vx_image dx = vxCreateImage(context, width, height, VX_DF_IMAGE_S16);
vx_image dy = vxCreateImage(context, width, height, VX_DF_IMAGE_S16);
vx_image mag = vxCreateImage(context, width, height, VX_DF_IMAGE_S16);
vx_image half_image = vxCreateImage(context, half_width, half_height, VX_DF_IMAGE_U8);
vx_image half_image_2 = vxCreateImage(context, half_width/2, half_height/2, VX_DF_IMAGE_U8);
vx_image half_image_3 = vxCreateImage(context, half_width/4, half_height/4, VX_DF_IMAGE_U8);
vx_image half_image_4 = vxCreateImage(context, half_width/8, half_height/8, VX_DF_IMAGE_U8);
int64 e = getTickCount();
int iter = 100;
float sum = 0.0;
e = getTickCount();
iter = 100;
for(int i = 0 ; i < iter; i ++)
{
/* RESIZEZ OPERATION */
if(vxuHalfScaleGaussian(context,image,half_image,3) != VX_SUCCESS)
{
cout <<"ERROR :"<<"failed to perform scaling"<<endl;
}
if(vxuHalfScaleGaussian(context,half_image,half_image_2,3) != VX_SUCCESS)
{
cout <<"ERROR :"<<"failed to perform scaling"<<endl;
}
if(vxuHalfScaleGaussian(context,half_image_2,half_image_3,3) != VX_SUCCESS)
{
cout <<"ERROR :"<<"failed to perform scaling"<<endl;
}
if(vxuHalfScaleGaussian(context,half_image_3,half_image_4,3) != VX_SUCCESS)
{
cout <<"ERROR :"<<"failed to perform scaling"<<endl;
}
sum += (getTickCount() - e) / getTickFrequency();
}
cout <<"Resize to half " <<sum/iter<<endl;
createMatFromImage(half_image_4,half_dstMat);
imwrite("RES.jpg",half_dstMat);
/* Tidy up. */
vxReleaseImage(&dx);
vxReleaseImage(&dy);
vxReleaseImage(&mag);
vxReleaseContext(&context);
}
vx_image createImageFromMat(vx_context& context, cv::Mat& mat)
{
vx_imagepatch_addressing_t src_addr = {
mat.cols, mat.rows, sizeof(vx_uint8), mat.cols * sizeof(vx_uint8), VX_SCALE_UNITY, VX_SCALE_UNITY, 1, 1 };
void* src_ptr = mat.data;
vx_image image = vxCreateImageFromHandle(context, VX_DF_IMAGE_U8, &src_addr, &src_ptr, VX_IMPORT_TYPE_HOST);
return image;
}
vx_status createMatFromImage(vx_image& image, cv::Mat& mat)
{
vx_status status = VX_SUCCESS;
vx_uint8 *ptr = NULL;
cout <<"Creating image "<<mat.cols << " " <<mat.rows <<endl;
vx_rectangle_t rect;
vxGetValidRegionImage(image, &rect);
vx_imagepatch_addressing_t addr = {
mat.cols, mat.rows, sizeof(vx_uint8), mat.cols * sizeof(vx_uint8), VX_SCALE_UNITY, VX_SCALE_UNITY, 1, 1 };
status = vxAccessImagePatch(image, &rect, 0, &addr, (void **)&ptr, VX_READ_ONLY);
mat.data = ptr;
return status;
}
takes 11.1 ms on a single execution, and 96 ms on average over 100 iterations.
If this is generally true, then what does VisionWorks offer?
I am running the "cuda-repo-l4t-r21.3-6-5-local_6.5-50" version of L4T on a Jetson TK1.
You've made a mistake in the VisionWorks code: you start the timer only once (e = getTickCount();) right before the loop, but you need to restart it on each iteration:
iter = 100;
for(int i = 0 ; i < iter; i ++)
{
// START TIMER
e = getTickCount();
/* RESIZEZ OPERATION */
if(vxuHalfScaleGaussian(context,image,half_image,3) != VX_SUCCESS)
{
cout <<"ERROR :"<<"failed to perform scaling"<<endl;
}
if(vxuHalfScaleGaussian(context,half_image,half_image_2,3) != VX_SUCCESS)
{
cout <<"ERROR :"<<"failed to perform scaling"<<endl;
}
if(vxuHalfScaleGaussian(context,half_image_2,half_image_3,3) != VX_SUCCESS)
{
cout <<"ERROR :"<<"failed to perform scaling"<<endl;
}
if(vxuHalfScaleGaussian(context,half_image_3,half_image_4,3) != VX_SUCCESS)
{
cout <<"ERROR :"<<"failed to perform scaling"<<endl;
}
// STOP TIMER
sum += (getTickCount() - e) / getTickFrequency();
}
I also think the following code is a mistake:
Mat cv_src1 = imread("br1.png", IMREAD_GRAYSCALE);
int width = 1280;
int height = 720;
I think it should be set as follows:
Mat cv_src1 = imread("br1.png", IMREAD_GRAYSCALE);
vx_uint32 width = cv_src1.cols;
vx_uint32 height = cv_src1.rows;
I also made sample code to reproduce this.
However, VisionWorks (about 0.3 ms) is faster than GpuMat (about 0.4 ms) in my environment.
https://gist.github.com/atinfinity/9c8c067db739b190ba17f2bd8dbe75d6
https://gist.github.com/atinfinity/e8c2f2da6486be51881e3924c13a311c
My environment is as follows.
GPU: NVIDIA GeForce GTX 680
OS: Windows 10 Pro 64bit
Compiler: Visual Studio 2013 Update5
VisionWorks:NVIDIA VisionWorks v1.0.25
OpenCV: OpenCV 3.1

OpenCV: How to use cvSobel?

I'm trying to find the gradient direction from the edges using OpenCV 2.4.5, but I'm having a problem with cvSobel(); below are the error message and my code. I read somewhere that it might be due to a conversion between floating-point types(??), but I have no idea how to fix it. Any help??
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2\opencv.hpp>
#include <opencv2\calib3d\calib3d.hpp>
#include <iostream>
#include <stdlib.h>
#include "stdio.h"
using namespace cv;
using namespace std;
int main()
{
Mat im = imread("test1.jpg");
if (im.empty()) {
cout << "Cannot load image!" << endl;
}
Mat *dx, *dy;
dx = new Mat( Mat::zeros(im.rows, im.cols, 1));
dy = new Mat( Mat::zeros(im.rows, im.cols, 1));
imshow("Image", im);
// Convert Image to gray scale
Mat im_gray;
cvtColor(im, im_gray, CV_RGB2GRAY);
imshow("Gray", im_gray);
//trying to find the direction, but gives errors here
cvSobel(&im_gray, dx, 1,0,3);
waitKey(0);
return 0;
}
You are mixing the C++ and C APIs: cv::Mat is from the C++ API and CvArr* is from the C API.
Here you are using the C API function cvSobel on C++ classes:
//trying to find the direction, but gives errors here
cvSobel(&im_gray, dx, 1,0,3);
What happens if you do
cv::Sobel( im_gray, dx, im_gray.depth(), 1, 0, 3);
EDIT
and declare
Mat dx;
Mat dy;
I think this might solve your problem; I'm actually quite surprised your code compiles.
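Putting the pieces together, a minimal C++-only version of the relevant part might look like this (a sketch; the CV_16S output depth and the extra y-gradient call are common choices, not something taken from the original code):
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
    Mat im = imread("test1.jpg");
    if (im.empty()) {
        cout << "Cannot load image!" << endl;
        return -1;
    }
    Mat im_gray;
    cvtColor(im, im_gray, CV_RGB2GRAY);
    // C++ API: cv::Sobel with plain Mat objects instead of cvSobel with pointers
    Mat dx, dy;
    Sobel(im_gray, dx, CV_16S, 1, 0, 3);  // gradient in x
    Sobel(im_gray, dy, CV_16S, 0, 1, 3);  // gradient in y
    imshow("Gray", im_gray);
    waitKey(0);
    return 0;
}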

How to obtain a contour from points with OpenCV

I'm trying to obtain a ROI from an image using VC++ and OpenCV.
I managed to display an image, get the coordinates of a point when I click on it, store these coordinates in a vector and draw lines between these points on my image.
Here is my code:
//Includes
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <stdio.h>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;
static int app;
static vector<Point2f> cont(6);
static Mat img = imread("C:/img.jpg",0);
void on_mouse(int, int, int, int, void* );
int main()
{
app = 0;
namedWindow("myWindow", CV_WINDOW_AUTOSIZE);
cvSetMouseCallback("myWindow", on_mouse, 0);
imshow("myWindow", img);
waitKey(0);
}
void on_mouse(int evt, int x, int y, int flags, void* param)
{
if(evt == CV_EVENT_LBUTTONDOWN)
{
Point pt(x,y);
if(app<6)
{
cont[app]=pt;
app++;
}
cout<<"Coordonnees du point pt : "<<x<<","<<y<<endl;
for (int i=0; i<6;i++)
{cout<<cont[i]<<endl;}
}
if(evt == CV_EVENT_RBUTTONDOWN)
{
for (int j=0;j<5;j++)
{
line(img,cont[(j)],cont[(j+1)],CV_RGB(255,0,0),2);
}
line(img,cont[(5)],cont[(0)],CV_RGB(255,0,0),2);
imshow("myWindow", img);
}
}
What I would like to obtain is a vector that contains the coordinates of all the points of the contour and, ultimately, a binary matrix the size of my image that contains 0 if the pixel is not inside the contour and 1 otherwise.
Thanks for your help.
Make a single-element vector< vector<Point> > and then use drawContours with CV_FILLED. Then you will have the binary matrix you wanted.
I don't have an IDE at hand right now, but the code will look like the following:
vector< vector< Point> > contours;
contours.push_back(cont);//your cont
Mat output(img.rows,img.cols,CV_8UC1);//your img
drawContours(output, contours, 0, Scalar(1), CV_FILLED);//now you have binary image
