How do I get from an OpenCV cv::Mat point cloud to a pcl::PointCloud? The color is not important to me, only the points themselves.
You can do it like this:
pcl::PointCloud<pcl::PointXYZ>::Ptr SimpleOpenNIViewer::MatToPoinXYZ(cv::Mat OpencVPointCloud)
{
    /*
     * Function: Get from a Mat to pcl pointcloud datatype
     * In:  cv::Mat
     * Out: pcl::PointCloud
     */

    //char pr=100, pg=100, pb=100;
    pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZ>);

    for (int i = 0; i < OpencVPointCloud.cols; i++)
    {
        //std::cout << i << std::endl;
        pcl::PointXYZ point;
        point.x = OpencVPointCloud.at<float>(0, i);
        point.y = OpencVPointCloud.at<float>(1, i);
        point.z = OpencVPointCloud.at<float>(2, i);

        // when color needs to be added:
        //uint32_t rgb = (static_cast<uint32_t>(pr) << 16 | static_cast<uint32_t>(pg) << 8 | static_cast<uint32_t>(pb));
        //point.rgb = *reinterpret_cast<float*>(&rgb);

        point_cloud_ptr->points.push_back(point);
    }

    point_cloud_ptr->width = (int)point_cloud_ptr->points.size();
    point_cloud_ptr->height = 1;

    return point_cloud_ptr;
}
And also the other way around:
cv::Mat MVW_ICP::PoinXYZToMat(pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr)
{
    cv::Mat OpenCVPointCloud(3, point_cloud_ptr->points.size(), CV_64FC1);

    for (int i = 0; i < point_cloud_ptr->points.size(); i++)
    {
        OpenCVPointCloud.at<double>(0, i) = point_cloud_ptr->points.at(i).x;
        OpenCVPointCloud.at<double>(1, i) = point_cloud_ptr->points.at(i).y;
        OpenCVPointCloud.at<double>(2, i) = point_cloud_ptr->points.at(i).z;
    }

    return OpenCVPointCloud;
}
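A quick usage sketch (my addition, purely illustrative): it assumes the cv::Mat is laid out as 3 rows (x, y, z) by N columns of CV_32F, as the first function expects, and calls the two helpers as free functions for brevity.

    // Build a tiny 3x2 CV_32F Mat, convert it to a PCL cloud and back (the result comes back as CV_64FC1).
    cv::Mat pts(3, 2, CV_32FC1);
    pts.at<float>(0, 0) = 1.f; pts.at<float>(1, 0) = 2.f; pts.at<float>(2, 0) = 3.f;
    pts.at<float>(0, 1) = 4.f; pts.at<float>(1, 1) = 5.f; pts.at<float>(2, 1) = 6.f;

    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud = MatToPoinXYZ(pts); // OpenCV -> PCL
    cv::Mat back = PoinXYZToMat(cloud);                            // PCL -> OpenCV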
To convert a range image captured by a Kinect sensor (held in depthMat) to a pcl::PointCloud, you can try this function. The calibration parameters hard-coded below are typical Kinect values.
pcl::PointCloud<pcl::PointXYZ>::Ptr MatToPoinXYZ(cv::Mat depthMat)
{
    pcl::PointCloud<pcl::PointXYZ>::Ptr ptCloud(new pcl::PointCloud<pcl::PointXYZ>);

    // calibration parameters
    float const fx_d = 5.9421434211923247e+02;
    float const fy_d = 5.9104053696870778e+02;
    float const cx_d = 3.3930780975300314e+02;
    float const cy_d = 2.4273913761751615e+02;

    // walk the depth image byte by byte (assumes a single-channel 8-bit depth map)
    unsigned char* p = depthMat.data;
    for (int i = 0; i < depthMat.rows; i++)
    {
        for (int j = 0; j < depthMat.cols; j++)
        {
            float z = static_cast<float>(*p);
            pcl::PointXYZ point;
            point.z = 0.001 * z; // millimetres to metres
            point.x = point.z * (j - cx_d) / fx_d;
            point.y = point.z * (cy_d - i) / fy_d;
            ptCloud->points.push_back(point);
            ++p;
        }
    }
    ptCloud->width = (int)depthMat.cols;
    ptCloud->height = (int)depthMat.rows;

    return ptCloud;
}
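A hedged usage sketch (my addition): it assumes the depth image is loaded as a single-channel 8-bit cv::Mat, since the function above walks the data byte by byte; real Kinect depth maps are often 16-bit, in which case the pointer type and scaling would need adjusting.

    #include <opencv2/imgcodecs.hpp>
    #include <pcl/io/pcd_io.h>

    cv::Mat depthMat = cv::imread("depth.png", cv::IMREAD_GRAYSCALE); // CV_8UC1 assumed
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud = MatToPoinXYZ(depthMat);
    pcl::io::savePCDFileASCII("cloud.pcd", *cloud); // save for inspection, e.g. in pcl_viewer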
Related
I want to calculate the perimeter of a white blob in a 512×512 binary image. The image will contain only one blob. I used the following code in OpenCV 3, but it no longer works in OpenCV 4.2: IplImage is deprecated in the latest version, and I cannot pass a Mat object directly to the cvFindContours function. I am new to OpenCV and don't know how it works. Other related questions regarding the perimeter are still unanswered.
To summarize, the following works in OpenCV 3 but does not work in the current OpenCV version (4.2).
int getPerimeter(unsigned char* inImagePtr, int inW, int inH)
{
    int sumEven = 0; int sumOdd = 0;
    int sumCorner = 0; int prevCode = 0;

    //create a mat input Image
    cv::Mat inImage(inH, inW, CV_8UC1, inImagePtr);

    //create four connected structuring element
    cv::Mat element = cv::Mat::zeros(3, 3, CV_8UC1);
    element.data[1] = 1; element.data[3] = 1;
    element.data[4] = 1; element.data[5] = 1;
    element.data[7] = 1;

    //erode input image
    cv::Mat erodeImage;
    erode(inImage, erodeImage, element);

    //Invert eroded Image
    cv::threshold(erodeImage, erodeImage, 0, 255, THRESH_BINARY_INV);

    //multiply with original binary Image to get the edge Image
    cv::Mat edge = erodeImage.mul(inImage);

    //Get chain code of the blob
    CvChain* chain = 0;
    CvMemStorage* storage = 0;
    storage = cvCreateMemStorage(0);
    auto temp = new IplImage(edge);
    cvFindContours(temp, storage, (CvSeq**)(&chain), sizeof(*chain), CV_RETR_EXTERNAL, CV_CHAIN_CODE);
    delete temp;

    for (; chain != NULL; chain = (CvChain*)chain->h_next)
    {
        CvSeqReader reader;
        int i, total = chain->total;
        cvStartReadSeq((CvSeq*)chain, &reader, 0);
        for (i = 0; i < total; i++)
        {
            char code;
            CV_READ_SEQ_ELEM(code, reader);
            if (code % 2 == 0)
                sumEven++;
            else
                sumOdd++;
            if (i > 0) {
                if (code != prevCode)
                    sumCorner++;
            }
            prevCode = code;
        }
    }

    float perimeter = (float)sumEven * 0.980 + (float)sumOdd * 1.406 - (float)sumCorner * 0.091;
    return (roundf(perimeter));
}
This worked just fine for me!
int getPerimeter(unsigned char* inImagePtr, int inW, int inH)
{
    // create a mat input Image
    cv::Mat inImage(inH, inW, CV_8UC1, inImagePtr);

    // create four connected structuring element
    cv::Mat element = cv::Mat::zeros(3, 3, CV_8UC1);
    element.data[1] = 1;
    element.data[3] = 1;
    element.data[4] = 1;
    element.data[5] = 1;
    element.data[7] = 1;

    // erode input image
    cv::Mat erodeImage;
    erode(inImage, erodeImage, element);

    // Invert eroded Image
    cv::threshold(erodeImage, erodeImage, 0, 255, THRESH_BINARY_INV);

    // multiply with original binary Image to get the edge Image
    cv::Mat edge = erodeImage.mul(inImage);

    vector<vector<Point>> contours;
    findContours(edge, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE); // Retrieve only the external contour

    int preValue[2];
    int nextValue[2];
    int sumEven = 0;
    int sumOdd = 0;
    //vector<Point>::iterator itr;
    for (int ii = 0; ii < contours[0].size(); ii++) {
        Point pt = contours[0].at(ii);
        preValue[0] = pt.x;
        preValue[1] = pt.y;
        if (ii != contours[0].size() - 1) {
            Point pt_next = contours[0].at(ii + 1);
            nextValue[0] = pt_next.x;
            nextValue[1] = pt_next.y;
        } else {
            Point pt_next = contours[0].at(0);
            nextValue[0] = pt_next.x;
            nextValue[1] = pt_next.y;
        }
        if ((preValue[0] == nextValue[0]) || (preValue[1] == nextValue[1])) {
            sumEven = sumEven + abs(nextValue[0] - preValue[0]) + abs(nextValue[1] - preValue[1]);
        } else {
            sumOdd = sumOdd + abs(nextValue[0] - preValue[0]);
        }
    }

    int sumCorner = contours[0].size() - 1;
    float perimeter = round(sumEven * 0.980 + sumOdd * 1.406 - sumCorner * 0.091);
    return (roundf(perimeter));
}
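As an aside (my suggestion, not part of the answer above): if the specific chain-code weighting is not required, OpenCV can compute a perimeter directly with cv::arcLength on the external contour. A minimal sketch:

    #include <opencv2/imgproc.hpp>
    #include <vector>

    double getPerimeterArcLength(const cv::Mat& binaryImage)
    {
        std::vector<std::vector<cv::Point>> contours;
        // CHAIN_APPROX_NONE keeps every boundary pixel, so the arc length follows the blob closely
        cv::findContours(binaryImage, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);
        if (contours.empty())
            return 0.0;
        return cv::arcLength(contours[0], true); // true: treat the contour as closed
    }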
I have a point cloud generated by scanning a planar surface using stereo cameras. I have computed features such as normals, FPFH, etc., and using this information I want to classify areas in the point cloud. To enable the use of more traditional CNN approaches, I want to convert this point cloud to a multi-channel image in OpenCV. I have the point cloud collapsed onto the XY plane and aligned to the X and Y axes, so that I can create a bounding box for the image.
I am looking for ideas on how to proceed further with the mapping from points to pixels. Specifically, I am confused about the image size, and about how to fill in each pixel with the appropriate data. (Overlapping points would be averaged out; empty pixels would be labelled accordingly.) Since this is an unorganized point cloud, I do not have camera parameters to use, and I guess PCL's RangeImage class would not work in my case.
Any help is appreciated!
Try creating an empty cv::Mat of predetermined size first. Then iterate through every pixel of that Mat to determine what value it should take.
Here is some code which does something similar to what you were describing:
cv::Mat makeImageFromPointCloud(pcl::PointCloud<pcl::PointXYZI>::Ptr cloud, std::string dimensionToRemove, float stepSize1, float stepSize2)
{
    pcl::PointXYZI cloudMin, cloudMax;
    pcl::getMinMax3D(*cloud, cloudMin, cloudMax);

    std::string dimen1, dimen2;
    float dimen1Max, dimen1Min, dimen2Min, dimen2Max;
    if (dimensionToRemove == "x")
    {
        dimen1 = "y";
        dimen2 = "z";
        dimen1Min = cloudMin.y;
        dimen1Max = cloudMax.y;
        dimen2Min = cloudMin.z;
        dimen2Max = cloudMax.z;
    }
    else if (dimensionToRemove == "y")
    {
        dimen1 = "x";
        dimen2 = "z";
        dimen1Min = cloudMin.x;
        dimen1Max = cloudMax.x;
        dimen2Min = cloudMin.z;
        dimen2Max = cloudMax.z;
    }
    else if (dimensionToRemove == "z")
    {
        dimen1 = "x";
        dimen2 = "y";
        dimen1Min = cloudMin.x;
        dimen1Max = cloudMax.x;
        dimen2Min = cloudMin.y;
        dimen2Max = cloudMax.y;
    }

    std::vector<std::vector<int>> pointCountGrid;
    int maxPoints = 0;

    std::vector<pcl::PointCloud<pcl::PointXYZI>::Ptr> grid;

    for (float i = dimen1Min; i < dimen1Max; i += stepSize1)
    {
        pcl::PointCloud<pcl::PointXYZI>::Ptr slice = passThroughFilter1D(cloud, dimen1, i, i + stepSize1);
        grid.push_back(slice);

        std::vector<int> slicePointCount;

        for (float j = dimen2Min; j < dimen2Max; j += stepSize2)
        {
            pcl::PointCloud<pcl::PointXYZI>::Ptr grid_cell = passThroughFilter1D(slice, dimen2, j, j + stepSize2);

            int gridSize = grid_cell->size();
            slicePointCount.push_back(gridSize);

            if (gridSize > maxPoints)
            {
                maxPoints = gridSize;
            }
        }
        pointCountGrid.push_back(slicePointCount);
    }

    cv::Mat mat(static_cast<int>(pointCountGrid.size()), static_cast<int>(pointCountGrid.at(0).size()), CV_8UC1);
    mat = cv::Scalar(0);

    for (int i = 0; i < mat.rows; ++i)
    {
        for (int j = 0; j < mat.cols; ++j)
        {
            int pointCount = pointCountGrid.at(i).at(j);
            float percentOfMax = (pointCount + 0.0) / (maxPoints + 0.0);
            int intensity = percentOfMax * 255;

            mat.at<uchar>(i, j) = intensity;
        }
    }

    return mat;
}
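The code above relies on a passThroughFilter1D helper that is not shown. A minimal sketch of what it might look like (my assumption, not the original author's code), using pcl::PassThrough to keep the points whose chosen field falls inside the given interval:

    #include <pcl/filters/passthrough.h>

    pcl::PointCloud<pcl::PointXYZI>::Ptr passThroughFilter1D(
        pcl::PointCloud<pcl::PointXYZI>::Ptr cloud,
        const std::string& field, float low, float high)
    {
        pcl::PointCloud<pcl::PointXYZI>::Ptr filtered(new pcl::PointCloud<pcl::PointXYZI>);
        pcl::PassThrough<pcl::PointXYZI> pass;
        pass.setInputCloud(cloud);
        pass.setFilterFieldName(field);   // "x", "y" or "z"
        pass.setFilterLimits(low, high);  // keep points with field value in [low, high]
        pass.filter(*filtered);
        return filtered;
    }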
I want to swap the U and V bits in YUV format, from NV12
YYYYYYYY UVUV // each letter represents a bit
to NV21
YYYYYYYY VUVU
I leave the Y plane alone and handle the U and V data with the function below:
uchar swap(uchar in) {
    uchar out = ((in >> 1) & 0x55) | ((in << 1) & 0xaa);
    return out;
}
But I cannot get the desired result; the colour of the output image is still not correct.
How can I swap the U and V planes correctly?
Found the problem. U and V should be manipulated at the byte level, not the bit level.
byte[] yuv = // ...
final int length = yuv.length;
for (int i1 = 0; i1 < length; i1 += 2) {
    if (i1 >= width * height) {
        byte tmp = yuv[i1];
        yuv[i1] = yuv[i1 + 1];
        yuv[i1 + 1] = tmp;
    }
}
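For reference, the same byte-pair swap as a minimal C++ sketch (my addition; it assumes a contiguous NV12 buffer, i.e. a full Y plane followed by the interleaved UV plane):

    #include <cstdint>
    #include <cstddef>
    #include <utility>

    // Swap the interleaved U/V bytes in place, turning NV12 (UVUV...) into NV21 (VUVU...).
    void nv12ToNv21InPlace(uint8_t* yuv, int width, int height)
    {
        const size_t ySize = static_cast<size_t>(width) * height; // size of the Y plane
        const size_t uvSize = ySize / 2;                          // interleaved chroma plane
        for (size_t i = 0; i + 1 < uvSize; i += 2)
            std::swap(yuv[ySize + i], yuv[ySize + i + 1]);        // U <-> V
    }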
Try this method (-_-):
IFrameCallback iFrameCallback = new IFrameCallback() {
    @Override
    public void onFrame(ByteBuffer frame) {
        // get NV12 data
        byte[] b = new byte[frame.remaining()];
        frame.get(b);
        // convert NV12 data to NV21
        NV12ToNV21(b, 1280, 720);
        // send NV21 data
        BVPU.InputVideoData(nv21, nv21.length,
                System.currentTimeMillis() * 1000, 1280, 720);
    }
};

byte[] nv21;

private void NV12ToNV21(byte[] data, int width, int height) {
    nv21 = new byte[data.length];
    int framesize = width * height;

    // copy the Y plane unchanged
    System.arraycopy(data, 0, nv21, 0, framesize);

    // swap each interleaved chroma pair: NV12 stores U,V while NV21 stores V,U
    for (int j = 0; j < framesize / 2; j += 2) {
        nv21[framesize + j] = data[framesize + j + 1];     // V
        nv21[framesize + j + 1] = data[framesize + j];     // U
    }
}
I am using an Aptina 5MP sensor with a fish-eye lens to capture images.
I am using the following algorithm to correct the lens distortion:
http://www.tannerhelland.com/4743/simple-algorithm-correcting-lens-distortion/
It is not correcting the image properly.
Any help will be appreciated.
//code----
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>
#include <math.h>

using namespace cv;
using namespace std;

// globals
Mat src, dst;
Mat map_x, map_y;

#define REMAP_WINDOW "Remap Circle"

void make_circle_map(float, float, float, float);

int main(int argc, char** argv)
{
    // load image
    src = imread(argv[1], 1);

    float qvDepth = atof(argv[2]);
    float fixStrength = atof(argv[3]);
    float fixZoom = atof(argv[4]);
    float lensRadius = atof(argv[5]);

    // create destination and the maps
    dst.create(src.size(), src.type());
    map_x.create(src.size(), CV_32FC1);
    map_y.create(src.size(), CV_32FC1);

    // create window
    // namedWindow(REMAP_WINDOW, CV_WINDOW_AUTOSIZE);

    make_circle_map(qvDepth, fixStrength, fixZoom, lensRadius);

    remap(src, dst, map_x, map_y, CV_INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 0, 0));

    //imshow(REMAP_WINDOW, dst);
    imwrite("got1.jpg", dst);

    // while(27 != waitKey()) {
    //     just wait
    // }
    // cvDestroyWindow(REMAP_WINDOW);

    return 0;
}

void make_circle_map(float qvDepth, float fixStrength, float fixZoom, float lensRadius)
{
    //ApplyLensCorrection(double fixStrength, double fixZoom, double lensRadius, long long edgeHandling, long long superSamplingAmount
    cout << "qvDepth :" << qvDepth << " fixStrength :" << fixStrength << " fixZoom :" << fixZoom << " lensRadius :" << lensRadius << endl;

    //float qvDepth = 32;//24;
    //float fixStrength = 4.5; // has to be utilized further
    //float fixZoom = 0.5;
    //float lensRadius = 2;

    //Calculate the center of the image
    //double midX = 0;
    //double midY = 0;
    long tWidth = 1944;
    long tHeight = 2580;

    // the center
    double midX = (double)src.cols / 2;
    double midY = (double)src.rows / 2;

    //Rotation values
    double theta = 0;
    double sRadius = 0;
    double sRadius2 = 0;
    double sDistance = 0;
    double radius = 0;
    double j = 0;
    double k = 0;

    //X and Y values, remapped around a center point of (0, 0)
    double nX = 0;
    double nY = 0;
    double QuickVal = 0;
    float ssX;
    float ssY;

    //Source X and Y values, which may or may not be used as part of a bilinear interpolation function
    double srcX = 0;
    double srcY = 0;

    sRadius = sqrt(tWidth * tWidth + tHeight * tHeight) / 2;
    cout << "sRadius :" << sRadius << endl;

    double refDistance = 0; //modified 0 to 2
    if (fixStrength == 0)
    {
        fixStrength = 0.00000001;
    }
    refDistance = sRadius * 2 / fixStrength;

    sRadius = sRadius * (lensRadius / 100);
    sRadius2 = sRadius * sRadius;
    cout << "refDistance :" << refDistance << " sRadius :" << sRadius << " sRadius2 :" << sRadius2 << endl;

    float sampleIndex = 1; //has to be changed in future

    for (int x = 0; x <= tWidth; x++)
    {
        QuickVal = x * qvDepth;

        for (int y = 0; y <= tHeight; y++)
        {
            //Remap the coordinates around a center point of (0, 0)
            nX = x - midX;
            nY = y - midY;

            //Offset the pixel amount by the supersampling lookup table
            for (int ii = 1; ii < 4; ii++)
            {
                j = nX + ii;
                k = nY + ii;

                //Calculate distance automatically
                sDistance = (j * j) + (k * k);
                //cout << "nx :" << nX << " ny :" << nY << " j :" << j << " k :" << k << " sDistance :" << sDistance << " sRadius2 :" << sRadius2 << endl;

                if (sDistance <= sRadius2)
                {
                    sDistance = sqrt(sDistance);
                    radius = sDistance / refDistance;

                    if (radius == 0)
                    {
                        theta = 1;
                    }
                    else
                    {
                        theta = atan(radius) / radius;
                    }

                    //srcX = midX + theta * j * fixZoom;
                    //srcY = midY + theta * k * fixZoom;
                    map_x.at<float>(x, y) = midX + cos(fabs(theta)) * j * fixZoom;
                    map_y.at<float>(x, y) = midY + sin(fabs(theta)) * k * fixZoom;
                }
                else
                {
                    map_x.at<float>(x, y) = x + cos(fabs(theta)); //* fixZoom;//x;
                    map_y.at<float>(x, y) = y + sin(fabs(theta)); //* fixZoom;//y;
                }
            }
        }
    }
}
Replace the lines that compute map_x and map_y (both inside and outside the radius check) with the following:
map_x.at<float>(x, y) = midX + theta * j * fixZoom;
map_y.at<float>(x, y) = midY + theta * k * fixZoom;
}
else
{
    map_x.at<float>(x, y) = x; //* fixZoom;//x;
    map_y.at<float>(x, y) = y; //* fixZoom;//y;
Run the executable with the arguments [image name], BPP, correction parameter, zoom parameter, applied ratio, e.g.:
./lensdistortcorrect image.jpg 24 6.2 2.2 100
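As an aside (my suggestion, not part of the answer above): if you can calibrate the camera with OpenCV's fisheye model, cv::fisheye::undistortImage performs the correction directly. A minimal sketch, assuming K and D come from a prior cv::fisheye::calibrate run (the names here are placeholders, not real calibration data):

    #include <opencv2/calib3d.hpp>

    // K: 3x3 camera matrix, D: 4x1 fisheye distortion coefficients from a prior calibration.
    cv::Mat undistortFisheye(const cv::Mat& distorted, const cv::Mat& K, const cv::Mat& D)
    {
        cv::Mat undistorted;
        // Using K again as the new camera matrix keeps roughly the original field of view.
        cv::fisheye::undistortImage(distorted, undistorted, K, D, K);
        return undistorted;
    }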
I want to process an image so that each pixel value is the mean of the pixel itself and its 4 neighbours.
I created two different functions:
Mat meanImage(cv::Mat& inputImage)
{
    Mat output;
    Mat kernel(3, 3, CV_32F, 0.0);
    kernel.at<float>(0, 1) = 0.2;
    kernel.at<float>(1, 0) = 0.2;
    kernel.at<float>(1, 1) = 0.2;
    kernel.at<float>(1, 2) = 0.2;
    kernel.at<float>(2, 1) = 0.2;

    filter2D(inputImage, output, -1, kernel);

    return output;
}
and:
Mat meanImage2(Mat& inputImage)
{
    Mat temp;
    Mat output(inputImage.rows, inputImage.cols, inputImage.type());
    copyMakeBorder(inputImage, temp, 1, 1, 1, 1, BORDER_REPLICATE);

    CV_Assert(output.isContinuous());
    CV_Assert(temp.isContinuous());

    const int len = output.rows * output.cols * output.channels();
    const int rowLenTemp = temp.cols * temp.channels();
    const int twoRowLenTemp = 2 * rowLenTemp;
    const int rowLen = output.cols * output.channels();

    uchar* outPtr = output.ptr<uchar>(0);
    uchar* tempPtr = temp.ptr<uchar>(0);

    for (int i = 0; i < len; ++i)
    {
        const int a = 6 * (i / rowLen) + 3;

        outPtr[i] = (tempPtr[i + rowLenTemp + a] + tempPtr[i + a] +
                     tempPtr[i + rowLenTemp + a + 3] + tempPtr[i + rowLenTemp + a - 3] +
                     tempPtr[i + twoRowLenTemp + a]) / 5;
    }

    return output;
}
I assumed that the results would be the same, so I compared the images:
Mat diff;
compare(meanImg1,meanImg2,diff,CMP_NE);
printf("Difference: %d\n",countNonZero(diff));
imshow("diff",diff);
And I get a lot of differences. What is the difference between these functions?
Edit:
Difference shown for the Lena test image:
Beware that when you sum the pixels, you are adding unsigned chars, which may overflow.
Test your code by casting these pixel values to int:
outPtr[i] = ((int)tempPtr[i+rowLenTemp+a] + (int)tempPtr[i+a] +
(int)tempPtr[i+rowLenTemp+a+3] + (int)tempPtr[i+rowLenTemp+a-3] +
(int)tempPtr[i+twoRowLenTemp+a]) / 5;
Edit: I'd rather code this as follows (assuming the image type is uchar and it has 3 channels):
for (int r = 0; r < output.rows; r++)
{
    uchar* previousRow = temp.ptr<uchar>(r) + 3;
    uchar* currentRow  = temp.ptr<uchar>(r + 1) + 3;
    uchar* nextRow     = temp.ptr<uchar>(r + 2) + 3;

    uchar* outRow = output.ptr<uchar>(r);

    for (int c = 0; c < 3 * output.cols; c++)
    {
        int value = (int)previousRow[c] +
                    (int)currentRow[c - 3] + (int)currentRow[c] + (int)currentRow[c + 3] +
                    (int)nextRow[c];
        outRow[c] = value / 5;
    }
}