Determining Joint Rotations in Gazebo using KDL - robotics

I am trying to use the Kinematics and Dynamics Library (KDL) within Gazebo to make a "robot arm" position its palm onto a target model. My current issue is that when I rotate my joints to the computed values, my robot is not at all where I expect it to be and does not touch the target cylinder. I have tried asking the Gazebo community for an answer, but I have not gotten a response, so I was hoping that someone on Stack Overflow could be of assistance. Pasted below is the relevant code:
//Create a KDL Joint Chain of our Arm Robot
KDL::Chain chain;

//Base Joint
math::Vector3 basePose = mBaseJoint->GetAnchor(0);
chain.addSegment(KDL::Segment(KDL::Joint(KDL::Joint::RotZ),
    KDL::Frame(KDL::Vector(basePose.x, basePose.y, basePose.z))));

//Shoulder Joint
math::Vector3 shoulderPose = mShoulderJoint->GetAnchor(0);
chain.addSegment(KDL::Segment(KDL::Joint(KDL::Joint::RotX),
    KDL::Frame(KDL::Vector(shoulderPose.x, shoulderPose.y, shoulderPose.z))));

//First Elbow Joint
math::Vector3 firstElbowPose = mFirstElbowJoint->GetAnchor(0);
chain.addSegment(KDL::Segment(KDL::Joint(KDL::Joint::RotX),
    KDL::Frame(KDL::Vector(firstElbowPose.x, firstElbowPose.y, firstElbowPose.z))));

//Second Elbow Joint
math::Vector3 secondElbowPose = mSecondElbowJoint->GetAnchor(0);
chain.addSegment(KDL::Segment(KDL::Joint(KDL::Joint::RotX),
    KDL::Frame(KDL::Vector(secondElbowPose.x, secondElbowPose.y, secondElbowPose.z))));

//End Joint
math::Vector3 endPose = mWristJoint->GetAnchor(0);
chain.addSegment(KDL::Segment(KDL::Joint(KDL::Joint::None),
    KDL::Frame(KDL::Vector(endPose.x, endPose.y, endPose.z))));

KDL::ChainIkSolverPos_LMA myIk = KDL::ChainIkSolverPos_LMA(chain);

//Create joint array
jointCount = chain.getNrOfJoints();
KDL::JntArray jointPositions = KDL::JntArray(jointCount);
targetRotation = KDL::JntArray(jointCount);
for(int i = 0; i < jointCount - 1; i++)
{
    jointPositions(i) = mJoints[i]->GetAngle(0).Radian();
}

physics::ModelPtr target = mModel->GetWorld()->GetModel("Target");
math::Pose pose = target->GetWorldPose();
math::Pose myPose = mModel->GetWorldPose();
double xPosition = pose.pos.x - myPose.pos.x;
double yPosition = pose.pos.y - myPose.pos.y;
double zPosition = pose.pos.z - myPose.pos.z;
KDL::Frame cartesianPosition = KDL::Frame(KDL::Vector(xPosition, yPosition, zPosition));

gzdbg << "Calculating..." << std::endl;
bool kinematics_status;
kinematics_status = myIk.CartToJnt(jointPositions, cartesianPosition, targetRotation);
gzdbg << "Final Status: " << kinematics_status << std::endl;
if(kinematics_status >= 0)
{
    for(int i = 0; i < jointCount - 1; i++)
    {
        mJoints[i]->SetAngle(0, targetRotation(i));
    }
}
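For what it's worth, one thing to double-check (this is an assumption on my part, not something confirmed in this thread): KDL::Segment's Frame argument is the transform from that segment's joint to its tip (i.e. to the next joint), expressed relative to the joint itself, whereas the code above passes each joint's world-space anchor. A minimal sketch of building the chain from relative offsets, with illustrative variable names and assuming the anchors are read with the arm in its zero configuration, might look like this:

//Sketch only -- variable names are illustrative, reusing the anchors read above.
//Each segment frame is the offset from one joint anchor to the next.
math::Vector3 baseToShoulder   = shoulderPose - basePose;
math::Vector3 shoulderToElbow1 = firstElbowPose - shoulderPose;
math::Vector3 elbow1ToElbow2   = secondElbowPose - firstElbowPose;
math::Vector3 elbow2ToWrist    = endPose - secondElbowPose;

KDL::Chain relChain;
relChain.addSegment(KDL::Segment(KDL::Joint(KDL::Joint::RotZ),
    KDL::Frame(KDL::Vector(baseToShoulder.x, baseToShoulder.y, baseToShoulder.z))));
relChain.addSegment(KDL::Segment(KDL::Joint(KDL::Joint::RotX),
    KDL::Frame(KDL::Vector(shoulderToElbow1.x, shoulderToElbow1.y, shoulderToElbow1.z))));
relChain.addSegment(KDL::Segment(KDL::Joint(KDL::Joint::RotX),
    KDL::Frame(KDL::Vector(elbow1ToElbow2.x, elbow1ToElbow2.y, elbow1ToElbow2.z))));
relChain.addSegment(KDL::Segment(KDL::Joint(KDL::Joint::RotX),
    KDL::Frame(KDL::Vector(elbow2ToWrist.x, elbow2ToWrist.y, elbow2ToWrist.z))));

Likewise, the goal frame handed to CartToJnt() has to be expressed in the chain's base frame, so the base link's world orientation would need to be taken into account rather than only subtracting the model origins.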

Related

ML.NET machine learning model not producing the same results using the same data

We are running the code below for a number of different days. There is a set of data used for each day, but for a given day the data does not change.
If we run the algorithm a number of times for one day, the cluster results created are consistent.
If we run the algorithm a number of times for a number of days, the cluster results created are sometimes NOT consistent for one (or more) of the days.
for (int i = 1; i <= 4; i++)
{
    MLContext mlContext = new MLContext(seed: 100);
    IDataView trainingData = mlContext.Data.LoadFromEnumerable<Data>(filteredData);

    var options = new KMeansTrainer.Options
    {
        NumberOfClusters = i,
        NumberOfThreads = (i == 1) ? 1 : 50,
        InitializationAlgorithm = InitializationAlgorithm.Random
    };

    // Set up a learning pipeline
    // Step 1: concatenate input features into a single column
    var pipeline = mlContext.Transforms.Concatenate(
            "Features",
            "Value")
        // Step 2: use the k-means clustering algorithm
        .Append(mlContext.Clustering.Trainers.KMeans(options));

    try
    {
        using (TransformerChain<ClusteringPredictionTransformer<KMeansModelParameters>> model = pipeline.Fit(trainingData))
        {
            VBuffer<float>[] centroids = null;
            var last = model.LastTransformer;
            last.Model.GetClusterCentroids(ref centroids, out int k);

#if DUMP_METRICS
            // Evaluate the overall metrics
            IDataView transformedData = model.Transform(trainingData);
            ClusteringMetrics metrics = mlContext.Clustering.Evaluate(transformedData, null, "Score", "Features");
            Debug.WriteLine($"Average Distance: " + $"{metrics.AverageDistance:F2}");
#endif

            // Get all centroids
            ClusterInfo clusterInfo = new ClusterInfo();
            clusterInfo.NumberOfClusters = k;
            List<CenterInfo> centerInfos = new List<CenterInfo>(k);
            for (int j = 0; j < k; j++)
            {
                CenterInfo centerInfo = new CenterInfo();
                centerInfo.Value = centroids[j].GetValues().ToArray().FirstOrDefault();
                centerInfo.WithinSS = 0;
                centerInfos.Add(centerInfo);
            }

            // For each reading in the data find out which value is closest
            for (int y = 0; y < filteredData.Count; y++)
            {
                float value = filteredData[y].Value;
                List<double> distances = new List<double>();
                for (int j = 0; j < k; j++) distances.Add(Math.Pow(value - centerInfos[j].Value, 2));
                double minDistance = distances.Min();
                int index = distances.FindIndex(a => a == minDistance);
                Debug.Assert(value == data[y + indexRange.StartIndex]);
                centerInfos[index].AddSample(new Sample<float>(data.FlagEncodedTimeAtIndex(y + indexRange.StartIndex), value));
                centerInfos[index].WithinSS += minDistance;
            }
            Debug.Assert(centerInfos.Sum(a => a.NumberOfSamples) == filteredData.Count());

            clusterInfo.TotalWithinSS = centerInfos.Sum(a => a.WithinSS);
            clusterInfo.CenterInfos = centerInfos.OrderBy(a => a.WithinSS).ToList();
            clusterInfos.Add(clusterInfo);

#if EXTRA_LOGGING
            foreach (CenterInfo centerInfo in centerInfos)
            {
                Debug.WriteLine("Centre = " + $"{centerInfo.Value:F2}" + ", No. Samples = " + $"{centerInfo.NumberOfSamples}" + ", WithinSS = " + $"{centerInfo.WithinSS:F2}");
            }
#endif
        }
    }
    catch (InvalidOperationException e)
    {
        if (s_log.IsErrorEnabled) s_log.ErrorFormat("Error calculating cluster values ('{0}').", e.Message);
    }
}
There is clearly some sort of reset that needs to be done in the code but I cannot see what I am missing.

Converting a pointcloud to a depth/multi channel image

I have a pointcloud generated by scanning a planar surface using stereo cameras. I have generated features such as normals, FPFH, etc., and using this information I want to classify areas in the pointcloud. To enable the use of more traditional CNN approaches, I want to convert this pointcloud to a multi-channel image in OpenCV. I have the pointcloud collapsed to the XY plane and aligned to the X and Y axes so that I can create a bounding box for the image.
I am looking for ideas on how to proceed further with the mapping from points to pixels. Specifically, I am confused about the image size and how to go about filling in each pixel with the appropriate data (overlapping points would be averaged out, empty ones would be labelled accordingly). Since this is an unorganized pointcloud, I do not have camera parameters to use, and I guess PCL's RangeImage class would not work in my case.
Any help is appreciated!
Try creating an empty cv::Mat of predetermined size first. Then iterate through every pixel of that Mat to determine what value it should take.
Here is some code which does something similar to what you were describing:
cv::Mat makeImageFromPointCloud(pcl::PointCloud<pcl::PointXYZI>::Ptr cloud, std::string dimensionToRemove, float stepSize1, float stepSize2)
{
    pcl::PointXYZI cloudMin, cloudMax;
    pcl::getMinMax3D(*cloud, cloudMin, cloudMax);

    std::string dimen1, dimen2;
    float dimen1Max, dimen1Min, dimen2Min, dimen2Max;
    if (dimensionToRemove == "x")
    {
        dimen1 = "y";
        dimen2 = "z";
        dimen1Min = cloudMin.y;
        dimen1Max = cloudMax.y;
        dimen2Min = cloudMin.z;
        dimen2Max = cloudMax.z;
    }
    else if (dimensionToRemove == "y")
    {
        dimen1 = "x";
        dimen2 = "z";
        dimen1Min = cloudMin.x;
        dimen1Max = cloudMax.x;
        dimen2Min = cloudMin.z;
        dimen2Max = cloudMax.z;
    }
    else if (dimensionToRemove == "z")
    {
        dimen1 = "x";
        dimen2 = "y";
        dimen1Min = cloudMin.x;
        dimen1Max = cloudMax.x;
        dimen2Min = cloudMin.y;
        dimen2Max = cloudMax.y;
    }

    std::vector<std::vector<int>> pointCountGrid;
    int maxPoints = 0;
    std::vector<pcl::PointCloud<pcl::PointXYZI>::Ptr> grid;

    for (float i = dimen1Min; i < dimen1Max; i += stepSize1)
    {
        pcl::PointCloud<pcl::PointXYZI>::Ptr slice = passThroughFilter1D(cloud, dimen1, i, i + stepSize1);
        grid.push_back(slice);

        std::vector<int> slicePointCount;
        for (float j = dimen2Min; j < dimen2Max; j += stepSize2)
        {
            pcl::PointCloud<pcl::PointXYZI>::Ptr grid_cell = passThroughFilter1D(slice, dimen2, j, j + stepSize2);
            int gridSize = grid_cell->size();
            slicePointCount.push_back(gridSize);
            if (gridSize > maxPoints)
            {
                maxPoints = gridSize;
            }
        }
        pointCountGrid.push_back(slicePointCount);
    }

    cv::Mat mat(static_cast<int>(pointCountGrid.size()), static_cast<int>(pointCountGrid.at(0).size()), CV_8UC1);
    mat = cv::Scalar(0);
    for (int i = 0; i < mat.rows; ++i)
    {
        for (int j = 0; j < mat.cols; ++j)
        {
            int pointCount = pointCountGrid.at(i).at(j);
            float percentOfMax = (pointCount + 0.0) / (maxPoints + 0.0);
            int intensity = percentOfMax * 255;
            mat.at<uchar>(i, j) = intensity;
        }
    }
    return mat;
}
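Note that passThroughFilter1D is not defined in the snippet above. A minimal sketch of such a helper, assuming it is simply a band-pass on one coordinate field using pcl::PassThrough (my assumption, not code from the original answer):

#include <pcl/filters/passthrough.h>

//Hypothetical helper: keeps the points whose coordinate named by `field`
//("x", "y" or "z") lies within [low, high].
pcl::PointCloud<pcl::PointXYZI>::Ptr passThroughFilter1D(
    pcl::PointCloud<pcl::PointXYZI>::Ptr cloud,
    const std::string &field, float low, float high)
{
    pcl::PointCloud<pcl::PointXYZI>::Ptr filtered(new pcl::PointCloud<pcl::PointXYZI>);
    pcl::PassThrough<pcl::PointXYZI> pass;
    pass.setInputCloud(cloud);
    pass.setFilterFieldName(field);
    pass.setFilterLimits(low, high);
    pass.filter(*filtered);
    return filtered;
}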

Converting from pixel coordinates to meter coordinates

I would like to convert pixel coordinates to meter coordinates. My camera has radial distortion, and I already have the camera parameters. I use the cvUndistort2 function of OpenCV to get the undistorted image, then use these two formulas
(u0, v0 are the principal point coordinates in pixels; px, py are the focal lengths in pixels):
x = (u-u0)/px
y = (v-v0)/py
I finally want to calculate moments in metric coordinates.
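For reference, a minimal sketch of that pixel-to-metric conversion using OpenCV's C++ API: cv::undistortPoints removes the distortion and applies the (u-u0)/px, (v-v0)/py normalization in one call. The sample point is purely illustrative, and the intrinsics are the fx, fy, cx, cy, k1, k2 values from the code below (for an image that has already been undistorted, zero distortion coefficients would be passed instead):

#include <opencv2/opencv.hpp>
#include <vector>

//Sketch only: convert one raw pixel (u, v) to normalized metric coordinates
//on the image plane (z = 1).
std::vector<cv::Point2f> pixelPts(1, cv::Point2f(200.0f, 150.0f)); //illustrative pixel
std::vector<cv::Point2f> metricPts;
cv::Mat K = (cv::Mat_<double>(3, 3) << fx, 0, cx,
                                        0, fy, cy,
                                        0,  0,  1);
cv::Mat dist = (cv::Mat_<double>(1, 5) << k1, k2, 0, 0, 0);
cv::undistortPoints(pixelPts, metricPts, K, dist); //metricPts[0] = ((u-u0)/px, (v-v0)/py)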
my code:
CvMat* camera_matrix;
CvMat* distortion_coeffs;
camera_matrix = cvCreateMat(3, 3, CV_32FC1);
distortion_coeffs = cvCreateMat(1, 5, CV_32FC1);

double k1 = -0.33060;
double k2 = 0.11170;
double k3 = 0;
double k4 = 0;
double k5 = 0;
double fx = 195.0507;
double fy = 195.0992;
double cx = 162.4085;
double cy = 127.1973;

CV_MAT_ELEM(*(camera_matrix), float, 0, 0) = (float)fx;
CV_MAT_ELEM(*(camera_matrix), float, 0, 1) = 0;
CV_MAT_ELEM(*(camera_matrix), float, 0, 2) = (float)cx;
CV_MAT_ELEM(*(camera_matrix), float, 1, 0) = 0;
CV_MAT_ELEM(*(camera_matrix), float, 1, 1) = (float)fy;
CV_MAT_ELEM(*(camera_matrix), float, 1, 2) = (float)cy;
CV_MAT_ELEM(*(camera_matrix), float, 2, 0) = 0;
CV_MAT_ELEM(*(camera_matrix), float, 2, 1) = 0;
CV_MAT_ELEM(*(camera_matrix), float, 2, 2) = 1;

CV_MAT_ELEM(*(distortion_coeffs), float, 0, 0) = (float)k1;
CV_MAT_ELEM(*(distortion_coeffs), float, 0, 1) = (float)k2;
CV_MAT_ELEM(*(distortion_coeffs), float, 0, 2) = (float)k3;
CV_MAT_ELEM(*(distortion_coeffs), float, 0, 3) = (float)k4;
CV_MAT_ELEM(*(distortion_coeffs), float, 0, 4) = (float)k5;

cvUndistort2(image, Undistort_img, camera_matrix, distortion_coeffs);

//**********************************************************************
//Threshold image
//**********************************************************************
cvSmooth(Undistort_img, smoothed_image, CV_BLUR, 3, 3, 0, 0);
for(int i = 0; i < smoothed_image->height; i++)
{
    for(int j = 0; j < smoothed_image->width; j++)
    {
        s = cvGet2D(smoothed_image, i, j);
        if (((float)s.val[0]) >= 210)
            cvSet2D(tr_img, i, j, white_value);
        else
            cvSet2D(tr_img, i, j, black_value);
    }
}

//*************************************************************
//moment calculation in metric coordinates
//*************************************************************
for(int i = 0; i < tr_img->height; i++)
{
    for(int j = 0; j < tr_img->width; j++)
    {
        if(CV_MAT_ELEM(*(tr_img), float, i, j) != 0)
        {
            x = (i - u0) / px;
            y = (j - v0) / py;
            kern1[0][0] = 1 + kern1[0][0];
            kern1[1][0] = x * 1 + kern1[1][0];
            kern1[0][1] = y * 1 + kern1[0][1];
            kern1[2][0] = x * x * 1 + kern1[2][0];
            kern1[0][2] = y * y * 1 + kern1[0][2];
            kern1[1][1] = x * y * 1 + kern1[1][1];
        }
    }
}

moments->m00 = (float)kern1[0][0];
moments->m10 = (float)kern1[0][1];
moments->m01 = (float)kern1[1][0];
moments->m11 = (float)kern1[1][1];
moments->m20 = (float)kern1[0][2];
moments->m02 = (float)kern1[2][0];
As we know, the normalized centers of mass are calculated as follows (Z_star and m00_star are the values at the initial point):
xn = xg * an = (m10/m00)* Z_star*sqrt(m00_star/m00)
yn = yg * an = (m01/m00)* Z_star*sqrt(m00_star/m00)
These two values should not change when the camera only translates along the optical axis (z axis), but my values change considerably. Could you please help me?

How to convert cv::Mat to pcl::pointcloud

How do I get from an OpenCV cv::Mat point cloud to a pcl::PointCloud? The color is not important for me, only the points themselves.
You can do it like this:
pcl::PointCloud<pcl::PointXYZ>::Ptr SimpleOpenNIViewer::MatToPoinXYZ(cv::Mat OpencVPointCloud)
{
    /*
     * Function: Get from a Mat to pcl pointcloud datatype
     * In: cv::Mat
     * Out: pcl::PointCloud
     */

    //char pr=100, pg=100, pb=100;
    pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZ>);

    for(int i = 0; i < OpencVPointCloud.cols; i++)
    {
        //std::cout<<i<<endl;
        pcl::PointXYZ point;
        point.x = OpencVPointCloud.at<float>(0, i);
        point.y = OpencVPointCloud.at<float>(1, i);
        point.z = OpencVPointCloud.at<float>(2, i);

        // when color needs to be added:
        //uint32_t rgb = (static_cast<uint32_t>(pr) << 16 | static_cast<uint32_t>(pg) << 8 | static_cast<uint32_t>(pb));
        //point.rgb = *reinterpret_cast<float*>(&rgb);

        point_cloud_ptr->points.push_back(point);
    }
    point_cloud_ptr->width = (int)point_cloud_ptr->points.size();
    point_cloud_ptr->height = 1;

    return point_cloud_ptr;
}
And also the other way around:
cv::Mat MVW_ICP::PoinXYZToMat(pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr)
{
    cv::Mat OpenCVPointCloud(3, point_cloud_ptr->points.size(), CV_64FC1);
    for(int i = 0; i < point_cloud_ptr->points.size(); i++)
    {
        OpenCVPointCloud.at<double>(0, i) = point_cloud_ptr->points.at(i).x;
        OpenCVPointCloud.at<double>(1, i) = point_cloud_ptr->points.at(i).y;
        OpenCVPointCloud.at<double>(2, i) = point_cloud_ptr->points.at(i).z;
    }
    return OpenCVPointCloud;
}
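For completeness, a small usage sketch of the two helpers above (class scopes dropped for brevity). Note the type change: MatToPoinXYZ reads the input as float (CV_32FC1), while PoinXYZToMat builds a CV_64FC1 matrix of doubles, so a round trip changes the element type:

//Sketch only: four points stored column-wise, one column per point.
cv::Mat pts(3, 4, CV_32FC1);
cv::randu(pts, cv::Scalar(-1.0f), cv::Scalar(1.0f));           //fill with random coordinates

pcl::PointCloud<pcl::PointXYZ>::Ptr cloud = MatToPoinXYZ(pts); //cv::Mat -> pcl cloud
cv::Mat back = PoinXYZToMat(cloud);                            //pcl cloud -> cv::Mat (CV_64FC1)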
To convert from a range image captured by a Kinect sensor and represented by depthMat to a pcl::PointCloud you can try this function. The calibration parameters are those used here.
pcl::PointCloud<pcl::PointXYZ>::Ptr MatToPoinXYZ(cv::Mat depthMat)
{
    pcl::PointCloud<pcl::PointXYZ>::Ptr ptCloud(new pcl::PointCloud<pcl::PointXYZ>);

    // calibration parameters
    float const fx_d = 5.9421434211923247e+02;
    float const fy_d = 5.9104053696870778e+02;
    float const cx_d = 3.3930780975300314e+02;
    float const cy_d = 2.4273913761751615e+02;

    unsigned char* p = depthMat.data;
    for (int i = 0; i < depthMat.rows; i++)
    {
        for (int j = 0; j < depthMat.cols; j++)
        {
            float z = static_cast<float>(*p);
            pcl::PointXYZ point;
            point.z = 0.001 * z;
            point.x = point.z * (j - cx_d) / fx_d;
            point.y = point.z * (cy_d - i) / fy_d;
            ptCloud->points.push_back(point);
            ++p;
        }
    }
    ptCloud->width = (int)depthMat.cols;
    ptCloud->height = (int)depthMat.rows;

    return ptCloud;
}
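A hypothetical usage sketch (my assumption: the function above steps through depthMat.data one byte at a time, so it expects a single-channel 8-bit depth image; a 16-bit Kinect depth map would need to be converted, or the function adapted, first):

cv::Mat depth8u = cv::imread("depth.png", 0);                      //illustrative file; 0 = load as 8-bit grayscale
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud = MatToPoinXYZ(depth8u); //depth image -> point cloud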

Gini Impurity, growing random trees in opencv

Goal: to add an offset impurity to the split decision when growing random trees in OpenCV.
Currently in OpenCV random trees, the split is made as follows:
if( !priors )
{
    int L = 0, R = n1;

    for( i = 0; i < m; i++ )
        rsum2 += (double)rc[i]*rc[i];

    for( i = 0; i < n1 - 1; i++ )
    {
        int idx = responses[sorted_indices[i]];
        int lv, rv;
        L++; R--;
        lv = lc[idx]; rv = rc[idx];
        lsum2 += lv*2 + 1;
        rsum2 -= rv*2 - 1;
        lc[idx] = lv + 1; rc[idx] = rv - 1;
        if( values[i] + epsilon < values[i+1] )
        {
            double val = (lsum2*R + rsum2*L)/((double)L*R);
            if( best_val < val )
            {
                best_val = val;
                best_i = i;
            }
        }
    }
}
It is using the Gini impurity.
Can anyone explain how the code accomplishes this? From what I get: initially it puts all class counts in the right node, and while moving one instance at a time from right to left and updating lsum2 and rsum2, it finds the best split. What I don't get is how p_j^2 is related to lv*2 + 1 or rv*2 - 1.
The real question: having offsets available, I would like to add a split based on the impurity of the similarity of the offsets (offsets are the direction and distance from the center to the current node).
What I have come up with is something like the code below. If anyone can point out any flaws it would be good, because at the moment it is not giving good results and I am not sure where to start debugging.
//Compute mean
for(i = 0; i < n1; ++i)
{
    float* point = (float*)(points.data + rstep*sample_idx_src[sorted_indices[i]]);
    meanx[responses[sorted_indices[i]]] += point[0];
    meany[responses[sorted_indices[i]]] += point[1];
}
for(i = 0; i < m; ++i)
{
    meanx[i] /= rc0[i];
    meany[i] /= rc0[i];
}

if(!priors)
{
    int L = 0, R = n1;

    for(i = 0; i < n1; i++)
    {
        float* point = (float*)(points.data + rstep*sample_idx_src[sorted_indices[i]]);
        double tmp = point[0] - meanx[responses[sorted_indices[i]]];
        rsum2 += tmp*tmp;
        tmp = point[1] - meany[responses[sorted_indices[i]]];
        rsum2 += tmp*tmp;
    }

    double minDist = DBL_MAX;
    for(i = 0; i < n1; ++i)
    {
        float* point = (float*)(points.data + rstep*sample_idx_src[sorted_indices[i]]);
        ++L; --R;
        double tmp = point[0] - meanx[responses[sorted_indices[i]]];
        lsum2 += tmp*tmp;
        tmp = point[1] - meany[responses[sorted_indices[i]]];
        lsum2 += tmp*tmp;
        tmp = point[0] - meanx[responses[sorted_indices[i]]];
        rsum2 -= tmp*tmp;
        tmp = point[1] - meany[responses[sorted_indices[i]]];
        rsum2 -= tmp*tmp;

        if( values[i] + epsilon < values[i+1] )
        {
            double val = (lsum2 + rsum2)/((double)L*R);
            if(val < minDist )
            {
                minDist = val;
                best_val = -val;
                best_i = i;
            }
        }
    }
}
OK, the Gini impurity in this case is simple because there are only two groups, left and right. So instead of a large sum 1 - sum(pj*pj) we have 1 - pl*pl - pr*pr. The proportion of items on the left, pl, is the count on the left, lv, divided by the total.
Now, as we shift the split, pl*pl and pr*pr change, but not because the total number of items changes. So instead of optimizing pl and pr (which are floating-point numbers) we optimize lv and rv (which are simple counts).
Next, the question of why 2*lv + 1. That's simple: we're increasing lv to lv + 1 in order to update lv*lv. Now (lv+1)*(lv+1) - lv*lv (the increase) happens to be 2*lv + 1 if you write out all the terms. And the decrease (rv-1)*(rv-1) - rv*rv happens to be -2*rv + 1, i.e. -(2*rv - 1), which is why the code does rsum2 -= rv*2 - 1.
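For reference, a short worked version of that algebra, and of why the code's score is the Gini criterion (my own summary, using the same names as the code):

(lv + 1)*(lv + 1) - lv*lv = 2*lv + 1          -> matches lsum2 += lv*2 + 1
(rv - 1)*(rv - 1) - rv*rv = -(2*rv - 1)       -> matches rsum2 -= rv*2 - 1

With L items on the left, R on the right, lsum2 = sum over classes of lc[j]*lc[j] and rsum2 = sum over classes of rc[j]*rc[j], the weighted Gini impurity of the split is
L*(1 - lsum2/(L*L)) + R*(1 - rsum2/(R*R)) = (L + R) - (lsum2/L + rsum2/R),
so minimizing it is the same as maximizing lsum2/L + rsum2/R, which is exactly the quantity val = (lsum2*R + rsum2*L)/((double)L*R) that the loop maximizes.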
