OpenCV - polynomial function fitting

In OpenCV (or another C++ library), is there a function similar to MATLAB's fit that can do 3D polynomial surface fitting, i.e. f(x,y) = p00 + p10*x + p01*y + p20*x^2 + p11*x*y + p02*y^2? Thanks.

I don't think there is such a function in OpenCV, but you can build the design matrix yourself and solve the least-squares system with SVD, like this:
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main( int argc, char** argv )
{
    Mat z = imread("1449862093156643.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    // One row per pixel: [x^2, y^2, x*y, x, y, 1]
    Mat M = Mat_<double>(z.rows*z.cols, 6);
    Mat I = Mat_<double>(z.rows*z.cols, 1);
    for (int i = 0; i < z.rows; i++)
        for (int j = 0; j < z.cols; j++)
        {
            double x = (j - z.cols / 2) / double(z.cols), y = (i - z.rows / 2) / double(z.rows);
            M.at<double>(i*z.cols+j, 0) = x*x;
            M.at<double>(i*z.cols+j, 1) = y*y;
            M.at<double>(i*z.cols+j, 2) = x*y;
            M.at<double>(i*z.cols+j, 3) = x;
            M.at<double>(i*z.cols+j, 4) = y;
            M.at<double>(i*z.cols+j, 5) = 1;
            I.at<double>(i*z.cols+j, 0) = z.at<uchar>(i,j);
        }
    // Solve M*q = I in the least-squares sense
    SVD s(M);
    Mat q;
    s.backSubst(I, q);
    cout << q;
    imshow("Original", z);
    cout << q.at<double>(2,0);
    // Rebuild the fitted surface from the coefficients
    Mat background(z.rows, z.cols, CV_8UC1);
    for (int i = 0; i < z.rows; i++)
        for (int j = 0; j < z.cols; j++)
        {
            double x = (j - z.cols / 2) / double(z.cols), y = (i - z.rows / 2) / double(z.rows);
            double quad = q.at<double>(0,0)*x*x + q.at<double>(1,0)*y*y + q.at<double>(2,0)*x*y;
            quad += q.at<double>(3,0)*x + q.at<double>(4,0)*y + q.at<double>(5,0);
            background.at<uchar>(i,j) = saturate_cast<uchar>(quad);
        }
    imshow("Simulated background", background);
    waitKey();
    return 0;
}
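Given how M is filled above, the solution vector q maps onto the question's coefficients as follows (a short sketch; the ordering simply follows the column order used when building M):
double p20 = q.at<double>(0,0);  // x^2
double p02 = q.at<double>(1,0);  // y^2
double p11 = q.at<double>(2,0);  // x*y
double p10 = q.at<double>(3,0);  // x
double p01 = q.at<double>(4,0);  // y
double p00 = q.at<double>(5,0);  // constant term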
Original post is here

There is an undocumented function in OpenCV (contrib.hpp) called cv::polyfit(). It takes a Mat of x coordinates and another Mat of y coordinates as input. Mats are not very convenient for this, but you can build a wrapper that accepts a vector of cv::Point:
vector<float> fitPoly(const vector<Point> &src, int order){
    Mat src_x = Mat(src.size(), 1, CV_32F);
    Mat src_y = Mat(src.size(), 1, CV_32F);
    for (int i = 0; i < src.size(); i++){
        src_x.at<float>(i, 0) = (float)src[i].x;
        src_y.at<float>(i, 0) = (float)src[i].y;
    }
    return cv::polyfit(src_x, src_y, order);
}
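A usage sketch (hedged: the exact return type and coefficient ordering of the undocumented cv::polyfit may differ between OpenCV builds, so verify against your version):
// Hypothetical usage of the fitPoly wrapper above.
vector<Point> pts;
pts.push_back(Point(0, 1));
pts.push_back(Point(1, 3));
pts.push_back(Point(2, 7));
pts.push_back(Point(3, 13));
vector<float> coeffs = fitPoly(pts, 2);   // fit y = c0 + c1*x + c2*x^2
for (size_t i = 0; i < coeffs.size(); i++)
    cout << "c" << i << " = " << coeffs[i] << endl;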

Related

What's the C++ code difference between contrast stretching of RGB and grayscale images?

I'm trying grayscale contrast stretching with a C++ program; the source I am following is here. This is the code I've got so far:
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

using namespace cv;
using namespace std;

int computeStretched(int x, int l1, int l2, int r1, int r2);

int main() {
    Mat img = imread("pict6.jpg");                  // Mat type initialization of an original image
    Mat grayScaleImg(img.size(), CV_8UC1);          // Gray scale mat initialization
    cvtColor(img, grayScaleImg, COLOR_BGR2GRAY);    // conversion of image input to grayscaled view
    for (int i = 0; i < img.rows; i++) {
        for (int j = 0; j < img.cols; j++) {
            for (int k = 0; k < 3; k++) {
                int output = computeStretched(grayScaleImg.at<Vec3b>(i, j)[k], 70, 0, 200, 255);
                stretch_result.at<Vec3b>(i, j) = saturate_cast<uchar>(output);
            }
        }
    }
    waitKey(0);
    return 0;
}

int computeStretched(int x, int l1, int l2, int r1, int r2) {
    float calcVal;
    if (0 <= x && x <= l1) {
        calcVal = (l2 / l1) * x;
    } else if (l1 < x && x <= r1) {
        calcVal = ((r2 - l2) / (r1 - l1)) * (x - l1) + l2;
    } else if (r1 < x && x <= 255) {
        calcVal = ((255 - r2) / (255 - r1)) * (x - r1) + r2;
    }
    return (int)calcVal;
}
However, the image I am processing is not an RGB image but a grayscale one. I want to do the same operation as in the sample code on a plain grayscale picture. What must be changed in the listing above to enable that?
You have a grayscale image, which means a single channel, so modify your code accordingly:
for (int i = 0; i < img.rows; i++) {
    for (int j = 0; j < img.cols; j++) {
        int output = computeStretched(grayScaleImg.at<uchar>(i, j), 70, 0, 200, 255);
        stretch_result.at<uchar>(i, j) = saturate_cast<uchar>(output);
    }
}
Also initialize "stretch_result" before the loop, as a single-channel Mat of the same size as the input.
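A minimal sketch of the grayscale-only version, assuming the same file name and thresholds as in the question (stretch_result initialized as a single-channel CV_8UC1 Mat):
// Single-channel variant; computeStretched is the function from the question
// (note it still uses integer division when computing the slopes).
Mat grayScaleImg = imread("pict6.jpg", IMREAD_GRAYSCALE);
Mat stretch_result(grayScaleImg.size(), CV_8UC1);   // single-channel output
for (int i = 0; i < grayScaleImg.rows; i++) {
    for (int j = 0; j < grayScaleImg.cols; j++) {
        int output = computeStretched(grayScaleImg.at<uchar>(i, j), 70, 0, 200, 255);
        stretch_result.at<uchar>(i, j) = saturate_cast<uchar>(output);
    }
}
imshow("stretched", stretch_result);
waitKey(0);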

Comparing openCv PnP with openGv PnP

I am trying to build a test project to compare the OpenCV solvePnP implementation with the OpenGV one.
The OpenCV one is documented here:
https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#solvepnp
and the OpenGV one here:
https://laurentkneip.github.io/opengv/page_how_to_use.html
Using the OpenCV example code, I find a chessboard in an image and construct the matching 3D points. I run the CV PnP, then set up the GV solver. The CV PnP runs fine and prints these values:
//rotation
-0.003040771263293328, 0.9797142824436152, -0.2003763421317906;
0.0623096853748876, 0.2001735322445355, 0.977777101438374]
//translation
[-12.06549797067309;
-9.533070368412945;
37.6825295047483]
I test by reprojecting the 3D points, and it looks good.
The GV PnP, however, prints nan for all values. I have tried to follow the example code, but I must be making a mistake somewhere. The code is:
int main(int argc, char **argv) {
    cv::Mat matImg = cv::imread("chess.jpg");
    cv::Size boardSize(8, 6);

    // Construct the chessboard model
    double squareSize = 2.80;
    std::vector<cv::Point3f> objectPoints;
    for (int i = 0; i < boardSize.height; i++) {
        for (int j = 0; j < boardSize.width; j++) {
            objectPoints.push_back(
                cv::Point3f(double(j * squareSize), float(i * squareSize), 0));
        }
    }

    cv::Mat rvec, tvec;
    cv::Mat cameraMatrix, distCoeffs;
    cv::FileStorage fs("CalibrationData.xml", cv::FileStorage::READ);
    fs["cameraMatrix"] >> cameraMatrix;
    fs["dist_coeffs"] >> distCoeffs;

    // Found chessboard corners
    std::vector<cv::Point2f> imagePoints;
    bool found = cv::findChessboardCorners(matImg, boardSize, imagePoints, cv::CALIB_CB_FAST_CHECK);
    if (found) {
        cv::drawChessboardCorners(matImg, boardSize, cv::Mat(imagePoints), found);
        // SolvePnP
        cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
        drawAxis(matImg, cameraMatrix, distCoeffs, rvec, tvec, squareSize);
    }

    // cv to matrix
    cv::Mat R;
    cv::Rodrigues(rvec, R);
    std::cout << "results from cv:" << R << tvec << std::endl;

    // START OPEN GV
    // vars
    bearingVectors_t bearingVectors;
    points_t points;
    rotation_t rotation;

    // add points to the gv type
    for (int i = 0; i < objectPoints.size(); ++i)
    {
        point_t pnt;
        pnt.x() = objectPoints[i].x;
        pnt.y() = objectPoints[i].y;
        pnt.z() = objectPoints[i].z;
        points.push_back(pnt);
    }

    /*
    K is the common 3x3 camera matrix that you can compose with cx, cy, fx, and fy.
    You put the image point into homogeneous form (append a 1),
    multiply it with the inverse of K from the left, which gives you a normalized image point (a spatial direction vector).
    You normalize that to norm 1.
    */

    // to homogeneous
    std::vector<cv::Point3f> imagePointsH;
    convertPointsToHomogeneous(imagePoints, imagePointsH);

    // multiply by K.inv()
    for (int i = 0; i < imagePointsH.size(); i++)
    {
        cv::Point3f pt = imagePointsH[i];
        cv::Mat ptMat(3, 1, cameraMatrix.type());
        ptMat.at<double>(0, 0) = pt.x;
        ptMat.at<double>(1, 0) = pt.y;
        ptMat.at<double>(2, 0) = pt.z;
        cv::Mat dstMat = cameraMatrix.inv() * ptMat;

        // store as bearing vector
        bearingVector_t bvec;
        bvec.x() = dstMat.at<double>(0, 0);
        bvec.y() = dstMat.at<double>(1, 0);
        bvec.z() = dstMat.at<double>(2, 0);
        bvec.normalize();
        bearingVectors.push_back(bvec);
    }

    // create a central absolute adapter
    absolute_pose::CentralAbsoluteAdapter adapter(
        bearingVectors,
        points,
        rotation);

    size_t iterations = 50;
    std::cout << "running epnp (all correspondences)" << std::endl;
    transformation_t epnp_transformation;
    for (size_t i = 0; i < iterations; i++)
        epnp_transformation = absolute_pose::epnp(adapter);
    std::cout << "results from epnp algorithm:" << std::endl;
    std::cout << epnp_transformation << std::endl << std::endl;

    return 0;
}
Where am I going wrong in setting up the OpenGV PnP solver?
Years later, I had this same issue and solved it. To convert OpenCV points to OpenGV bearing vectors, you can do this:
bearingVectors_t bearingVectors;
std::vector<cv::Point2f> dd2;
const int N1 = static_cast<int>(dd2.size());
cv::Mat points1_mat = cv::Mat(dd2).reshape(1);

// first rectify points and construct homogeneous points
// construct homogeneous points
cv::Mat ones_col1 = cv::Mat::ones(N1, 1, CV_32F);
cv::hconcat(points1_mat, ones_col1, points1_mat);

// undistort points
cv::Mat points1_rect = points1_mat * cameraMatrix.inv();

// compute bearings
points2bearings3(points1_rect, &bearingVectors);
using this function for the final conversion:
// Convert a set of points to bearings.
//   points   Matrix of size Nx3 with the set of points.
//   bearings Vector of bearings.
void points2bearings3(const cv::Mat& points,
                      opengv::bearingVectors_t* bearings) {
    double l;
    cv::Vec3f p;
    opengv::bearingVector_t bearing;
    for (int i = 0; i < points.rows; ++i) {
        p = cv::Vec3f(points.row(i));
        l = std::sqrt(p[0] * p[0] + p[1] * p[1] + p[2] * p[2]);
        for (int j = 0; j < 3; ++j) bearing[j] = p[j] / l;
        bearings->push_back(bearing);
    }
}
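Once the bearing vectors and the 3D points are filled, the OpenGV call itself looks the same as in the question; a minimal sketch (assuming the opengv types and namespaces already used above):
// Feed the converted bearings and the matching 3D object points into EPnP.
// 'points' must be filled from objectPoints in the same order as bearingVectors.
opengv::absolute_pose::CentralAbsoluteAdapter adapter(bearingVectors, points);
opengv::transformation_t T = opengv::absolute_pose::epnp(adapter);  // 3x4 [R | t]
std::cout << "epnp result:" << std::endl << T << std::endl;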

OpenCV Mat efficiently linearized by right triangle

How can I efficiently linearize a Mat (symmetric matrix) into one row using its upper right triangle?
For example, when I have:
0aabbb
b0aaaa
ba0bba
bac0aa
aaaa0c
abcab0
and then from that I get:
aabbbaaaabbaaac
Something like this:
...
template<class T>
Mat SSMJ::triangleLinearized(Mat mat){
    int c = mat.cols;
    Mat row = Mat(1, ((c*c)-c)/2, mat.type());
    int i = 0;
    for(int y = 0; y < mat.rows; y++)
        for(int x = y + 1; x < mat.cols; x++) {
            row.at<T>(i) = mat.at<T>(y, x);
            i++;
        }
    return row;
}
...
Since the data in your Mat is just a 1D array stored row by row in mat.data, you can do whatever you want with it. I don't think you will find anything more sophisticated (without using vectorized methods) than simply copying from this array.
int rows = 6;
// 6x6 matrix stored row by row (the last row is omitted here; it contributes
// nothing above the diagonal)
char data[] = { 0,1,2,3,4,5,
                0,1,2,3,4,5,
                0,1,2,3,4,5,
                0,1,2,3,4,5,
                0,1,2,3,4,5 };
char result[100];
int offset = 0;
for (int i = 0; i < 5; offset += 5-i, i++) {
    memcpy(&result[offset], &data[rows * i + i + 1], 5 - i);
}
Or with an OpenCV Mat it would be:
int rows = mat.cols;
char result[100]; // you can calculate how much data you need
int offset = 0;
for (int i = 0; i < 5; offset += 5-i, i++) {   // hard-coded for the 6x6 example above
    memcpy(&result[offset], &mat.data[rows * i + i + 1], 5 - i);
}
Mat resultMat(1, offset, CV_8UC1, result);
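A generalized sketch of the same memcpy idea for an arbitrary NxN single-channel matrix (the wrapper name is mine, not from the original answer):
#include <cstring>
#include <opencv2/core/core.hpp>

// Copy the strictly-upper triangle of an NxN CV_8UC1 Mat into one row,
// using one memcpy per matrix row. Assumes the Mat is continuous.
cv::Mat upperTriangleToRow(const cv::Mat& mat) {
    CV_Assert(mat.type() == CV_8UC1 && mat.rows == mat.cols && mat.isContinuous());
    const int n = mat.rows;
    cv::Mat row(1, n * (n - 1) / 2, CV_8UC1);
    int offset = 0;
    for (int i = 0; i < n - 1; ++i) {
        const int len = n - i - 1;   // elements to the right of the diagonal
        std::memcpy(row.data + offset, mat.data + i * n + i + 1, len);
        offset += len;
    }
    return row;
}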

Sharpening using Gaussian 3x3 filter works but the result is white pixels

The following program is not working. I am implementing a 3x3 Gaussian filter to sharpen the image file FACE DETECTION.png, but the result just shows white. I think the value of sum exceeds 255 in the convolution operation. I need a solution.
CODE:
int main()
{
    Mat src, dst;
    float sum;

    /// Load an image
    src = imread("FACE DETECTION.png", 0);
    if( !src.data )
    { return -1; }

    // define the kernel
    float Kernel[3][3] = {
        {1.0, 2.0, 1.0},
        {2.0, 4.0, 2.0},
        {1.0, 2.0, 1.0}
    };

    dst = src.clone();
    for(int y = 0; y < src.rows; y++)
        for(int x = 0; x < src.cols; x++)
            dst.at<uchar>(y,x) = 0.0;

    // convolution operation
    for(int y = 1; y < src.rows - 1; y++){
        for(int x = 1; x < src.cols - 1; x++){
            sum = 0.0;
            for(int k = -1; k <= 1; k++){
                for(int j = -1; j <= 1; j++){
                    sum = sum + Kernel[j+1][k+1] * src.at<uchar>(y - j, x - k);
                    sum = sum > 255 ? 255 : sum;
                    sum = sum < 0 ? 0 : sum;
                }
            }
            dst.at<uchar>(y,x) = sum;
        }
    }

    namedWindow("final");
    imshow("final", dst);
    namedWindow("initial");
    imshow("initial", src);

    waitKey();
    return 0;
}
The problem is your kernel. The sum of the kernel entries should be equal to one, but yours is equal to 16. Normalizing the kernel, i.e. dividing every entry by 16, will fix your problem.
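For example, the kernel in the question could be defined with the entries pre-divided by 16 (a sketch of the same kernel, normalized):
// Normalized 3x3 Gaussian kernel: the entries sum to 1, so the convolution
// output stays within the intensity range of the input.
float Kernel[3][3] = {
    {1.0f/16, 2.0f/16, 1.0f/16},
    {2.0f/16, 4.0f/16, 2.0f/16},
    {1.0f/16, 2.0f/16, 1.0f/16}
};
Alternatively, keep the original kernel and divide sum by 16.0f once after the two inner loops (moving the clamping there as well) before writing it to dst.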

How to draw Optical flow images from ocl::PyrLKOpticalFlow::dense()

How can I draw optical flow images from ocl::PyrLKOpticalFlow::dense(), which calculates both the horizontal and the vertical component of the optical flow? I don't know how to draw them. I'm new to OpenCV. Can anyone help me?
Syntax :
ocl::PyrLKOpticalFlow::dense(oclMat &prevImg, oclMat& nextImg, oclMat& u, oclMat &v,oclMat &err)
A well-established method in the optical flow community is to display a motion vector field as a color-coded image, as you can see in the various data sets, e.g. the MPI dataset or the Middlebury dataset.
For that you compute the length and the angle of each motion vector, and use an HSV-to-RGB colorspace transformation (see the OpenCV cvtColor function) to create the color-coded image. Use the angle of the motion vector as the H (hue) channel and the normalized length as the S (saturation) channel, and set V (value) to 1. The color of the image then shows the direction of the motion, and the saturation its length (speed).
The code should look like this (note: if use_value == true, the saturation is set to 1 and the value channel encodes the motion vector length):
void FlowToRGB(const cv::Mat & inpFlow,
               cv::Mat & rgbFlow,
               const float & max_size,
               bool use_value)
{
    if (inpFlow.empty()) return;
    if (inpFlow.depth() != CV_32F)
        throw std::runtime_error("FlowToRGB: error, inpFlow has the wrong data type (has to be CV_32FC2)");

    const float grad2deg = (float)(90/3.141);
    cv::Mat pol(inpFlow.size(), CV_32FC2);
    float mean_val = 0, min_val = 1000, max_val = 0;
    float _dx, _dy;
    for (int r = 0; r < inpFlow.rows; r++)
    {
        for (int c = 0; c < inpFlow.cols; c++)
        {
            // cvmath::toPolar is the author's helper returning (magnitude, angle)
            cv::Point2f polar = cvmath::toPolar(inpFlow.at<cv::Point2f>(r,c));
            polar.y *= grad2deg;
            mean_val += polar.x;
            max_val = MAX(max_val, polar.x);
            min_val = MIN(min_val, polar.x);
            pol.at<cv::Point2f>(r,c) = cv::Point2f(polar.y, polar.x);
        }
    }
    mean_val /= inpFlow.size().area();

    float scale = max_val - min_val;
    float shift = -min_val; //-mean_val + scale;
    scale = 255.f/scale;
    if (max_size > 0)
    {
        scale = 255.f/max_size;
        shift = 0;
    }

    // calculate the angle, motion value
    cv::Mat hsv(inpFlow.size(), CV_8UC3);
    uchar * ptrHSV = hsv.ptr<uchar>();
    int idx_val = (use_value) ? 2 : 1;
    int idx_sat = (use_value) ? 1 : 2;
    for (int r = 0; r < inpFlow.rows; r++, ptrHSV += hsv.step1())
    {
        uchar * _ptrHSV = ptrHSV;
        for (int c = 0; c < inpFlow.cols; c++, _ptrHSV += 3)
        {
            cv::Point2f vpol = pol.at<cv::Point2f>(r,c);
            _ptrHSV[0] = cv::saturate_cast<uchar>(vpol.x);
            _ptrHSV[idx_val] = cv::saturate_cast<uchar>((vpol.y + shift) * scale);
            _ptrHSV[idx_sat] = 255;
        }
    }
    cv::Mat rgbFlow32F;
    cv::cvtColor(hsv, rgbFlow32F, CV_HSV2BGR);
    rgbFlow32F.convertTo(rgbFlow, CV_8UC3);
}
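To connect this to the question: dense() returns separate horizontal (u) and vertical (v) components, so they first have to be merged into a single 2-channel CV_32FC2 Mat. A minimal sketch (the variable names d_u and d_v are mine, and the download step assumes the OpenCV 2.4 ocl module):
// Given the u/v oclMats filled by ocl::PyrLKOpticalFlow::dense(), build the
// CV_32FC2 flow image that FlowToRGB() above expects.
cv::Mat u, v;
d_u.download(u);                        // CV_32FC1 horizontal component
d_v.download(v);                        // CV_32FC1 vertical component
std::vector<cv::Mat> channels;
channels.push_back(u);
channels.push_back(v);
cv::Mat flow;                           // 2 channels: (dx, dy) per pixel
cv::merge(channels, flow);
cv::Mat rgb;
FlowToRGB(flow, rgb, -1.f, true);       // or use drawOpticalFlow() shown below
cv::imshow("flow", rgb);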
Python
Please refer to opt_flow.py#draw_flow
def draw_flow(img, flow, step=16):
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int)
    fx, fy = flow[y,x].T
    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis
C++
Please refer to tvl1_optical_flow.cpp#drawOpticalFlow
static void drawOpticalFlow(const Mat_<Point2f>& flow, Mat& dst, float maxmotion = -1)
{
    dst.create(flow.size(), CV_8UC3);
    dst.setTo(Scalar::all(0));

    // determine motion range:
    float maxrad = maxmotion;
    if (maxmotion <= 0)
    {
        maxrad = 1;
        for (int y = 0; y < flow.rows; ++y)
        {
            for (int x = 0; x < flow.cols; ++x)
            {
                Point2f u = flow(y, x);
                if (!isFlowCorrect(u))
                    continue;
                maxrad = max(maxrad, sqrt(u.x * u.x + u.y * u.y));
            }
        }
    }

    for (int y = 0; y < flow.rows; ++y)
    {
        for (int x = 0; x < flow.cols; ++x)
        {
            Point2f u = flow(y, x);
            if (isFlowCorrect(u))
                dst.at<Vec3b>(y, x) = computeColor(u.x / maxrad, u.y / maxrad);
        }
    }
}
I did something like this in my code, a while ago:
calcOpticalFlowPyrLK(frame_prec, frame_cur, v_corners_prec[i], corners_cur, status, err);
for (int j = 0; j < corners_cur.size(); j++) {
    if (status[j]) {
        line(frame_cur, v_corners_prec[i][j], corners_cur[j], colors[i]);
    }
}
Basically I draw a line between the points tracked by the optical flow in this iteration and the points from the previous one; these lines represent the flow on the image.
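A more self-contained sketch of the same idea (the frame names, feature parameters, and the green color are mine, not from the original answer):
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;

// Track corners from the previous frame to the current one with sparse
// Lucas-Kanade and draw one line per successfully tracked point.
void drawSparseFlow(const Mat& prevGray, const Mat& curGray, Mat& vis)
{
    std::vector<Point2f> prevPts, curPts;
    std::vector<uchar> status;
    std::vector<float> err;

    goodFeaturesToTrack(prevGray, prevPts, 500, 0.01, 10);   // pick corners to track
    if (prevPts.empty()) return;
    calcOpticalFlowPyrLK(prevGray, curGray, prevPts, curPts, status, err);

    cvtColor(curGray, vis, COLOR_GRAY2BGR);
    for (size_t j = 0; j < curPts.size(); j++) {
        if (status[j])
            line(vis, prevPts[j], curPts[j], Scalar(0, 255, 0));  // flow line
    }
}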
Hope this helps.
