I want to plot a histogram in OpenCV C++. The x-axis should be the angle and the y-axis the magnitude of the histogram. I calculate magnitude and angle using the Sobel operator. Now how can I plot a histogram from the magnitude and angle?
Thanks in advance. A simplified version of the code is:
// Read image
Mat img = imread("abs.jpg");
img.convertTo(img, CV_32F, 1 / 255.0);
/*GaussianBlur(img, img, Size(3, 3), 0, 0, BORDER_CONSTANT);*/
// Calculate gradients gx, gy
Mat gx, gy;
Sobel(img, gx, CV_32F, 1, 0, 1);
Sobel(img, gy, CV_32F, 0, 1, 1);
// C++ Calculate gradient magnitude and direction (in degrees)
Mat mag, angle;
cartToPolar(gx, gy, mag, angle, 1);
imshow("magnitude of image is", mag);
imshow("angle of image is", angle);
OK, so the first part is to calculate the histogram of each of them. Since they are already separated (each in its own Mat) we do not have to split them, and we can use them directly in OpenCV's calcHist function.
From the documentation we have:
void calcHist(const Mat* images, int nimages, const int* channels, InputArray mask, OutputArray hist, int dims, const int* histSize, const float** ranges, bool uniform=true, bool accumulate=false )
So you would have to do:
cv::Mat histMag, histAng;
// number of bins of the histogram, adjust to your liking
int histSize = 10;
// degrees go from 0-360; if using radians, change accordingly
float rangeAng[] = { 0, 360 };
const float* histRangeAng = { rangeAng };
double minval, maxval;
// get the range for the magnitude
cv::minMaxLoc(mag, &minval, &maxval);
float rangeMag[] = { static_cast<float>(minval), static_cast<float>(maxval) };
const float* histRangeMag = { rangeMag };
cv::calcHist(&mag, 1, 0, cv::noArray(), histMag, 1, &histSize, &histRangeMag, true, false);
cv::calcHist(&angle, 1, 0, cv::noArray(), histAng, 1, &histSize, &histRangeAng, true, false);
Now you have to plot the two histograms found in histMag and histAng.
In the tutorial I posted in the comments you have lines in the plot; for the angle it would be something like this:
// Draw the histogram for the angle
int hist_w = 512; int hist_h = 400;
int bin_w = cvRound( (double) hist_w/histSize );
cv::Mat histImage( hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) );
/// Normalize the result to [ 0, histImage.rows ]
cv::normalize(histAng, histAng, 0, histImage.rows, cv::NORM_MINMAX, -1, Mat() );
// Draw the lines
for( int i = 1; i < histSize; i++ )
{
cv::line( histImage, cv::Point( bin_w*(i-1), hist_h - cvRound(histAng.at<float>(i-1)) ) ,
cv::Point( bin_w*(i), hist_h - cvRound(histAng.at<float>(i)) ),
cv::Scalar( 255, 0, 0), 2, 8, 0 );
}
With this you can do the same for the magnitude, or maybe turn it into a function which draws a histogram whenever one is supplied.
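A minimal sketch of such a helper (untested; the function name and parameters are just suggestions):
// Draw a 1-D histogram (the CV_32F output of calcHist) as a line plot.
void drawHistogram(const cv::Mat& hist, int histSize, const std::string& windowName)
{
    int hist_w = 512, hist_h = 400;
    int bin_w = cvRound(static_cast<double>(hist_w) / histSize);
    cv::Mat histImage(hist_h, hist_w, CV_8UC3, cv::Scalar(0, 0, 0));
    // normalize a copy so the caller's histogram stays untouched
    cv::Mat histNorm;
    cv::normalize(hist, histNorm, 0, histImage.rows, cv::NORM_MINMAX, -1, cv::Mat());
    for (int i = 1; i < histSize; i++) {
        cv::line(histImage,
                 cv::Point(bin_w * (i - 1), hist_h - cvRound(histNorm.at<float>(i - 1))),
                 cv::Point(bin_w * i, hist_h - cvRound(histNorm.at<float>(i))),
                 cv::Scalar(255, 0, 0), 2, 8, 0);
    }
    cv::imshow(windowName, histImage);
}
Then drawHistogram(histMag, histSize, "magnitude") and drawHistogram(histAng, histSize, "angle") plot both.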
In the documentation they have another option: drawing rectangles as the bins. Adapting it to our case, we get something like:
// Draw the histogram for the angle
int hist_w = 512; int hist_h = 400;
int bin_w = std::round( static_cast<double>(hist_w)/static_cast<double>(histSize) );
cv::Mat histImage( hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) );
/// Normalize the result to [ 0, histImage.rows ]
cv::normalize(histAng, histAng, 0, histImage.rows, cv::NORM_MINMAX, -1, Mat() );
for( int i = 1; i < histSize; i++ )
{
cv::rectangle(histImage,
              cv::Point(bin_w*(i-1), hist_h - static_cast<int>(std::round(histAng.at<float>(i-1)))),
              cv::Point(bin_w*i, hist_h),
              cv::Scalar(255, 0, 0), -1); // thickness -1 draws the bin filled
}
Again, this can be done for the magnitude as well in the same way. These are super simple plots; if you need more complex or prettier plots, you may need to call an external library and pass in the data from the calculated histograms. Also, this code has not been tested, so it may have a typo or error, but if something fails, just write a comment and we can find a solution.
I hope this helps you, and sorry for the late answer.
I'm trying to generate a bird's eye view from an image. For the camera intrinsics and distortions, I'm using hard-coded values that I retrieved from a driving simulator that has a camera mounted on its roof.
The basis for the code is from "Learning OpenCV: Computer Vision with the OpenCV Library", p. 409.
When I run the code on an image containing a chessboard with 3 inner corners per row and 4 inner corners per column, my bird's eye view is upside down. I need the image to come out as a correct, right-side-up bird's eye view, because I need the homography matrix for another function call.
Here are the input and output images, and the code I'm using:
Input image:
Corners detected:
Output Image/bird's eye (upside down!):
The code:
#include <highgui.h>
#include <cv.h>
#include <cxcore.h>
#include <math.h>
#include <vector>
#include <stdio.h>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char* argv[]) {
if(argc != 4) return -1;
// INPUT PARAMETERS:
//
int board_w = atoi(argv[1]); //inner corners per row
int board_h = atoi(argv[2]); //inner corners per column
int board_n = board_w * board_h;
CvSize board_sz = cvSize( board_w, board_h );
//Hard coded intrinsics for the camera
Mat intrinsicMat = (Mat_<double>(3, 3) <<
418.7490, 0., 236.8528,
0.,558.6650,322.7346,
0., 0., 1.);
//Hard coded distortions for the camera
CvMat* distortion = cvCreateMat(1, 4, CV_32F);
cvmSet(distortion, 0, 0, -0.0019);
cvmSet(distortion, 0, 1, 0.0161);
cvmSet(distortion, 0, 2, 0.0011);
cvmSet(distortion, 0, 3, -0.0016);
IplImage* image = 0;
IplImage* gray_image = 0;
if( (image = cvLoadImage(argv[3])) == 0 ) {
printf("Error: Couldn’t load %s\n",argv[3]);
return -1;
}
gray_image = cvCreateImage( cvGetSize(image), 8, 1 );
cvCvtColor(image, gray_image, CV_BGR2GRAY );
// UNDISTORT OUR IMAGE
//
IplImage* mapx = cvCreateImage( cvGetSize(image), IPL_DEPTH_32F, 1 );
IplImage* mapy = cvCreateImage( cvGetSize(image), IPL_DEPTH_32F, 1 );
CvMat intrinsic (intrinsicMat);
//This initializes rectification matrices
//
cvInitUndistortMap(
&intrinsic,
distortion,
mapx,
mapy
);
IplImage *t = cvCloneImage(image);
// Rectify our image
//
cvRemap( t, image, mapx, mapy );
// GET THE CHESSBOARD ON THE PLANE
//
cvNamedWindow("Chessboard");
CvPoint2D32f* corners = new CvPoint2D32f[ board_n ];
int corner_count = 0;
int found = cvFindChessboardCorners(
image,
board_sz,
corners,
&corner_count,
CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS
);
if(!found){
printf("Couldn’t aquire chessboard on %s, "
"only found %d of %d corners\n",
argv[3],corner_count,board_n
);
return -1;
}
//Get Subpixel accuracy on those corners:
cvFindCornerSubPix(
gray_image,
corners,
corner_count,
cvSize(11,11),
cvSize(-1,-1),
cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1 )
);
//GET THE IMAGE AND OBJECT POINTS:
// We will choose chessboard object points as (r,c):
// (0,0), (board_w-1,0), (0,board_h-1), (board_w-1,board_h-1).
//
CvPoint2D32f objPts[4], imgPts[4];
imgPts[0] = corners[0];
imgPts[1] = corners[board_w-1];
imgPts[2] = corners[(board_h-1)*board_w];
imgPts[3] = corners[(board_h-1)*board_w + board_w-1];
objPts[0].x = 0; objPts[0].y = 0;
objPts[1].x = board_w -1; objPts[1].y = 0;
objPts[2].x = 0; objPts[2].y = board_h -1;
objPts[3].x = board_w -1; objPts[3].y = board_h -1;
// DRAW THE POINTS in order: B,G,R,YELLOW
//
cvCircle( image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0,0,255), 3); //blue
cvCircle( image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0,255,0), 3); //green
cvCircle( image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255,0,0), 3); //red
cvCircle( image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255,255,0), 3); //yellow
// DRAW THE FOUND CHESSBOARD
//
cvDrawChessboardCorners(
image,
board_sz,
corners,
corner_count,
found
);
cvShowImage( "Chessboard", image );
// FIND THE HOMOGRAPHY
//
CvMat *H = cvCreateMat( 3, 3, CV_32F);
cvGetPerspectiveTransform( objPts, imgPts, H);
Mat homography = H;
cvSave("Homography.xml",H); //We can reuse H for the same camera mounting
/**********************GENERATING 3X4 MATRIX***************************/
// LET THE USER ADJUST THE Z HEIGHT OF THE VIEW
//
float Z = 23;
int key = 0;
IplImage *birds_image = cvCloneImage(image);
cvNamedWindow("Birds_Eye");
// LOOP TO ALLOW USER TO PLAY WITH HEIGHT:
//
// escape key stops
//
while(key != 27) {
// Set the height
//
CV_MAT_ELEM(*H,float,2,2) = Z;
// COMPUTE THE FRONTAL PARALLEL OR BIRD’S-EYE VIEW:
// USING HOMOGRAPHY TO REMAP THE VIEW
//
cvWarpPerspective(
image,
birds_image,
H,
CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS
);
cvShowImage( "Birds_Eye", birds_image );
imwrite("/home/lee/bird.jpg", birds_image);
key = cvWaitKey();
if(key == 'u') Z += 0.5;
if(key == 'd') Z -= 0.5;
}
return 0;
}
The homography result seems correct. Since you're mapping the camera's z-axis to the world's y-axis, the image resulting from the bird's eye view (BEV) remap is upside down.
If you really need the BEV image oriented like the camera shot, you can use H' = Ty * Rx * H, where Rx is a 180-degree rotation around the x-axis, Ty is a translation along the y-axis, and H is your original homography. The translation is required because the rotation remaps your old BEV onto the negative side of the y-axis.
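A minimal sketch of that composition (untested; Hmat and h are illustrative names for the homography as a 3x3 CV_64F cv::Mat and the output image height, and if you keep the CV_WARP_INVERSE_MAP flag the extra factors may need to multiply on the other side):
cv::Mat Rx = (cv::Mat_<double>(3, 3) <<
    1,  0, 0,
    0, -1, 0,
    0,  0, 1);   // 180-degree rotation around the x-axis, as seen in the image plane
cv::Mat Ty = (cv::Mat_<double>(3, 3) <<
    1, 0, 0,
    0, 1, h,
    0, 0, 1);    // translate the flipped view back into the positive y range
cv::Mat Hfixed = Ty * Rx * Hmat;  // use this in place of the original homography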
I was trying to sharpen some standard images from Gonzalez's book. Below is some code I have tried, but the output doesn't come close to the sharpened image in the book.
cvSmooth(grayImg, grayImg, CV_GAUSSIAN, 3, 0, 0, 0);
IplImage* laplaceImg = cvCreateImage(cvGetSize(oriImg), IPL_DEPTH_16S, 1);
IplImage* abs_laplaceImg = cvCreateImage(cvGetSize(oriImg), IPL_DEPTH_8U, 1);
cvLaplace(grayImg, laplaceImg, 3);
cvConvertScaleAbs(laplaceImg, abs_laplaceImg, 1, 0);
IplImage* dstImg = cvCreateImage(cvGetSize(oriImg), IPL_DEPTH_8U, 1);
cvAdd(abs_laplaceImg, grayImg, dstImg, NULL);
Before Sharpening
My Sharpening Result
Desired Result
Absolute Laplace
I think the problem is that you are blurring the image before taking the 2nd derivative.
Here is the working code with the C++ API (I'm using OpenCV 2.4.3). I tried it in MATLAB as well, and the result is the same.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(int /*argc*/, char** /*argv*/) {
Mat img, imgLaplacian, imgResult;
//------------------------------------------------------------------------------------------- test, first of all
// now do it by hand
img = (Mat_<uchar>(4,4) << 0,1,2,3,4,5,6,7,8,9,0,11,12,13,14,15);
// first, the good result
Laplacian(img, imgLaplacian, CV_8UC1);
cout << "let opencv do it" << endl;
cout << imgLaplacian << endl;
Mat kernel = (Mat_<float>(3,3) <<
0, 1, 0,
1, -4, 1,
0, 1, 0);
int window_size = 3;
// now, reaaallly by hand
// note that, to avoid padding, the border of the result is left at zero (the valid area is smaller than the original)
Mat frame, frame32;
Rect roi;
imgLaplacian = Mat::zeros(img.size(), CV_32F);
for(int y=0; y<img.rows-window_size/2-1; y++) {
for(int x=0; x<img.cols-window_size/2-1; x++) {
roi = Rect(x,y, window_size, window_size);
frame = img(roi);
frame.convertTo(frame, CV_32F);
frame = frame.mul(kernel);
float v = sum(frame)[0];
imgLaplacian.at<float>(y,x) = v;
}
}
imgLaplacian.convertTo(imgLaplacian, CV_8U);
cout << "dudee" << imgLaplacian << endl;
// a little bit less "by hand"..
// using cv::filter2D
filter2D(img, imgLaplacian, -1, kernel);
cout << imgLaplacian << endl;
//------------------------------------------------------------------------------------------- real stuffs now
img = imread("moon.jpg", 0); // load grayscale image
// ok, now try different kernel
kernel = (Mat_<float>(3,3) <<
1, 1, 1,
1, -8, 1,
1, 1, 1); // another approximation of the second derivative, a stronger one
// do the laplacian filtering as it is
// well, we need to convert everything to something deeper than CV_8U
// because the kernel has some negative values,
// and in general we can expect the Laplacian image to have negative values
// BUT an 8-bit unsigned int (the one we are working with) can hold values from 0 to 255
// so any negative numbers would be truncated
filter2D(img, imgLaplacian, CV_32F, kernel);
img.convertTo(img, CV_32F);
imgResult = img - imgLaplacian;
// convert back to 8bits gray scale
imgResult.convertTo(imgResult, CV_8U);
imgLaplacian.convertTo(imgLaplacian, CV_8U);
namedWindow("laplacian", CV_WINDOW_AUTOSIZE);
imshow( "laplacian", imgLaplacian );
namedWindow("result", CV_WINDOW_AUTOSIZE);
imshow( "result", imgResult );
while( true ) {
char c = (char)waitKey(10);
if( c == 27 ) { break; }
}
return 0;
}
Have fun!
I think the main problem lies in the fact that you do img + laplace, while img - laplace gives better results. I remember that img - 2*laplace was best, but I cannot find where I read that, probably in one of the books I read at university.
You need to do img - laplace instead of img + laplace.
laplace: laplace(x,y) = f(x+1,y) + f(x-1,y) + f(x,y+1) + f(x,y-1) - 4*f(x,y)
So, if you subtract the laplace from the original image, you see that the minus sign in front of 4*f(x,y) gets negated and this term becomes positive.
You could also use a kernel with 5 in the center pixel instead of the Laplacian's -4, to make the sharpening a one-step process instead of first computing the laplace and then doing img - laplace. Why? Try deriving that yourself.
This would be the final kernel.
Mat kernel = (Mat_<float>(3,3) <<
     0, -1,  0,
    -1,  5, -1,
     0, -1,  0);
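If you want to see it applied, a minimal sketch (untested; it assumes img is the grayscale image already loaded as CV_8U):
// one pass of filter2D with (identity - laplace) computes img - laplace directly
Mat sharpened;
filter2D(img, sharpened, -1, kernel); // ddepth -1 keeps the source depth; negatives saturate to 0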
It is indeed a well-known result in image processing that if you subtract its Laplacian from an image, the image edges are amplified, giving a sharper image.
Laplacian filter kernel algorithm: sharpened_pixel = 5 * current - left - right - up - down
So the code will look like this:
void sharpen(const Mat& img, Mat& result)
{
result.create(img.size(), img.type());
//Process the inner pixels; the pixels on the image border are handled separately below
for (int row = 1; row < img.rows-1; row++)
{
//Previous row
const uchar* previous = img.ptr<const uchar>(row-1);
//Current line to be processed
const uchar* current = img.ptr<const uchar>(row);
//Next row
const uchar* next = img.ptr<const uchar>(row+1);
uchar *output = result.ptr<uchar>(row);
int ch = img.channels();
int starts = ch;
int ends = (img.cols - 1) * ch;
for (int col = starts; col < ends; col++)
{
//The output pointer advances in sync with the current row; each channel of every pixel gets a sharpened value, so the number of channels has to be taken into account.
*output++ = saturate_cast<uchar>(5 * current[col] - current[col-ch] - current[col+ch] - previous[col] - next[col]);
}
} //end loop
//Handle the boundary: set the border pixels to 0
result.row(0).setTo(Scalar::all(0));
result.row(result.rows-1).setTo(Scalar::all(0));
result.col(0).setTo(Scalar::all(0));
result.col(result.cols-1).setTo(Scalar::all(0));
}
int main()
{
Mat lena = imread("lena.jpg");
Mat sharpenedLena;
sharpen(lena, sharpenedLena);
imshow("lena", lena);
imshow("sharpened lena", sharpenedLena);
waitKey();
return 0;
}
If you are lazier, have fun with the following.
int main()
{
Mat lena = imread("lena.jpg");
Mat sharpenedLena;
Mat kernel = (Mat_<float>(3, 3) << 0, -1, 0, -1, 4, -1, 0, -1, 0);
cv::filter2D(lena, sharpenedLena, lena.depth(), kernel);
imshow("lena", lena);
imshow("sharpened lena", sharpenedLena);
waitKey();
return 0;
}
And the result looks like this.
How would you go about reading the pixel value in HSV format rather than RGB? The code below reads the pixel value of the circles' centers in RGB format. Is there much difference when it comes to reading the value in HSV?
int main(int argc, char** argv)
{
//load image from directory
IplImage* img = cvLoadImage("C:\\Users\\Nathan\\Desktop\\SnookerPic.png");
IplImage* gray = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
CvMemStorage* storage = cvCreateMemStorage(0);
//covert to grayscale
cvCvtColor(img, gray, CV_BGR2GRAY);
// This is done so as to prevent a lot of false circles from being detected
cvSmooth(gray, gray, CV_GAUSSIAN, 7, 7);
IplImage* canny = cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,1);
IplImage* rgbcanny = cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,3);
cvCanny(gray, canny, 50, 100, 3);
//detect circles
CvSeq* circles = cvHoughCircles(gray, storage, CV_HOUGH_GRADIENT, 1, 35.0, 75, 60,0,0);
cvCvtColor(canny, rgbcanny, CV_GRAY2BGR);
//draw all detected circles
for (int i = 0; i < circles->total; i++)
{
// round the floats to an int
float* p = (float*)cvGetSeqElem(circles, i);
cv::Point center(cvRound(p[0]), cvRound(p[1]));
int radius = cvRound(p[2]);
//uchar* ptr;
//ptr = cvPtr2D(img, center.y, center.x, NULL);
//printf("B: %d G: %d R: %d\n", ptr[0],ptr[1],ptr[2]);
CvScalar s;
s = cvGet2D(img,center.y, center.x);//colour of circle
printf("B: %f G: %f R: %f\n",s.val[0],s.val[1],s.val[2]);
// draw the circle center
cvCircle(img, center, 3, CV_RGB(0,255,0), -1, 8, 0 );
// draw the circle outline
cvCircle(img, center, radius+1, CV_RGB(0,0,255), 2, 8, 0 );
//display coordinates
printf("x: %d y: %d r: %d\n",center.x,center.y, radius);
}
//create window
//cvNamedWindow("circles", 1);
cvNamedWindow("SnookerImage", 1);
//show image in window
//cvShowImage("circles", rgbcanny);
cvShowImage("SnookerImage", img);
cvSaveImage("out.png", img);
//cvDestroyWindow("SnookerImage");
//cvDestroyWindow("circles");
//cvReleaseMemStorage(&storage);
cvWaitKey(0);
return 0;
}
If you use the C++ interface, you can use
cv::cvtColor(img, img, CV_BGR2HSV);
See the documentation for cvtColor for more information.
Update:
Reading and writing pixels the slow way (assuming that the HSV values are stored as a cv::Vec3b (doc))
cv::Vec3b pixel = image.at<cv::Vec3b>(0,0); // read pixel (0,0) (make copy)
pixel[0] = 0; // H
pixel[1] = 0; // S
pixel[2] = 0; // V
image.at<cv::Vec3b>(0,0) = pixel; // write pixel (0,0) (copy pixel back to image)
Using the image.at<...>(row, col) notation (doc, scroll down a lot) is quite slow if you want to manipulate every pixel. There is an article in the documentation on how to access the pixels faster. You can apply the iterator method like this:
cv::MatIterator_<cv::Vec3b> it = image.begin<cv::Vec3b>(),
it_end = image.end<cv::Vec3b>();
for(; it != it_end; ++it)
{
// work with pixel in here, e.g.:
cv::Vec3b& pixel = *it; // reference to pixel in image
pixel[0] = 0; // changes pixel in image
}
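Applied to the snooker code from the question (which uses the C API), a minimal sketch (untested) converts the image to HSV once and samples the circle centers from it:
// convert a copy of the BGR image to HSV, then read circle centers from it
IplImage* hsv = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
cvCvtColor(img, hsv, CV_BGR2HSV);
CvScalar s = cvGet2D(hsv, center.y, center.x); // now H, S, V instead of B, G, R
printf("H: %f S: %f V: %f\n", s.val[0], s.val[1], s.val[2]);
cvReleaseImage(&hsv);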
I'm messing around with OpenCV, and am trying to do some of the same signal processing stuff I've done in MATLAB. I'm looking to mask out some frequencies, so I have constructed a matrix which will do this. The problem is that there seem to be a few more steps in OpenCV than in MATLAB to accomplish this.
In MATLAB, it's simple enough:
F = fft2(image);
smoothF = F .* mask; % multiply FT by mask
smooth = ifft2(smoothF); % do inverse FT
But I'm having trouble doing the same in OpenCV. The DFT leaves me with a 2-channel image, so I've split the image, multiplied by the mask, merged it back, and then performed the inverse DFT. However, I'm getting a weird result in my final image. I'm pretty sure I'm missing something...
CvMat* maskImage(CvMat* im, int maskWidth, int maskHeight)
{
CvMat* mask = cvCreateMat(im->rows, im->cols, CV_64FC1);
cvZero(mask);
int cx, cy;
cx = mask->cols/2;
cy = mask->rows/2;
int left_x = cx - maskWidth;
int right_x = cx + maskWidth;
int top_y = cy + maskHeight;
int bottom_y = cy - maskHeight;
//create mask
for(int i = bottom_y; i < top_y; i++)
{
for(int j = left_x; j < right_x; j++)
{
cvmSet(mask,i,j,1.0f); // Set M(i,j)
}
}
cvShiftDFT(mask, mask);
IplImage* maskImage, stub;
maskImage = cvGetImage(mask, &stub);
cvNamedWindow("mask", 0);
cvShowImage("mask", maskImage);
CvMat* real = cvCreateMat(im->rows, im->cols, CV_64FC1);
CvMat* imag = cvCreateMat(im->rows, im->cols, CV_64FC1);
cvSplit(im, imag, real, NULL, NULL);
cvMul(real, mask, real);
cvMul(imag, mask, imag);
cvMerge(real, imag, NULL, NULL, im);
IplImage* maskedImage;
maskedImage = cvGetImage(imag, &stub);
cvNamedWindow("masked", 0);
cvShowImage("masked", maskedImage);
return im;
}
Any reason you are splitting and merging the real and imaginary components in the reverse order? In a packed DFT result, channel 0 is the real part and channel 1 is the imaginary part, but your cvSplit call passes imag first, so the two end up swapped.
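If that swap is unintentional, the fix is just the argument order (a sketch reusing the variables from your function):
// channel 0 of a packed DFT result is the real part, channel 1 the imaginary part
cvSplit(im, real, imag, NULL, NULL);
cvMul(real, mask, real);
cvMul(imag, mask, imag);
cvMerge(real, imag, NULL, NULL, im);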