I'm messing around with OpenCV and am trying to do some of the same signal processing I've done in MATLAB. I'm looking to mask out some frequencies, so I have constructed a matrix that will do this. The problem is that there seem to be a few more steps in OpenCV than in MATLAB to accomplish it.
In MATLAB, it's simple enough:
F = fft2(image);
smoothF = F .* mask;     % multiply FT by mask
smooth = ifft2(smoothF); % do inverse FT
But I'm having trouble doing the same in OpenCV. The DFT leaves me with a 2-channel image, so I split the image, multiply by the mask, merge it back, and then perform the inverse DFT. However, the final image I get looks wrong. I'm pretty sure I'm missing something...
CvMat* maskImage(CvMat* im, int maskWidth, int maskHeight)
{
    CvMat* mask = cvCreateMat(im->rows, im->cols, CV_64FC1);
    cvZero(mask);

    int cx, cy;
    cx = mask->cols/2;
    cy = mask->rows/2;

    int left_x = cx - maskWidth;
    int right_x = cx + maskWidth;
    int top_y = cy + maskHeight;
    int bottom_y = cy - maskHeight;

    //create mask
    for(int i = bottom_y; i < top_y; i++)
    {
        for(int j = left_x; j < right_x; j++)
        {
            cvmSet(mask, i, j, 1.0f); // Set M(i,j)
        }
    }

    cvShiftDFT(mask, mask);

    IplImage* maskImage, stub;
    maskImage = cvGetImage(mask, &stub);
    cvNamedWindow("mask", 0);
    cvShowImage("mask", maskImage);

    CvMat* real = cvCreateMat(im->rows, im->cols, CV_64FC1);
    CvMat* imag = cvCreateMat(im->rows, im->cols, CV_64FC1);
    cvSplit(im, imag, real, NULL, NULL);

    cvMul(real, mask, real);
    cvMul(imag, mask, imag);

    cvMerge(real, imag, NULL, NULL, im);

    IplImage* maskedImage;
    maskedImage = cvGetImage(imag, &stub);
    cvNamedWindow("masked", 0);
    cvShowImage("masked", maskedImage);

    return im;
}
Any reason you are merging the real and imaginary components in the reverse order?
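For reference, a minimal sketch of that step with the channels kept in the conventional order (channel 0 = real part, channel 1 = imaginary part); this is only an illustration of the ordering, not a tested drop-in replacement for the code above:

CvMat* real = cvCreateMat(im->rows, im->cols, CV_64FC1);
CvMat* imag = cvCreateMat(im->rows, im->cols, CV_64FC1);
cvSplit(im, real, imag, NULL, NULL);   // channel 0 -> real, channel 1 -> imaginary
cvMul(real, mask, real);               // apply the mask to both parts
cvMul(imag, mask, imag);
cvMerge(real, imag, NULL, NULL, im);   // merge back in the same order

Alternatively, you could merge the mask with itself into a two-channel matrix and do the multiplication with a single cvMul call on im.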
Related
I am currently converting an image from RGB to YCrCb format using the OpenCV function cvtColor. I would like to perform the conversion on my own, with equations similar to:
//equations for RGB to YUV conversion
Y' = 0.299 R + 0.587 G + 0.114 B
U = -0.147 R - 0.289 G + 0.436 B
V = 0.615 R - 0.515 G - 0.100 B.
I am not able to understand OpenCV image matrix operations. I would like to access the RGB pixel values from the image Mat so that I can perform the conversion myself. How can I get the R, G, B values from the image, and how do I then apply the transformation? My current code is below.
int main (int argc, char *argv[])
{
// Load in image
cv::Mat src = cv::imread("C:\\openv2410\\frames\\frame_0.png", 1);
// Create a vector for the channels and split the original image into B G R colour channels.
// Keep in mind that OpenCV uses BGR and not RGB images
vector<cv::Mat> spl;
split(src,spl);
// Create a zero-pixel image for filling purposes - will become clear later
// Also create container images for B G R channels as colour images
cv::Mat empty_image = cv::Mat::zeros(src.rows, src.cols, CV_8UC1);
cv::Mat empty_channel = cv::Mat::zeros(src.rows, src.cols, CV_8UC1);
cv::Mat result_blue(src.rows, src.cols, CV_8UC3); // notice the 3 channels here!
cv::Mat result_green(src.rows, src.cols, CV_8UC3); // notice the 3 channels here!
cv::Mat result_red(src.rows, src.cols, CV_8UC3); // notice the 3 channels here!
// Create blue channel
cv::Mat in1[] = { spl[0], empty_image, empty_image };
int from_to1[] = { 0,0, 1,1, 2,2 };
mixChannels( in1, 3, &result_blue, 1, from_to1, 3 );
// Create green channel
cv::Mat in2[] = { empty_channel, spl[1], empty_image };
int from_to2[] = { 0,0, 1,1, 2,2 };
mixChannels( in2, 3, &result_green, 1, from_to2, 3 );
// Create red channel
cv::Mat in3[] = { empty_channel, empty_channel, spl[2]};
int from_to3[] = { 0,0, 1,1, 2,2 };
mixChannels( in3, 3, &result_red, 1, from_to3, 3 );
imshow("blue channel",result_blue);
imshow("green channel",result_green);
imshow("red channel",result_red);
cv::waitKey(0);
return 0;
}
Sample code for the conversion from BGR to YCrCb. Source: (1).
//sample input and output
float data[3][1] = { 98,76,88 };
Mat input( 1, 1, CV_32FC3, data) ;
Mat output( 1, 1, CV_32FC3 );
//iterate over all pixels
for(int i = 0; i < input.rows; i++) {
for(int j = 0; j < input.cols; j++) {
//get bgr pixel
Vec3f bgrPixel = input.at<Vec3f>(i, j);
float B = bgrPixel[0];
float G = bgrPixel[1];
float R = bgrPixel[2];
//actual conversion from BGR to YCrCb
float delta = 0.5f;
float Y = 0.299f * R + 0.587f * G + 0.114f * B;
float Cb = (B - Y) * 0.564f + delta;
float Cr = (R - Y) * 0.713f + delta;
//store into result image
Vec3f yCrCbPixel( Y, Cr, Cb );
output.at<Vec3f>(i, j) = yCrCbPixel;
}
}
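As a quick sanity check on a whole image, you can run your manual loop next to OpenCV's built-in conversion and compare the two results. A rough sketch (my addition): "src" is assumed to be an 8-bit BGR image and "myYCrCbImage" is a hypothetical name for your manually converted 8-bit result; note that for 8-bit input OpenCV uses delta = 128 instead of 0.5.

// hypothetical verification snippet - not part of the original answer
cv::Mat reference, difference;
cv::cvtColor(src, reference, CV_BGR2YCrCb);        // built-in BGR -> YCrCb
cv::absdiff(reference, myYCrCbImage, difference);  // compare against your own conversion
std::cout << "max difference: " << cv::norm(difference, cv::NORM_INF) << std::endl;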
I want to apply K-means in OpenCV to a region of an image that is not a square or a rectangle. For example, the source image is:
Now I select a custom mask:
and apply K-means with K = 3:
Obviously, this is without considering the bounds (white).
Instead, what I can do with OpenCV is K-means, but it considers the bounds:
And that messes up my final image, because black is considered one colour.
Do you have any clue?
Thank you in advance.
Quick and dirty solution.
vector<Vec3b> points;
vector<Point> locations;

for (int y = 0; y < src.rows; y++) {
    for (int x = 0; x < src.cols; x++) {
        if ((int)mask.at<unsigned char>(y, x) != 0) {
            points.push_back(src.at<Vec3b>(y, x));
            locations.push_back(Point(x, y));
        }
    }
}

Mat kmeanPoints(points.size(), 3, CV_32F);
for (int y = 0; y < points.size(); y++) {
    for (int z = 0; z < 3; z++) {
        kmeanPoints.at<float>(y, z) = points[y][z];
    }
}

Mat labels;
Mat centers;
kmeans(kmeanPoints, 4, labels, TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10, 0.1), 10, cv::KMEANS_PP_CENTERS, centers);

Mat final = Mat::zeros(src.size(), src.type());
Vec3b tempColor;
for (int i = 0; i < locations.size(); i++) {
    int cluster_idx = labels.at<int>(i, 0);
    tempColor[0] = centers.at<float>(cluster_idx, 0);
    tempColor[1] = centers.at<float>(cluster_idx, 1);
    tempColor[2] = centers.at<float>(cluster_idx, 2);
    final.at<Vec3b>(locations[i]) = tempColor;
}
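If you would rather keep the original pixels outside the mask instead of leaving them black, one possible follow-up step (a sketch, assuming mask is a CV_8U image with non-zero values inside the region of interest):

// copy the untouched source pixels back wherever the mask is zero
src.copyTo(final, mask == 0);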
Assuming that you have an input RGB image called img (here) and a one-channel mask called mask (here), here is the snippet to prepare your k-means computation:
int nbClasses = 3; // or whatever you want
cv::TermCriteria myCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 10, 1.0);
cv::Mat data, labels, centers, result;
img.convertTo(data, CV_32F);
// reshape into 3 columns (one per channel, in BGR order) and as many rows as the total number of pixels in img
data = data.reshape(1, data.total());
If you want to apply a normal k-means (without a mask):
// apply k-means
cv::kmeans(data, nbClasses, labels, myCriteria, 3, cv::KMEANS_PP_CENTERS, centers);
// reshape both to a single column of Vec3f pixels
centers = centers.reshape(3, centers.rows);
data = data.reshape(3, data.rows);
// replace pixel values with their center value
cv::Vec3f *p = data.ptr<cv::Vec3f>();
for (size_t i = 0; i < data.rows; i++)
{
    int center_id = labels.at<int>(i);
    p[i] = centers.at<cv::Vec3f>(center_id);
}
// back to 2D image
data = data.reshape(3, img.rows);
// optional conversion to uchar
data.convertTo(result, CV_8U);
The result is here.
But if you instead want to apply a masked k-means:
int nbWhitePixels = cv::countNonZero(mask);
cv::Mat dataMasked = cv::Mat(nbWhitePixels, 3, CV_32F, cv::Scalar(0));
cv::Mat maskFlatten = mask.reshape(1, mask.total());
// filter data by the mask
int idx = 0;
for (int k = 0; k < mask.total(); k++)
{
    int val = maskFlatten.at<uchar>(k, 0);
    if (val != 0)
    {
        float val0 = data.at<float>(k, 0);
        float val1 = data.at<float>(k, 1);
        float val2 = data.at<float>(k, 2);

        dataMasked.at<float>(idx, 0) = val0;
        dataMasked.at<float>(idx, 1) = val1;
        dataMasked.at<float>(idx, 2) = val2;
        idx++;
    }
}
// apply k-means
cv::kmeans(dataMasked, nbClasses, labels, myCriteria, 3, cv::KMEANS_PP_CENTERS, centers);
// reshape to a single column of Vec3f pixels
centers = centers.reshape(3, centers.rows);
dataMasked = dataMasked.reshape(3, dataMasked.rows);
data = data.reshape(3, data.rows);
// replace pixel values with their center value, only for pixels in mask
cv::Vec3f *p = data.ptr<cv::Vec3f>();
idx = 0;
for (size_t i = 0; i < data.rows; i++)
{
    if (maskFlatten.at<uchar>(i, 0) != 0)
    {
        int center_id = labels.at<int>(idx);
        p[i] = centers.at<cv::Vec3f>(center_id);
        idx++;
    }
    //else
    //    p[i] = cv::Vec3f(0, 0, 0);
}
// back to 2d, and uchar
data = data.reshape(3, img.rows);
data.convertTo(result, CV_8U);
You will now have this result.
If you leave the else part commented out, you will keep the original pixel values outside the mask, whereas if you uncomment it, those pixels will be set to black, like here.
Code :
cv::Point2f src_vertices[4];
src_vertices[0] = c1[0];
src_vertices[1] = c1[1];
src_vertices[2] = c1[2];
src_vertices[3] = c1[3];
cv::Point2f dst_vertices[4];
dst_vertices[0] = c2[0];
dst_vertices[1] = c2[1];
dst_vertices[2] = c2[2];
dst_vertices[3] = c2[3];
cv::Mat warpMatrix = getPerspectiveTransform(src_vertices,dst_vertices);
cv::Mat output = cv::Mat::zeros(original.cols,original.rows , CV_32FC3);
cv::warpPerspective(original, output, warpMatrix,cv::Size(606,606));
UIImage *_adjustedImage = [MAOpenCV UIImageFromCVMat:output];
Below is the original image
After applying straightening, the output is the image below
Issue
The image we get after straightening is cropped a bit at the corners, and this output comes from the OpenCV framework itself.
How can this issue be resolved? Please let me know if anybody has found a solution. Thank you.
Since this question is asked quite often, I've written a few lines of code which save some time for many others.
try this:
cv::Rect computeWarpedContourRegion(const std::vector<cv::Point> & points, const cv::Mat & homography)
{
    std::vector<cv::Point2f> transformed_points(points.size());

    for (unsigned int i = 0; i < points.size(); ++i)
    {
        // warp the points
        transformed_points[i].x = points[i].x * homography.at<double>(0,0) + points[i].y * homography.at<double>(0,1) + homography.at<double>(0,2);
        transformed_points[i].y = points[i].x * homography.at<double>(1,0) + points[i].y * homography.at<double>(1,1) + homography.at<double>(1,2);
    }

    // dehomogenization necessary?
    if (homography.rows == 3)
    {
        float homog_comp;
        for (unsigned int i = 0; i < transformed_points.size(); ++i)
        {
            homog_comp = points[i].x * homography.at<double>(2,0) + points[i].y * homography.at<double>(2,1) + homography.at<double>(2,2);
            transformed_points[i].x /= homog_comp;
            transformed_points[i].y /= homog_comp;
        }
    }

    // now find the bounding box for these points:
    cv::Rect boundingBox = cv::boundingRect(transformed_points);
    return boundingBox;
}
cv::Rect computeWarpedImageRegion(const cv::Mat & image, const cv::Mat & homography)
{
    std::vector<cv::Point> imageBorder;
    imageBorder.push_back(cv::Point(0, 0));
    imageBorder.push_back(cv::Point(image.cols, 0));
    imageBorder.push_back(cv::Point(image.cols, image.rows));
    imageBorder.push_back(cv::Point(0, image.rows));

    return computeWarpedContourRegion(imageBorder, homography);
}
cv::Mat adjustHomography(const cv::Rect & transformedRegion, const cv::Mat & homography)
{
    if (homography.rows == 2) throw("homography adjustment for affine matrix not implemented yet");

    // unit matrix
    cv::Mat correctionHomography = cv::Mat::eye(3, 3, CV_64F);
    // correction translation
    correctionHomography.at<double>(0,2) = -transformedRegion.x;
    correctionHomography.at<double>(1,2) = -transformedRegion.y;

    return correctionHomography * homography;
}
int main()
{
// straightening algorithm without cropping:
cv::Mat original = cv::imread("straightening_src.png");
cv::Mat output;
cv::Point2f src_vertices[4];
cv::Point2f dst_vertices[4];
// I have to add them manually, you can just use your old code here.
// my result will look different, since I don't use your original point correspondences, but system is the same...
src_vertices[0] = cv::Point2f(108,190);
src_vertices[1] = cv::Point2f(273,178);
src_vertices[2] = cv::Point2f(389,322);
src_vertices[3] = cv::Point2f(183,355);
dst_vertices[0] = cv::Point2f(172,190);
dst_vertices[1] = cv::Point2f(374,193);
dst_vertices[2] = cv::Point2f(380,362);
dst_vertices[3] = cv::Point2f(171,366);
// compute homography
cv::Mat warpMatrix = getPerspectiveTransform(src_vertices,dst_vertices);
// now you have to find out whether the warped image will fit into the output image or whether it will be cropped.
// if it will be cropped, you will most probably have to
// 1. find out how big your output image must be and the coordinates its content will be warped to.
// 2. modify your transformation (by a translation) so that the warped image will be placed properly inside the output image
// part 1: find the region that will hold the new image.
cv::Rect warpedImageRegion = computeWarpedImageRegion(original, warpMatrix);
// part 2: modify the transformation.
cv::Mat adjustedHomography = adjustHomography(warpedImageRegion, warpMatrix);
cv::Size transformedImageSize = cv::Size(warpedImageRegion.width,warpedImageRegion.height);
cv::warpPerspective(original, output, adjustedHomography, transformedImageSize);
cv::imshow("output", output);
cv::imwrite("straightening_result.png", output);
cv::waitKey(-1);
}
For this input (1) and the given transformation correspondences, you will get this result (2):
(1)
(2)
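As a side note (my addition, not part of the original answer), the manual point warping in computeWarpedContourRegion can also be done with cv::perspectiveTransform, which handles the dehomogenization for you. A sketch for the four image corners, using the same image and homography names as above:

std::vector<cv::Point2f> corners, warpedCorners;
corners.push_back(cv::Point2f(0, 0));
corners.push_back(cv::Point2f((float)image.cols, 0));
corners.push_back(cv::Point2f((float)image.cols, (float)image.rows));
corners.push_back(cv::Point2f(0, (float)image.rows));

cv::perspectiveTransform(corners, warpedCorners, homography);   // applies the 3x3 homography
cv::Rect warpedRegion = cv::boundingRect(warpedCorners);        // bounding box of the warped corners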
After the image is warped, it should be possible to remove the extra black part of the image.
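One simple way to do that (a sketch under the assumption that the warped image "output" is 8-bit BGR and the unused area is exactly black) is to crop to the bounding box of the non-black pixels; note that black corner triangles can remain if the warped content is not axis-aligned:

cv::Mat gray, nonBlack;
cv::cvtColor(output, gray, CV_BGR2GRAY);                  // collapse to one channel
cv::threshold(gray, nonBlack, 0, 255, cv::THRESH_BINARY); // anything non-zero -> 255

std::vector<cv::Point> nonBlackPoints;
cv::findNonZero(nonBlack, nonBlackPoints);                // collect all non-black pixel positions
cv::Rect roi = cv::boundingRect(nonBlackPoints);
cv::Mat cropped = output(roi).clone();                    // result without the black border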
I have an image of the background scene and an image of the same scene with objects in front of it. Now I want to create a mask of the foreground objects using background subtraction. Both images are RGB.
I have already created the following code:
cv::Mat diff;
diff.create(orgImage.dims, orgImage.size, CV_8UC3);
diff = abs(orgImage - refImage);

cv::Mat mask(diff.rows, diff.cols, CV_8U, cv::Scalar(0,0,0));
//mask = (diff > 10);

for (int j = 0; j < diff.rows; j++) {
    // get the address of row j
    //uchar* dataIn= diff.ptr<uchar>(j);
    //uchar* dataOut= mask.ptr<uchar>(j);
    for (int i = 0; i < diff.cols; i++) {
        if (diff.at<cv::Vec3b>(j,i)[0] > 30 || diff.at<cv::Vec3b>(j,i)[1] > 30 || diff.at<cv::Vec3b>(j,i)[2] > 30)
            mask.at<uchar>(j,i) = 255;
    }
}
I don't know if I am doing this right.
Have a look at the inRange function from OpenCV. This will allow you to set multiple thresholds at the same time for a 3 channel image.
So, to create the mask you were looking for, do the following:
inRange(diff, Scalar(30, 30, 30), Scalar(255, 255, 255), mask);
This should also be faster than trying to access each pixel yourself.
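For completeness, a small sketch of the whole flow (my addition): with 8-bit Mats the expression abs(orgImage - refImage) saturates negative differences to zero, so cv::absdiff is usually the safer way to build diff before thresholding. Also note that inRange marks a pixel only when all three channel differences are at least 30, which is slightly stricter than the per-channel OR in the loop above.

cv::Mat diff, mask;
cv::absdiff(orgImage, refImage, diff);   // per-channel |orgImage - refImage| without saturation loss
cv::inRange(diff, cv::Scalar(30, 30, 30), cv::Scalar(255, 255, 255), mask);   // 255 where all channels are in range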
EDIT : If skin detection is what you are trying to do, I would first do skin detection, and then afterwards do background subtraction to remove the background. Otherwise, your skin detector will have to take into account the intensity shift caused by the subtraction.
Check out my other answer, about good techniques for skin detection.
EDIT :
Is this any faster?
int main(int argc, char* argv[])
{
Mat fg = imread("fg.jpg");
Mat bg = imread("bg.jpg");
cvtColor(fg, fg, CV_RGB2YCrCb);
cvtColor(bg, bg, CV_RGB2YCrCb);
Mat distance = Mat::zeros(fg.size(), CV_32F);
vector<Mat> fgChannels;
split(fg, fgChannels);
vector<Mat> bgChannels;
split(bg, bgChannels);
for(size_t i = 0; i < fgChannels.size(); i++)
{
Mat temp = abs(fgChannels[i] - bgChannels[i]);
temp.convertTo(temp, CV_32F);
distance = distance + temp;
}
Mat mask;
threshold(distance, mask, 35, 255, THRESH_BINARY);
Mat kernel5x5 = getStructuringElement(MORPH_RECT, Size(5, 5));
morphologyEx(mask, mask, MORPH_OPEN, kernel5x5);
imshow("fg", fg);
imshow("bg", bg);
imshow("mask", mask);
waitKey();
return 0;
}
This code produces this mask based on your input imagery:
Finally, here is what I get using my simple thresholding method:
Mat diff = fgYcc - bgYcc;
vector<Mat> diffChannels;
split(diff, diffChannels);
// only operating on luminance for background subtraction...
threshold(diffChannels[0], bgfgMask, 1, 255.0, THRESH_BINARY_INV);
Mat kernel5x5 = getStructuringElement(MORPH_RECT, Size(5, 5));
morphologyEx(bgfgMask, bgfgMask, MORPH_OPEN, kernel5x5);
This produces the following mask:
I think when I do it like this (in the YCrCb colour space) I get the right results, but accessing each pixel is slow, so I need to find another algorithm:
cv::Mat mask(image.rows, image.cols, CV_8U, cv::Scalar(0,0,0));
cv::Mat_<cv::Vec3b>::const_iterator itImage= image.begin<cv::Vec3b>();
cv::Mat_<cv::Vec3b>::const_iterator itend= image.end<cv::Vec3b>();
cv::Mat_<cv::Vec3b>::iterator itRef= refRoi.begin<cv::Vec3b>();
cv::Mat_<uchar>::iterator itMask= mask.begin<uchar>();
for ( ; itImage != itend; ++itImage, ++itRef, ++itMask) {
    int distance = abs((*itImage)[0] - (*itRef)[0]) +
                   abs((*itImage)[1] - (*itRef)[1]) +
                   abs((*itImage)[2] - (*itRef)[2]);
    if (distance < 30)
        *itMask = 0;
    else
        *itMask = 255;
}
I'm trying to create a program that will draw a 2D greyscale spectrum of a given image. I'm using the OpenCV and FFTW libraries. Using tips and code from the internet and modifying them, I've managed to load an image, calculate the FFT of the image, and recreate the image from the FFT (it comes out the same). What I'm unable to do is draw the Fourier spectrum itself. Could you please help me?
Here's the code (less important lines removed):
/* Copy input image */
/* Create output image */
/* Allocate input data for FFTW */
in = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * N);
dft = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * N);
/* Create plans */
plan_f = fftw_plan_dft_2d(w, h, in, dft, FFTW_FORWARD, FFTW_ESTIMATE);
/* Populate input data in row-major order */
for (i = 0, k = 0; i < h; i++)
{
    for (j = 0; j < w; j++, k++)
    {
        in[k][0] = ((uchar*)(img1->imageData + i * img1->widthStep))[j];
        in[k][1] = 0.;
    }
}

/* forward DFT */
fftw_execute(plan_f);

/* spectrum */
for (i = 0, k = 0; i < h; i++)
{
    for (j = 0; j < w; j++, k++)
        ((uchar*)(img2->imageData + i * img2->widthStep))[j] = sqrt(pow(dft[k][0], 2) + pow(dft[k][1], 2));
}
cvShowImage("iplimage_dft(): original", img1);
cvShowImage("iplimage_dft(): result", img2);
cvWaitKey(0);
/* Free memory */
}
The problem is in the "Spectrum" section. Instead of a spectrum I get some noise. What am I doing wrong? I would be grateful for your help.
You need to draw the magnitude of the spectrum. Here is the code.
void ForwardFFT(Mat &Src, Mat *FImg)
{
    int M = getOptimalDFTSize( Src.rows );
    int N = getOptimalDFTSize( Src.cols );
    Mat padded;
    copyMakeBorder(Src, padded, 0, M - Src.rows, 0, N - Src.cols, BORDER_CONSTANT, Scalar::all(0));

    // Create a complex representation of the image:
    // planes[0] holds the image itself, planes[1] its imaginary part (filled with zeros)
    Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
    Mat complexImg;
    merge(planes, 2, complexImg);
    dft(complexImg, complexImg);

    // After the transform the result again consists of a real and an imaginary part
    split(complexImg, planes);

    // crop the spectrum if it has an odd number of rows or columns
    planes[0] = planes[0](Rect(0, 0, planes[0].cols & -2, planes[0].rows & -2));
    planes[1] = planes[1](Rect(0, 0, planes[1].cols & -2, planes[1].rows & -2));

    Recomb(planes[0], planes[0]);
    Recomb(planes[1], planes[1]);

    // Normalize the spectrum
    planes[0] /= float(M*N);
    planes[1] /= float(M*N);

    FImg[0] = planes[0].clone();
    FImg[1] = planes[1].clone();
}
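Recomb() is not defined in the answer; it is the usual quadrant swap ("fftshift") that moves the zero-frequency component to the centre of the spectrum. A possible implementation (my sketch, assuming even-sized input, which the cropping above guarantees; declare or define it before ForwardFFT):

void Recomb(Mat &src, Mat &dst)
{
    int cx = src.cols / 2;
    int cy = src.rows / 2;
    Mat tmp(src.size(), src.type());
    src(Rect(0, 0, cx, cy)).copyTo(tmp(Rect(cx, cy, cx, cy)));   // top-left -> bottom-right
    src(Rect(cx, cy, cx, cy)).copyTo(tmp(Rect(0, 0, cx, cy)));   // bottom-right -> top-left
    src(Rect(cx, 0, cx, cy)).copyTo(tmp(Rect(0, cy, cx, cy)));   // top-right -> bottom-left
    src(Rect(0, cy, cx, cy)).copyTo(tmp(Rect(cx, 0, cx, cy)));   // bottom-left -> top-right
    dst = tmp;
}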
void ForwardFFT_Mag_Phase(Mat &src, Mat &Mag, Mat &Phase)
{
    Mat planes[2];
    ForwardFFT(src, planes);
    Mag.zeros(planes[0].rows, planes[0].cols, CV_32F);
    Phase.zeros(planes[0].rows, planes[0].cols, CV_32F);
    cv::cartToPolar(planes[0], planes[1], Mag, Phase);
}
Mat LogMag;
LogMag.zeros(Mag.rows, Mag.cols, CV_32F);
LogMag = (Mag + 1);
cv::log(LogMag, LogMag);
//---------------------------------------------------
imshow("Log magnitude", LogMag);
imshow("Phase", Phase);
imshow("Filtering result", img);
Can you try to do the IFFT step and see if you recover the original image? Then you can check step by step where your problem is. Another way to find the problem is to run this process on a small matrix that you define yourself, calculate its FFT in MATLAB, and compare step by step; that worked for me!