C++ OpenCV image editing

I want to provide the center coordinates of a small square, e.g. (5,5) [the small square is 10x10], so that it is placed at coordinates (8,8) of a larger 20x20 square,
to get this image result:

Try this. I didn't test it though, so please comment if it doesn't work and I'll try to fix it =)
cv::Mat placeImageInImage(cv::Point center, cv::Mat src, cv::Mat dst_orig)
{
    cv::Mat dst = dst_orig.clone();
    // rectangle in dst where src would go if nothing had to be clipped
    cv::Rect targetRect = cv::Rect(center.x - src.cols/2, center.y - src.rows/2, src.cols, src.rows);
    cv::Rect srcRect = cv::Rect(0, 0, src.cols, src.rows);
    cv::Rect dstRect = cv::Rect(0, 0, dst.cols, dst.rows);
    // clip the target rectangle to the bounds of dst
    cv::Rect roi = targetRect & dstRect;
    srcRect.width = roi.width;
    srcRect.height = roi.height;
    // shift the source rectangle by the amount clipped off the top/left
    // (didn't test, maybe swap the order:)
    srcRect.x += roi.x - targetRect.x;
    srcRect.y += roi.y - targetRect.y;
    src(srcRect).copyTo(dst(roi));
    return dst;
}
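For the concrete numbers in the question (a 10x10 square whose center should land on (8,8) of a 20x20 image), a call might look like the untested sketch below; small10 and big20 are hypothetical placeholder Mats, not something from your code:
// hypothetical test data: a white 10x10 patch and a black 20x20 background
cv::Mat small10(10, 10, CV_8UC3, cv::Scalar(255, 255, 255));
cv::Mat big20(20, 20, CV_8UC3, cv::Scalar(0, 0, 0));
// place the center of the 10x10 patch at coordinate (8,8) of the 20x20 image
cv::Mat out = placeImageInImage(cv::Point(8, 8), small10, big20);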
using this code to test:
int main(int argc, char* argv[])
{
    cv::Mat input = cv::imread("C:/StackOverflow/Input/Lenna.png");
    cv::Mat input2;
    cv::resize(input, input2, cv::Size(256, 256));
    cv::Mat result = placeImageInImage(cv::Point(256, 256), input2, input);
    cv::imshow("input", input);
    cv::imshow("result", result);
    cv::waitKey(0);
    return 0;
}
I get:

Related

Detecting eyes using CascadeClassifier_GPU (OpenCV)

I'm trying to detect faces and eyes at the same time using CascadeClassifier_GPU in OpenCV.
Detecting faces works fine, but it doesn't detect any eyes.
I'm using OpenCV 2.4.9 (GPU version) in Visual Studio 2010.
Here is my code:
for(;;)
{
    cap >> frame; // get a new frame from camera
    double t = (double)getTickCount();
    GpuMat faces;
    Mat frame_gray;
    cvtColor(frame, frame_gray, CV_BGR2GRAY); // convert to gray, face detection does NOT use color info
    int scale = 2;
    cv::Mat resized_frame_gray((int)(frame_gray.rows / scale), (int)(frame_gray.cols / scale), CV_8UC1);
    cv::resize(frame_gray, resized_frame_gray, resized_frame_gray.size());
    GpuMat gray_gpu(resized_frame_gray); // copy the gray image to GPU memory
    equalizeHist(resized_frame_gray, resized_frame_gray);
    int detect_num = cascadeFace.detectMultiScale(gray_gpu, faces, 1.2, 4, Size(10, 10)); // call face detection routine
    Mat obj_host;
    faces.colRange(0, detect_num).download(obj_host); // retrieve results from GPU
    Rect* cfaces = obj_host.ptr<Rect>(); // results are now in "obj_host"
    for (int i = 0; i < detect_num; ++i)
    {
        Point pt1 = cfaces[i].tl();
        pt1.x *= scale;
        pt1.y *= scale;
        Size sz = cfaces[i].size();
        Point pt2(pt1.x + sz.width * scale, pt1.y + sz.height * scale);
        rectangle(frame, pt1, pt2, Scalar(255));
        Mat faceROI = resized_frame_gray(cfaces[i]);
        GpuMat faceROIgpu(faceROI);
        GpuMat eyes;
        //-- In each face, detect eyes
        int detect_num2 = cascade_eye.detectMultiScale(faceROIgpu, eyes, 1.2, 4, Size(10, 10));
        Mat obj_host_eye;
        eyes.colRange(0, detect_num2).download(obj_host_eye);
        Rect* cEyes = obj_host_eye.ptr<Rect>();
        //cout << detect_num2 << endl;
        for (int j = 0; j < detect_num2; j++)
        {
            // eye rectangles are relative to faceROI, so offset by the face's top-left corner
            Point pt1_e = cEyes[j].tl() + cfaces[i].tl();
            pt1_e.x *= scale;
            pt1_e.y *= scale;
            Size sz_e = cEyes[j].size();
            Point pt2_e(pt1_e.x + sz_e.width * scale, pt1_e.y + sz_e.height * scale);
            rectangle(frame, pt1_e, pt2_e, Scalar(255, 0, 255));
        }
        imshow("faces", frame);
        waitKey(1);
    }
}

How to get a SOLID blob for each object using findContours?

I want to segment a solid blob for each object from the extracted foreground and bound each object with a box. But my code shows many boxes bounding random blobs on one object, because my blob is not solid for one object and there are many small blobs too.
Here is my code:
#include"stdafx.h"
#include<vector>
#include<iostream>
#include<opencv2/opencv.hpp>
#include<opencv2/core/core.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<opencv2/highgui/highgui.hpp>
int main(int argc, char *argv[])
{
cv::Mat frame;
cv::Mat fg;
cv::Mat thresholded;
cv::Mat thresholded2;
cv::Mat result;
cv::Mat bgmodel;
cv::namedWindow("Frame");
cv::namedWindow("Background Model");
cv::VideoCapture cap(0);
cv::BackgroundSubtractorMOG2 bgs;
bgs.nmixtures = 2;
bgs.history = 60;
bgs.varThreshold = 15;
bgs.bShadowDetection = true;
bgs.nShadowDetection = 0;
bgs.fTau = 0.5;
std::vector<std::vector<cv::Point>> contours;
for(;;)
{
cap >> frame;
cv::blur(frame,frame,cv::Size(10,10));
bgs.operator()(frame,fg);
bgs.getBackgroundImage(bgmodel);
cv::erode(fg,fg,cv::Mat());
cv::dilate(fg,fg,cv::Mat());
cv::threshold(fg,thresholded,70.0f,255,CV_THRESH_BINARY);
cv::threshold(fg,thresholded2,70.0f,255,CV_THRESH_BINARY);
cv::findContours(thresholded,contours,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_NONE);
cv::cvtColor(thresholded2,result,CV_GRAY2RGB);
int cmin= 50;
int cmax= 10000;
std::vector<std::vector<cv::Point>>::iterator itc=contours.begin();
while (itc!=contours.end()) {
if (itc->size() < cmin || itc->size() > cmax){
itc= contours.erase(itc);} else{
std::vector<cv::Point> pts = *itc;
cv::Mat pointsMatrix = cv::Mat(pts);
cv::Scalar color( 0, 255, 0 );
cv::Rect r0= cv::boundingRect(pointsMatrix);
cv::rectangle(result,r0,color,2);
++itc;
}
}
cv::imshow("Frame",result);
cv::imshow("Background Model",bgmodel);
if(cv::waitKey(30) >= 0) break;
}
return 0;
}
And the result here:
Frame
So how can I segment a solid blob for each object found from the extracted foreground, and bound the objects one by one with a box?
A solid blob means a solid white blob like here: xxx
I'll appreciate any help here.
NB: Sorry for my bad English. :)
=================
This is my edited code!
#include"stdafx.h"
#include<vector>
#include<iostream>
#include<opencv2/opencv.hpp>
#include<opencv2/core/core.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<opencv2/highgui/highgui.hpp>
int main(int argc, char *argv[])
{
cv::Mat frame;
cv::Mat fg;
cv::Mat blurred;
cv::Mat thresholded;
cv::Mat thresholded2;
cv::Mat result;
cv::Mat bgmodel;
cv::namedWindow("Frame");
cv::namedWindow("Background Model");
cv::VideoCapture cap(0);
cv::BackgroundSubtractorMOG2 bgs;
bgs.nmixtures = 2;
bgs.history = 60;
bgs.varThreshold = 15;
bgs.bShadowDetection = true;
bgs.nShadowDetection = 0;
bgs.fTau = 0.5;
std::vector<std::vector<cv::Point>> contours;
for(;;)
{
cap >> frame;
cv::blur(frame,blurred,cv::Size(10,10));
bgs.operator()(blurred,fg);
bgs.getBackgroundImage(bgmodel);
cv::threshold(fg,thresholded,70.0f,255,CV_THRESH_BINARY);
cv::threshold(fg,thresholded2,70.0f,255,CV_THRESH_BINARY);
cv::Mat element50(50,50,CV_8U,cv::Scalar(1));
cv::morphologyEx(thresholded,thresholded,cv::MORPH_CLOSE,element50);
cv::morphologyEx(thresholded2,thresholded2,cv::MORPH_CLOSE,element50);
cv::findContours(thresholded,contours,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE);
cv::cvtColor(thresholded2,result,CV_GRAY2RGB);
int cmin= 50;
int cmax= 10000;
std::vector<std::vector<cv::Point>>::iterator itc=contours.begin();
while (itc!=contours.end()) {
if (itc->size() < cmin || itc->size() > cmax){
itc= contours.erase(itc);} else{
std::vector<cv::Point> pts = *itc;
cv::Mat pointsMatrix = cv::Mat(pts);
cv::Scalar color( 0, 255, 0 );
cv::Rect r0= cv::boundingRect(pointsMatrix);
cv::rectangle(result,r0,color,2);
++itc;
}
}
cv::imshow("Frame",result);
cv::imshow("Background Model",bgmodel);
if(cv::waitKey(30) >= 0) break;
}
return 0;
}
And the result here: FRAME
Thanks to elactic. :)
You can try to merge blobs with morphological closing (which is the erosion of the dilation of a binary image). You can use the CV functions erode and dilate for that.
This tutorial should help you.
I assume you will still have to filter blobs by size after that.
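A minimal sketch of what that could look like, assuming fg is your thresholded binary foreground mask and result is the color image you draw on; the kernel size and the area limits are placeholder values you would have to tune:
// close small gaps so each object becomes one solid blob (dilate, then erode)
cv::Mat closed;
cv::Mat element = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(15, 15));
cv::morphologyEx(fg, closed, cv::MORPH_CLOSE, element);

// keep only contours whose area falls in a plausible range and box them
std::vector<std::vector<cv::Point>> blobs;
cv::findContours(closed, blobs, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
for (size_t i = 0; i < blobs.size(); i++)
{
    double area = cv::contourArea(blobs[i]);
    if (area < 500.0 || area > 100000.0) // placeholder size limits
        continue;
    cv::rectangle(result, cv::boundingRect(blobs[i]), cv::Scalar(0, 255, 0), 2);
}
Filtering on cv::contourArea is usually more robust than filtering on the number of contour points, as the loop in your code does.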

Extract hand bones from X-ray image

I have an x-ray image of a hand. I need to extract the bones automatically. I can easily segment the hand using different techniques, but I need to get the bones, and those techniques don't help. Some of the bones are brighter than others, so if I use thresholding some of them disappear while others become clearer as I raise the threshold. So I think maybe I should threshold only a region of the hand? Is it possible to threshold a ROI that is not a square? Or maybe you have other solutions or advice? Maybe there are libraries like OpenCV or something for that? Any help would be great!
Extended:
Raw Image / Expected Output
One approach could be to segment the hand and fingers from the image:
And then creating another image with just the hand silhouette:
Once you have the silhouette you can erode the image to make it a little smaller. This eroded silhouette is then subtracted from the hand & fingers image, resulting in the fingers:
The code below shows how to execute this approach:
void detect_hand_and_fingers(cv::Mat& src);
void detect_hand_silhoutte(cv::Mat& src);

int main(int argc, char* argv[])
{
    cv::Mat img = cv::imread(argv[1]);
    if (img.empty())
    {
        std::cout << "!!! imread() failed to open target image" << std::endl;
        return -1;
    }

    // Convert RGB Mat to GRAY
    cv::Mat gray;
    cv::cvtColor(img, gray, CV_BGR2GRAY);
    cv::Mat gray_silhouette = gray.clone();

    /* Isolate Hand + Fingers */
    detect_hand_and_fingers(gray);
    cv::imshow("Hand+Fingers", gray);
    cv::imwrite("hand_fingers.png", gray);

    /* Isolate Hand Silhouette and subtract it from the other image (Hand+Fingers) */
    detect_hand_silhoutte(gray_silhouette);
    cv::imshow("Hand", gray_silhouette);
    cv::imwrite("hand_silhoutte.png", gray_silhouette);

    /* Subtract Hand Silhouette from Hand+Fingers so we get only Fingers */
    cv::Mat fingers = gray - gray_silhouette;
    cv::imshow("Fingers", fingers);
    cv::imwrite("fingers_only.png", fingers);

    cv::waitKey(0);
    return 0;
}

void detect_hand_and_fingers(cv::Mat& src)
{
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(3,3), cv::Point(1,1));
    cv::morphologyEx(src, src, cv::MORPH_OPEN, kernel); // opening to reduce noise

    int adaptiveMethod = CV_ADAPTIVE_THRESH_GAUSSIAN_C; // CV_ADAPTIVE_THRESH_MEAN_C, CV_ADAPTIVE_THRESH_GAUSSIAN_C
    cv::adaptiveThreshold(src, src, 255,
                          adaptiveMethod, CV_THRESH_BINARY,
                          9, -5);

    int dilate_sz = 1;
    cv::Mat element = cv::getStructuringElement(cv::MORPH_ELLIPSE,
                                                cv::Size(2*dilate_sz, 2*dilate_sz),
                                                cv::Point(dilate_sz, dilate_sz));
    cv::dilate(src, src, element);
}

void detect_hand_silhoutte(cv::Mat& src)
{
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(7, 7), cv::Point(3, 3));
    cv::morphologyEx(src, src, cv::MORPH_OPEN, kernel); // opening to reduce noise

    int adaptiveMethod = CV_ADAPTIVE_THRESH_MEAN_C; // CV_ADAPTIVE_THRESH_MEAN_C, CV_ADAPTIVE_THRESH_GAUSSIAN_C
    cv::adaptiveThreshold(src, src, 255,
                          adaptiveMethod, CV_THRESH_BINARY,
                          251, 5); // 251, 5

    int erode_sz = 5;
    cv::Mat element = cv::getStructuringElement(cv::MORPH_ELLIPSE,
                                                cv::Size(2*erode_sz + 1, 2*erode_sz + 1),
                                                cv::Point(erode_sz, erode_sz));
    cv::erode(src, src, element);

    int dilate_sz = 1;
    element = cv::getStructuringElement(cv::MORPH_ELLIPSE,
                                        cv::Size(2*dilate_sz + 1, 2*dilate_sz + 1),
                                        cv::Point(dilate_sz, dilate_sz));
    cv::dilate(src, src, element);

    cv::bitwise_not(src, src);
}

Image Sharpening Using Laplacian Filter

I was trying sharpening on some standard images from the Gonzalez book. Below is some code that I have tried, but it doesn't get close to the result of the sharpened image.
cvSmooth(grayImg, grayImg, CV_GAUSSIAN, 3, 0, 0, 0);
IplImage* laplaceImg = cvCreateImage(cvGetSize(oriImg), IPL_DEPTH_16S, 1);
IplImage* abs_laplaceImg = cvCreateImage(cvGetSize(oriImg), IPL_DEPTH_8U, 1);
cvLaplace(grayImg, laplaceImg, 3);
cvConvertScaleAbs(laplaceImg, abs_laplaceImg, 1, 0);
IplImage* dstImg = cvCreateImage(cvGetSize(oriImg), IPL_DEPTH_8U, 1);
cvAdd(abs_laplaceImg, grayImg, dstImg, NULL);
Before Sharpening
My Sharpening Result
Desired Result
Absolute Laplace
I think the problem is that you are blurring the image before taking the 2nd derivative.
Here is working code with the C++ API (I'm using OpenCV 2.4.3). I also tried it with MATLAB and the result is the same.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main(int /*argc*/, char** /*argv*/) {
    Mat img, imgLaplacian, imgResult;

    //------------------------------------------------------------------------------------------- test, first of all
    // now do it by hand
    img = (Mat_<uchar>(4,4) << 0,1,2,3,4,5,6,7,8,9,0,11,12,13,14,15);

    // first, the good result
    Laplacian(img, imgLaplacian, CV_8UC1);
    cout << "let opencv do it" << endl;
    cout << imgLaplacian << endl;

    Mat kernel = (Mat_<float>(3,3) <<
                  0,  1, 0,
                  1, -4, 1,
                  0,  1, 0);
    int window_size = 3;

    // now, reaaallly by hand
    // note that, to avoid padding, the result image will be smaller than the original one.
    Mat frame, frame32;
    Rect roi;
    imgLaplacian = Mat::zeros(img.size(), CV_32F);
    for (int y = 0; y < img.rows - window_size/2 - 1; y++) {
        for (int x = 0; x < img.cols - window_size/2 - 1; x++) {
            roi = Rect(x, y, window_size, window_size);
            frame = img(roi);
            frame.convertTo(frame, CV_32F);
            frame = frame.mul(kernel);
            float v = sum(frame)[0];
            imgLaplacian.at<float>(y, x) = v;
        }
    }
    imgLaplacian.convertTo(imgLaplacian, CV_8U);
    cout << "dudee" << imgLaplacian << endl;

    // a little bit less "by hand"..
    // using cv::filter2D
    filter2D(img, imgLaplacian, -1, kernel);
    cout << imgLaplacian << endl;

    //------------------------------------------------------------------------------------------- real stuff now
    img = imread("moon.jpg", 0); // load grayscale image

    // ok, now try a different kernel
    kernel = (Mat_<float>(3,3) <<
              1,  1, 1,
              1, -8, 1,
              1,  1, 1); // another, stronger approximation of the second derivative

    // do the laplacian filtering as it is
    // well, we need to convert everything to something deeper than CV_8U
    // because the kernel has some negative values,
    // and we can expect in general to have a Laplacian image with negative values
    // BUT an 8-bit unsigned int (the one we are working with) can contain values from 0 to 255
    // so the possible negative numbers will be truncated
    filter2D(img, imgLaplacian, CV_32F, kernel);
    img.convertTo(img, CV_32F);
    imgResult = img - imgLaplacian;

    // convert back to 8-bit gray scale
    imgResult.convertTo(imgResult, CV_8U);
    imgLaplacian.convertTo(imgLaplacian, CV_8U);

    namedWindow("laplacian", CV_WINDOW_AUTOSIZE);
    imshow("laplacian", imgLaplacian);
    namedWindow("result", CV_WINDOW_AUTOSIZE);
    imshow("result", imgResult);

    while (true) {
        char c = (char)waitKey(10);
        if (c == 27) { break; }
    }
    return 0;
}
Have fun!
I think the main problem lies in the fact that you do img + laplace, while img - laplace gives better results. I remember that img - 2*laplace was best, but I cannot find where I read that, probably in one of the books I read at university.
You need to do img - laplace instead of img + laplace.
laplace: f(x,y) = f(x-1,y) + f(x+1,y) + f(x,y-1) + f(x,y+1) - 4*f(x,y)
So, if you subtract the Laplacian from the original image, you see that the minus sign in front of 4*f(x,y) gets negated and this term becomes positive:
img - laplace = 5*f(x,y) - f(x-1,y) - f(x+1,y) - f(x,y-1) - f(x,y+1)
You could also use a kernel with 5 in the center pixel instead of -4 to make the sharpening a one-step process, instead of computing the Laplacian and then doing img - laplace.
This would be the final kernel.
Mat kernel = (Mat_<float>(3,3) <<
     0, -1,  0,
    -1,  5, -1,
     0, -1,  0);
It is indeed a well-known result in image processing that if you subtract its Laplacian from an image, the edges are amplified, giving a sharper image.
Laplacian filter kernel algorithm: sharpened_pixel = 5 * current - left - right - up - down
So the code will look like this:
void sharpen(const Mat& img, Mat& result)
{
    result.create(img.size(), img.type());

    // Process the inner pixels; the image border is handled separately below.
    for (int row = 1; row < img.rows - 1; row++)
    {
        const uchar* previous = img.ptr<const uchar>(row - 1); // previous row
        const uchar* current  = img.ptr<const uchar>(row);     // row being processed
        const uchar* next     = img.ptr<const uchar>(row + 1); // next row
        uchar* output = result.ptr<uchar>(row);

        int ch = img.channels();
        int starts = ch;
        int ends = (img.cols - 1) * ch;
        for (int col = starts; col < ends; col++)
        {
            // Offsets are in channel units, so the left/right neighbours of a channel
            // value are at col-ch and col+ch; this keeps the code correct for any
            // number of channels. Writing to output[col] keeps the result aligned
            // with the input.
            output[col] = saturate_cast<uchar>(5 * current[col] - current[col - ch] - current[col + ch] - previous[col] - next[col]);
        }
    } // end loop

    // Set the border pixels to 0.
    result.row(0).setTo(Scalar::all(0));
    result.row(result.rows - 1).setTo(Scalar::all(0));
    result.col(0).setTo(Scalar::all(0));
    result.col(result.cols - 1).setTo(Scalar::all(0));
}
int main()
{
    Mat lena = imread("lena.jpg");
    Mat sharpenedLena;
    sharpen(lena, sharpenedLena);

    imshow("lena", lena);
    imshow("sharpened lena", sharpenedLena);
    cvWaitKey();
    return 0;
}
If you are lazier, have fun with the following.
int main()
{
    Mat lena = imread("lena.jpg");
    Mat sharpenedLena;
    // one-step sharpening kernel: 5 in the center, -1 for the four neighbours
    Mat kernel = (Mat_<float>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
    cv::filter2D(lena, sharpenedLena, lena.depth(), kernel);

    imshow("lena", lena);
    imshow("sharpened lena", sharpenedLena);
    cvWaitKey();
    return 0;
}
And the result looks like this:

Read HSV value of a pixel in OpenCV

How would you go about reading the pixel value in HSV format rather than RGB? The code below reads the pixel value at the circles' centers in RGB format. Is there much difference when it comes to reading the value in HSV?
int main(int argc, char** argv)
{
    //load image from directory
    IplImage* img = cvLoadImage("C:\\Users\\Nathan\\Desktop\\SnookerPic.png");

    IplImage* gray = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    CvMemStorage* storage = cvCreateMemStorage(0);

    //convert to grayscale
    cvCvtColor(img, gray, CV_BGR2GRAY);

    // This is done so as to prevent a lot of false circles from being detected
    cvSmooth(gray, gray, CV_GAUSSIAN, 7, 7);

    IplImage* canny = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    IplImage* rgbcanny = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    cvCanny(gray, canny, 50, 100, 3);

    //detect circles
    CvSeq* circles = cvHoughCircles(gray, storage, CV_HOUGH_GRADIENT, 1, 35.0, 75, 60, 0, 0);
    cvCvtColor(canny, rgbcanny, CV_GRAY2BGR);

    //draw all detected circles
    for (int i = 0; i < circles->total; i++)
    {
        // round the floats to an int
        float* p = (float*)cvGetSeqElem(circles, i);
        cv::Point center(cvRound(p[0]), cvRound(p[1]));
        int radius = cvRound(p[2]);

        //uchar* ptr;
        //ptr = cvPtr2D(img, center.y, center.x, NULL);
        //printf("B: %d G: %d R: %d\n", ptr[0],ptr[1],ptr[2]);

        CvScalar s;
        s = cvGet2D(img, center.y, center.x); //colour of circle
        printf("B: %f G: %f R: %f\n", s.val[0], s.val[1], s.val[2]);

        // draw the circle center
        cvCircle(img, center, 3, CV_RGB(0,255,0), -1, 8, 0);
        // draw the circle outline
        cvCircle(img, center, radius+1, CV_RGB(0,0,255), 2, 8, 0);

        //display coordinates
        printf("x: %d y: %d r: %d\n", center.x, center.y, radius);
    }

    //create window
    //cvNamedWindow("circles", 1);
    cvNamedWindow("SnookerImage", 1);

    //show image in window
    //cvShowImage("circles", rgbcanny);
    cvShowImage("SnookerImage", img);
    cvSaveImage("out.png", img);

    //cvDestroyWindow("SnookerImage");
    //cvDestroyWindow("circles");
    //cvReleaseMemStorage("storage");

    cvWaitKey(0);
    return 0;
}
If you use the C++ interface, you can use
cv::cvtColor(img, img, CV_BGR2HSV);
See the documentation for cvtColor for more information.
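In the context of your circle-detection code, a rough, untested sketch of reading the value at a circle center in HSV could look like this; bgr and hsv are just extra Mats introduced for illustration, and center is the cv::Point you already compute in the loop:
// wrap the existing IplImage in a cv::Mat (no copy), convert to HSV,
// then read the H, S and V values at a detected circle center
cv::Mat bgr(img);  // img is the IplImage* loaded above
cv::Mat hsv;
cv::cvtColor(bgr, hsv, CV_BGR2HSV);
cv::Vec3b hsvPixel = hsv.at<cv::Vec3b>(center.y, center.x); // note: row (y) first
printf("H: %d S: %d V: %d\n", hsvPixel[0], hsvPixel[1], hsvPixel[2]);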
Update:
Reading and writing pixels the slow way (assuming that the HSV values are stored as a cv::Vec3b (doc))
cv::Vec3b pixel = image.at<cv::Vec3b>(0,0); // read pixel (0,0) (make copy)
pixel[0] = 0; // H
pixel[1] = 0; // S
pixel[2] = 0; // V
image.at<cv::Vec3b>(0,0) = pixel; // write pixel (0,0) (copy pixel back to image)
Using the image.at<...>(x, y) notation (see the docs) is quite slow if you want to manipulate every pixel. There is an article in the documentation on how to access the pixels faster. You can apply the iterator method like this:
cv::MatIterator_<cv::Vec3b> it = image.begin<cv::Vec3b>(),
                            it_end = image.end<cv::Vec3b>();
for (; it != it_end; ++it)
{
    // work with pixel in here, e.g.:
    cv::Vec3b& pixel = *it; // reference to pixel in image
    pixel[0] = 0; // changes pixel in image
}
