Extract hand bones from X-ray image - OpenCV

I have an X-ray image of a hand. I need to extract the bones automatically. I can easily segment the hand using different techniques, but those techniques don't help me get the bones. Some of the bones are brighter than others, so if I use thresholding, some of them disappear while others become clearer as I raise the threshold. I think maybe I should threshold only a region of the hand. Is it possible to threshold an ROI that is not a square? Or maybe you have other solutions or advice? Are there libraries like OpenCV or something for that? Any help would be great!
Extended:
(Images: raw image and expected output.)

One approach could be to segment the hand and fingers from the image:
Then create another image with just the hand silhouette:
Once you have the silhouette, you can erode the image to make it a little smaller. The eroded silhouette is then subtracted from the hand & fingers image, leaving only the fingers:
The code below shows how to execute this approach:
#include <opencv2/opencv.hpp>
#include <iostream>

void detect_hand_and_fingers(cv::Mat& src);
void detect_hand_silhoutte(cv::Mat& src);

int main(int argc, char* argv[])
{
    cv::Mat img = cv::imread(argv[1]);
    if (img.empty())
    {
        std::cout << "!!! imread() failed to open target image" << std::endl;
        return -1;
    }

    // Convert BGR Mat to GRAY
    cv::Mat gray;
    cv::cvtColor(img, gray, CV_BGR2GRAY);
    cv::Mat gray_silhouette = gray.clone();

    /* Isolate Hand + Fingers */
    detect_hand_and_fingers(gray);
    cv::imshow("Hand+Fingers", gray);
    cv::imwrite("hand_fingers.png", gray);

    /* Isolate Hand Silhouette and subtract it from the other image (Hand+Fingers) */
    detect_hand_silhoutte(gray_silhouette);
    cv::imshow("Hand", gray_silhouette);
    cv::imwrite("hand_silhoutte.png", gray_silhouette);

    /* Subtract Hand Silhouette from Hand+Fingers so we get only Fingers */
    cv::Mat fingers = gray - gray_silhouette;
    cv::imshow("Fingers", fingers);
    cv::imwrite("fingers_only.png", fingers);

    cv::waitKey(0);
    return 0;
}

void detect_hand_and_fingers(cv::Mat& src)
{
    // Morphological opening to remove small specks.
    // (The original code passed cv::MORPH_ELLIPSE as the operation; that is a
    //  structuring-element shape whose value happens to equal MORPH_OPEN.)
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(3,3), cv::Point(1,1));
    cv::morphologyEx(src, src, cv::MORPH_OPEN, kernel);

    int adaptiveMethod = CV_ADAPTIVE_THRESH_GAUSSIAN_C; // CV_ADAPTIVE_THRESH_MEAN_C, CV_ADAPTIVE_THRESH_GAUSSIAN_C
    cv::adaptiveThreshold(src, src, 255,
                          adaptiveMethod, CV_THRESH_BINARY,
                          9, -5);

    int dilate_sz = 1;
    cv::Mat element = cv::getStructuringElement(cv::MORPH_ELLIPSE,
                                                cv::Size(2*dilate_sz, 2*dilate_sz),
                                                cv::Point(dilate_sz, dilate_sz));
    cv::dilate(src, src, element);
}

void detect_hand_silhoutte(cv::Mat& src)
{
    // Strong opening to keep only the coarse hand shape
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(7, 7), cv::Point(3, 3));
    cv::morphologyEx(src, src, cv::MORPH_OPEN, kernel);

    int adaptiveMethod = CV_ADAPTIVE_THRESH_MEAN_C; // CV_ADAPTIVE_THRESH_MEAN_C, CV_ADAPTIVE_THRESH_GAUSSIAN_C
    cv::adaptiveThreshold(src, src, 255,
                          adaptiveMethod, CV_THRESH_BINARY,
                          251, 5); // 251, 5

    int erode_sz = 5;
    cv::Mat element = cv::getStructuringElement(cv::MORPH_ELLIPSE,
                                                cv::Size(2*erode_sz + 1, 2*erode_sz + 1),
                                                cv::Point(erode_sz, erode_sz));
    cv::erode(src, src, element);

    int dilate_sz = 1;
    element = cv::getStructuringElement(cv::MORPH_ELLIPSE,
                                        cv::Size(2*dilate_sz + 1, 2*dilate_sz + 1),
                                        cv::Point(dilate_sz, dilate_sz));
    cv::dilate(src, src, element);

    cv::bitwise_not(src, src);
}
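As for the question about thresholding a non-rectangular ROI: OpenCV's threshold functions operate on whole (rectangular) matrices, but you can threshold the whole image and then keep only the pixels inside a binary mask of the region. A minimal sketch, where hand_mask is a hypothetical 8-bit mask (255 inside the hand) produced by your segmentation:

// Threshold globally, then restrict the result to the masked (hand) region
cv::Mat thresholded;
cv::adaptiveThreshold(gray, thresholded, 255,
                      CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY, 9, -5);
cv::Mat bones_in_hand;
cv::bitwise_and(thresholded, hand_mask, bones_in_hand); // hand_mask: assumed given

You can also compute statistics restricted to the mask, e.g. cv::mean(gray, hand_mask), to pick a threshold adapted to that region.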

Related

How can I draw a boundary across a particular colour in OpenCV?

Suppose I have an image. I basically want to draw a boundary across a particular colour that I want. I know the HSV minimum and maximum scalar values of that colour, but I don't know how to proceed further.
#include <iostream>
#include <stdio.h>
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
    VideoCapture cap(0);
    while (true)
    {
        Mat img;
        cap.read(img);
        Mat dst;
        Mat imghsv;
        cvtColor(img, imghsv, COLOR_BGR2HSV);
        inRange(imghsv,
                Scalar(0, 30, 0),
                Scalar(20, 150, 255),
                dst
                );
        imshow("name", dst);
        if (waitKey(30) == 27) // wait for 'esc' key press for 30ms
        {
            cout << "esc key is pressed by user" << endl;
            break;
        }
    }
}
The inRange function works well, but I am not able to draw a boundary across whatever is white (I mean whichever pixels are in the range specified).
You need to first segment the color, and then find the contours of the segmented image.
SEGMENT THE COLOR
Working in HSV is in general a good idea for segmenting colors. Once you have the correct lower and upper boundaries, you can easily segment the color.
A simple approach is to use inRange.
You can find how to use it here for example.
FIND BOUNDARIES
Once you have the binary mask (obtained through segmentation), you can find its boundaries using findContours. You can refer to this or this to know how to use findContours to detect the boundary, and drawContours to draw it.
UPDATE
Here is a working example of how to draw a contour on segmented objects.
I used some morphology to clean the mask, and changed the tracked color to blue, but you can use your favorite color.
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main(int argc, char** argv)
{
    VideoCapture cap(0);
    while (true)
    {
        Mat img;
        cap.read(img);
        Mat dst;
        Mat imghsv;
        cvtColor(img, imghsv, COLOR_BGR2HSV);
        inRange(imghsv, Scalar(110, 100, 100), Scalar(130, 255, 255), dst); // Detect blue objects

        // Remove some noise using morphological operators
        Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(7, 7));
        morphologyEx(dst, dst, MORPH_OPEN, kernel);

        // Find contours
        vector<vector<Point>> contours;
        findContours(dst.clone(), contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

        // Draw all contours (green)
        drawContours(img, contours, -1, Scalar(0, 255, 0));

        // If you want to draw a contour for a particular one, say the biggest...
        // Find the biggest object
        if (!contours.empty())
        {
            int idx_biggest = 0;
            size_t val_biggest = contours[0].size();
            for (size_t i = 0; i < contours.size(); ++i)
            {
                if (val_biggest < contours[i].size())
                {
                    val_biggest = contours[i].size();
                    idx_biggest = (int)i;
                }
            }

            // Draw a single contour (blue)
            drawContours(img, contours, idx_biggest, Scalar(255, 0, 0));

            // You also want the rotated rectangle (red)?
            RotatedRect r = minAreaRect(contours[idx_biggest]);
            Point2f pts[4];
            r.points(pts);
            for (int j = 0; j < 4; ++j)
            {
                line(img, pts[j], pts[(j + 1) % 4], Scalar(0, 0, 255), 2);
            }
        }

        imshow("name", dst);
        imshow("image", img);
        if (waitKey(30) == 27) // wait for 'esc' key press for 30ms
        {
            cout << "esc key is pressed by user" << endl;
            break;
        }
    }
}
If you want a particular hue to be detected, you can create a mask to select only that color from your original image, operating on the hue channel (img):
// img: the 8-bit hue channel; color_img: the original BGR image.
// specific_hue and masked_image are assumed to be defined by the caller.
cv::Mat mask = cv::Mat::zeros(img.size(), CV_8UC1);
for (int i = 0; i < img.rows; i++) {
    for (int j = 0; j < img.cols; j++) { // note: the inner loop must increment j, not i
        if (img.at<uchar>(i,j) == (uchar)specific_hue) {
            mask.at<uchar>(i,j) = (uchar)255;
        }
    }
}
color_img.copyTo(masked_image, mask);
If you want something less strict, you can define a range around the color to allow more of the image to pass through the mask.
cv::Mat mask = cv::Mat::zeros(img.size(), CV_8UC1);
int threshold = 5;
for (int i = 0; i < img.rows; i++) {
    for (int j = 0; j < img.cols; j++) { // same fix: increment j here
        if ((img.at<uchar>(i,j) > (uchar)(specific_hue - threshold)) &&
            (img.at<uchar>(i,j) < (uchar)(specific_hue + threshold))) {
            mask.at<uchar>(i,j) = (uchar)255;
        }
    }
}
color_img.copyTo(masked_image, mask);
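As a side note, the per-pixel loops above can be collapsed into a single inRange call on the hue channel. A minimal equivalent sketch under the same assumptions (img is the 8-bit hue channel; specific_hue and threshold as above):

cv::Mat mask;
cv::inRange(img,
            cv::Scalar(specific_hue - threshold),
            cv::Scalar(specific_hue + threshold),
            mask); // 255 where the hue lies inside the band, 0 elsewhere
// note: inRange bounds are inclusive, unlike the strict comparisons above
color_img.copyTo(masked_image, mask);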

How to get better results with the OpenCV face recognition module

I'm trying to use OpenCV's face recognition module to recognize 2 subjects from a video. I cropped 30 face images of the first subject and 20 face images of the second subject from the video and I use these as my training set.
I've tested all three approaches (Eigenfaces, Fisherfaces and LBP histograms), but I'm not getting good results with any of them. Sometimes the first subject is classified as the second subject and vice versa, sometimes false detections are classified as one of the two subjects, and sometimes other people in the video are classified as one of the two subjects.
How can I improve performance? Would enlarging the training set help? Are there any other packages I can consider that perform face recognition in C++? I think it should be an easy task, as I'm trying to recognize only two different subjects.
Here is my code (I'm using OpenCV 2.4.7 on Windows 8 with VS2012):
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/contrib/contrib.hpp"
#include <iostream>
#include <stdio.h>
#include <fstream>
#include <sstream>
#define EIGEN 0
#define FISHER 0
#define LBPH 1;
using namespace std;
using namespace cv;
/** Function Headers */
void detectAndDisplay( Mat frame , int i,Ptr<FaceRecognizer> model);
static Mat toGrayscale(InputArray _src) {
Mat src = _src.getMat();
// only allow one channel
if(src.channels() != 1) {
CV_Error(CV_StsBadArg, "Only Matrices with one channel are supported");
}
// create and return normalized image
Mat dst;
cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
return dst;
}
static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
std::ifstream file(filename.c_str(), ifstream::in);
if (!file) {
string error_message = "No valid input file was given, please check the given filename.";
CV_Error(CV_StsBadArg, error_message);
}
string line, path, classlabel;
while (getline(file, line)) {
stringstream liness(line);
getline(liness, path, separator);
getline(liness, classlabel);
if(!path.empty() && !classlabel.empty()) {
images.push_back(imread(path, 0));
labels.push_back(atoi(classlabel.c_str()));
}
}
}
/** Global variables */
String face_cascade_name = "C:\\OIM\\code\\OIM2 - face detection\\Debug\\haarcascade_frontalface_alt.xml";
//String face_cascade_name = "C:\\OIM\\code\\OIM2 - face detection\\Debug\\NewCascade.xml";
//String face_cascade_name = "C:\\OIM\\code\\OIM2 - face detection\\Debug\\haarcascade_eye_tree_eyeglasses.xml";
String eyes_cascade_name = "C:\\OIM\\code\\OIM2 - face detection\\Debug\\haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);
/** #function main */
int main( int argc, const char** argv )
{
string fn_csv = "C:\\OIM\\faces_org.csv";
// These vectors hold the images and corresponding labels.
vector<Mat> images;
vector<int> labels;
// Read in the data. This can fail if no valid
// input filename is given.
try {
read_csv(fn_csv, images, labels);
} catch (cv::Exception& e) {
cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
// nothing more we can do
exit(1);
}
// Quit if there are not enough images for this demo.
if(images.size() <= 1) {
string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
CV_Error(CV_StsError, error_message);
}
// Get the height from the first image. We'll need this
// later in code to reshape the images to their original
// size:
int height = images[0].rows;
// The following lines create an Eigenfaces model for
// face recognition and train it with the images and
// labels read from the given CSV file.
// This here is a full PCA, if you just want to keep
// 10 principal components (read Eigenfaces), then call
// the factory method like this:
//
// cv::createEigenFaceRecognizer(10);
//
// If you want to create a FaceRecognizer with a
// confidennce threshold, call it with:
//
// cv::createEigenFaceRecognizer(10, 123.0);
//
//Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
#if EIGEN
Ptr<FaceRecognizer> model = createEigenFaceRecognizer(10,2000000000);
#elif FISHER
Ptr<FaceRecognizer> model = createFisherFaceRecognizer(0, 200000000);
#elif LBPH
Ptr<FaceRecognizer> model =createLBPHFaceRecognizer(1,8,8,8,200000000);
#endif
model->train(images, labels);
Mat frame;
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
// Get the frame rate
bool stop(false);
int count=1;
char filename[512];
for (int i=1;i<=517;i++){
sprintf(filename,"C:\\OIM\\original_frames2\\image%d.jpg",i);
Mat frame=imread(filename);
detectAndDisplay(frame,i,model);
waitKey(0);
}
return 0;
}
/** #function detectAndDisplay */
void detectAndDisplay( Mat frame ,int i, Ptr<FaceRecognizer> model)
{
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, CV_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
//face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 1, 0|CV_HAAR_SCALE_IMAGE, Size(10, 10) );
for( size_t i = 0; i < faces.size(); i++ )
{
Rect roi = Rect(faces[i].x,faces[i].y,faces[i].width,faces[i].height);
Mat face=frame_gray(roi);
resize(face,face,Size(200,200));
int predictedLabel = -1;
double confidence = 0.0;
model->predict(face, predictedLabel, confidence);
//imshow("gil",face);
//waitKey(0);
#if EIGEN
int M=10000;
#elif FISHER
int M=500;
#elif LBPH
int M=300;
#endif
Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
if ((predictedLabel==1)&& (confidence<M))
ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 0, 0, 255 ), 4, 8, 0 );
if ((predictedLabel==0)&& (confidence<M))
ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 0), 4, 8, 0 );
if (confidence>M)
ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 0, 255, 0), 4, 8, 0 );
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
for( size_t j = 0; j < eyes.size(); j++ )
{
Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
//circle( frame, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
//-- Show what you got
//imshow( window_name, frame );
char filename[512];
sprintf(filename,"C:\\OIM\\FaceRecognitionResults\\image%d.jpg",i);
imwrite(filename,frame);
}
Thanks in advance,
Gil.
First, as commented, increase the number of samples if possible, and include the variations (illumination, slight poses, etc.) you expect in the video. However, especially for Eigenfaces/Fisherfaces, adding many more images will not by itself increase performance. Sadly, the best number of training samples depends on your data.
The more important point is that the difficulty of the problem depends entirely on your video. If your video contains variations in illumination or pose, then you can't expect purely appearance-based methods (e.g. Eigenfaces) or a texture descriptor (LBP) to be successful. First, you might want to detect faces. Then:
You might want to estimate the face pose and warp it to frontal; check Active Appearance Models and Active Shape Models
Use histogram equalization to attenuate the illumination problem
Fitting an ellipse to the detected face region will help against background noise (a sketch combining this and the previous step follows below)
Of course, there are many other methods available in the literature; the steps above are implemented in OpenCV and commonly known.
Hope it helps.
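For illustration, a minimal sketch of those last two preprocessing steps (histogram equalization plus an elliptical mask around the detected face); the function name preprocessFace and the ellipse proportions are my own illustrative choices, not part of any OpenCV API:

// faceGray: cropped grayscale face (assumed to come from the cascade detector)
cv::Mat preprocessFace(const cv::Mat& faceGray)
{
    // Attenuate illumination differences
    cv::Mat equalized;
    cv::equalizeHist(faceGray, equalized);

    // Mask out the background corners with an ellipse centered on the face
    cv::Mat mask = cv::Mat::zeros(equalized.size(), CV_8UC1);
    cv::ellipse(mask,
                cv::Point(equalized.cols / 2, equalized.rows / 2),
                cv::Size(cvRound(equalized.cols * 0.45), cvRound(equalized.rows * 0.55)),
                0, 0, 360, cv::Scalar(255), -1); // filled ellipse
    cv::Mat result = cv::Mat::zeros(equalized.size(), equalized.type());
    equalized.copyTo(result, mask);
    return result;
}

Feeding such preprocessed crops to the recognizer instead of raw rectangles should reduce the influence of background pixels.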

Image Sharpening Using Laplacian Filter

I was trying sharpening on some standard images from Gonzalez's book. Below is some code that I have tried, but it doesn't get close to the sharpened result in the book.
cvSmooth(grayImg, grayImg, CV_GAUSSIAN, 3, 0, 0, 0);
IplImage* laplaceImg = cvCreateImage(cvGetSize(oriImg), IPL_DEPTH_16S, 1);
IplImage* abs_laplaceImg = cvCreateImage(cvGetSize(oriImg), IPL_DEPTH_8U, 1);
cvLaplace(grayImg, laplaceImg, 3);
cvConvertScaleAbs(laplaceImg, abs_laplaceImg, 1, 0);
IplImage* dstImg = cvCreateImage(cvGetSize(oriImg), IPL_DEPTH_8U, 1);
cvAdd(abs_laplaceImg, grayImg, dstImg, NULL);
(Images: before sharpening, my sharpening result, desired result, absolute Laplacian.)
I think the problem is that you are blurring the image before taking the 2nd derivative.
Here is working code with the C++ API (I'm using OpenCV 2.4.3). I also tried it in MATLAB and the result is the same.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main(int /*argc*/, char** /*argv*/) {
    Mat img, imgLaplacian, imgResult;

    //--- test, first of all: now do it by hand
    img = (Mat_<uchar>(4,4) << 0,1,2,3,4,5,6,7,8,9,0,11,12,13,14,15);

    // first, the good result
    Laplacian(img, imgLaplacian, CV_8UC1);
    cout << "let opencv do it" << endl;
    cout << imgLaplacian << endl;

    Mat kernel = (Mat_<float>(3,3) <<
                  0,  1, 0,
                  1, -4, 1,
                  0,  1, 0);
    int window_size = 3;

    // now, reaaallly by hand
    // note that, to avoid padding, the result image will be smaller than the original one.
    Mat frame, frame32;
    Rect roi;
    imgLaplacian = Mat::zeros(img.size(), CV_32F);
    for (int y = 0; y < img.rows - window_size/2 - 1; y++) {
        for (int x = 0; x < img.cols - window_size/2 - 1; x++) {
            roi = Rect(x, y, window_size, window_size);
            frame = img(roi);
            frame.convertTo(frame, CV_32F);
            frame = frame.mul(kernel);
            float v = sum(frame)[0];
            imgLaplacian.at<float>(y,x) = v;
        }
    }
    imgLaplacian.convertTo(imgLaplacian, CV_8U);
    cout << "dudee" << imgLaplacian << endl;

    // a little bit less "by hand"... using cv::filter2D
    filter2D(img, imgLaplacian, -1, kernel);
    cout << imgLaplacian << endl;

    //--- real stuff now
    img = imread("moon.jpg", 0); // load grayscale image

    // ok, now try a different kernel
    kernel = (Mat_<float>(3,3) <<
              1,  1, 1,
              1, -8, 1,
              1,  1, 1); // another, stronger approximation of the second derivative

    // Do the Laplacian filtering as it is.
    // We need to convert everything to something deeper than CV_8U, because
    // the kernel has some negative values, and we can expect in general to
    // have a Laplacian image with negative values. BUT an 8-bit unsigned int
    // (the one we are working with) can contain values from 0 to 255, so the
    // negative numbers would be truncated.
    filter2D(img, imgLaplacian, CV_32F, kernel);
    img.convertTo(img, CV_32F);
    imgResult = img - imgLaplacian;

    // convert back to 8-bit grayscale
    imgResult.convertTo(imgResult, CV_8U);
    imgLaplacian.convertTo(imgLaplacian, CV_8U);

    namedWindow("laplacian", CV_WINDOW_AUTOSIZE);
    imshow("laplacian", imgLaplacian);
    namedWindow("result", CV_WINDOW_AUTOSIZE);
    imshow("result", imgResult);

    while (true) {
        char c = (char)waitKey(10);
        if (c == 27) { break; }
    }
    return 0;
}
Have fun!
I think the main problem lies in the fact that you do img + laplace, while img - laplace would give better results. I remember that img - 2*laplace was best, but I cannot find where I read that, probably in one of the books I read in university.
You need to do img - laplace instead of img + laplace.
laplace: f(x,y) = f(x-1,y) + f(x+1,y) + f(x,y-1) + f(x,y+1) - 4*f(x,y)
So, if you subtract the laplace from the original image, you will see that the minus sign in front of 4*f(x,y) gets negated and this term becomes positive.
You could also use a kernel with 5 in the center pixel instead of -4, making the sharpening a one-step process instead of computing the Laplacian and then doing img - laplace. Why? Derive it yourself: img - laplace = f(x,y) - (f(x-1,y) + f(x+1,y) + f(x,y-1) + f(x,y+1) - 4*f(x,y)) = 5*f(x,y) - f(x-1,y) - f(x+1,y) - f(x,y-1) - f(x,y+1).
This would be the final kernel.
Mat kernel = (Mat_<float>(3,3) <<
               0, -1,  0,
              -1,  5, -1,   // 5 in the center: identity (1) minus the -4 of the Laplacian
               0, -1,  0);
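A minimal usage sketch for this one-step kernel (img is any 8-bit image you loaded yourself; filter2D applies the kernel in a single pass):

cv::Mat img = cv::imread("input.jpg", 0); // hypothetical input file
cv::Mat kernel = (cv::Mat_<float>(3,3) <<
                   0, -1,  0,
                  -1,  5, -1,
                   0, -1,  0);
cv::Mat sharpened;
cv::filter2D(img, sharpened, img.depth(), kernel); // one-step sharpening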
It is indeed a well-known result in image processing that if you subtract its Laplacian from an image, the image edges are amplified, giving a sharper image.
Laplacian filter kernel algorithm: sharpened_pixel = 5 * current - left - right - up - down
So the code will look like this:
#include <opencv2/opencv.hpp>

using namespace cv;

void sharpen(const Mat& img, Mat& result)
{
    result.create(img.size(), img.type());

    // Process the inner pixels; the border pixels are handled separately below
    for (int row = 1; row < img.rows - 1; row++)
    {
        // previous row
        const uchar* previous = img.ptr<const uchar>(row - 1);
        // current row to be processed
        const uchar* current = img.ptr<const uchar>(row);
        // next row
        const uchar* next = img.ptr<const uchar>(row + 1);
        uchar* output = result.ptr<uchar>(row);
        int ch = img.channels();
        int starts = ch;
        int ends = (img.cols - 1) * ch;
        for (int col = starts; col < ends; col++)
        {
            // Write at the same index that is being read (the original code
            // advanced output from index 0, shifting the result by one pixel).
            // Stepping by ch reaches the same channel of the neighboring pixels.
            output[col] = saturate_cast<uchar>(5 * current[col] - current[col - ch] - current[col + ch] - previous[col] - next[col]);
        }
    }

    // Process the boundary: set the peripheral pixels to 0
    result.row(0).setTo(Scalar::all(0));
    result.row(result.rows - 1).setTo(Scalar::all(0));
    result.col(0).setTo(Scalar::all(0));
    result.col(result.cols - 1).setTo(Scalar::all(0));
}

int main()
{
    Mat lena = imread("lena.jpg");
    Mat sharpenedLena;
    sharpen(lena, sharpenedLena); // the original called ggicci::sharpen, but no such namespace is defined here
    imshow("lena", lena);
    imshow("sharpened lena", sharpenedLena);
    waitKey();
    return 0;
}
If you are lazier, have fun with the following.
int main()
{
    Mat lena = imread("lena.jpg");
    Mat sharpenedLena;
    // Center weight must be 5, not 4: 5*center - 4 neighbors = original + edge
    // response. With 4 you get only the (negative) Laplacian, not a sharpened image.
    Mat kernel = (Mat_<float>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
    cv::filter2D(lena, sharpenedLena, lena.depth(), kernel);
    imshow("lena", lena);
    imshow("sharpened lena", sharpenedLena);
    waitKey();
    return 0;
}
And the result looks like this: (image attached)

Find shape and color circle

I have detected all the red contours, and I am struggling to find a way to run a shape detection algorithm on them to keep just the red circles. How can I extract just the red circles and eliminate the rest of the undesirable contours? Source code:
#include "stdafx.h"
#include"math.h"
#include"conio.h"
#include"cv.h"
#include"highgui.h"
#include"stdio.h"
#include <math.h>
int main()
{
int i,j,k;
int h,w,seuill,channels;
int seuilr, channelsr;
int temp=0;
uchar *data,*datar;
i=j=k=0;
IplImage *frame=cvLoadImage("Mon_image.jpg",1);
IplImage *result=cvCreateImage( cvGetSize(frame), IPL_DEPTH_8U, 1 );
IplImage *gray=cvCreateImage( cvGetSize(frame), IPL_DEPTH_8U, 1 );
cvCvtColor(frame, result, CV_BGR2GRAY );
//IplImage* gray;
cvNamedWindow("original",CV_WINDOW_AUTOSIZE);
cvNamedWindow("Result",CV_WINDOW_AUTOSIZE);
h = frame->height;
w = frame->width;
seuill =frame->widthStep;
channels = frame->nChannels;
data = (uchar *)frame->imageData;
seuilr=result->widthStep;
channelsr=result->nChannels;
datar = (uchar *)result->imageData;
for(i=0;i < (h);i++)
for(j=0;j <(w);j++)
{
if(((data[i*seuill+j*channels+2]) >(19+data[i*seuill+j*channels]))&& ((data[i*seuill+j*channels+2]) > (19+data[i*seuill+j*channels+1])))
datar[i*seuilr+j*channelsr]=255;
else
datar[i*seuilr+j*channelsr]=0;
}
cvCanny(result,result, 50, 100, 3);
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* circles = cvHoughCircles(result, storage, CV_HOUGH_GRADIENT, 1, 40.0, 100, 100,0,0);
cvShowImage("original",frame);
cvShowImage("Result",result);
cvSaveImage("result.jpg",result);
cvWaitKey(0);
cvDestroyWindow("original");
cvDestroyWindow("Result");
return 0;
}
I would rather use the RANSAC algorithm to detect circles in your set of contours, but the Hough transform will also do the job.
See here for an explanation of both processes; a solution in MATLAB is given.
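Since no code was linked, here is a minimal C++ sketch of RANSAC circle fitting on one contour. The helpers circleFrom3Points and ransacCircle, and all thresholds, are my own illustrative choices, not a library API:

#include <opencv2/opencv.hpp>
#include <cstdlib>
#include <cmath>

// Circumcircle through 3 points; returns false if they are nearly collinear.
static bool circleFrom3Points(cv::Point2f a, cv::Point2f b, cv::Point2f c,
                              cv::Point2f& center, float& radius)
{
    float d = 2.0f * (a.x * (b.y - c.y) + b.x * (c.y - a.y) + c.x * (a.y - b.y));
    if (std::fabs(d) < 1e-6f) return false;
    float aa = a.x * a.x + a.y * a.y;
    float bb = b.x * b.x + b.y * b.y;
    float cc = c.x * c.x + c.y * c.y;
    center.x = (aa * (b.y - c.y) + bb * (c.y - a.y) + cc * (a.y - b.y)) / d;
    center.y = (aa * (c.x - b.x) + bb * (a.x - c.x) + cc * (b.x - a.x)) / d;
    radius = std::sqrt((a.x - center.x) * (a.x - center.x) + (a.y - center.y) * (a.y - center.y));
    return true;
}

// RANSAC: fit circles to random point triples, keep the one with the most inliers.
static bool ransacCircle(const std::vector<cv::Point>& pts,
                         cv::Point2f& bestCenter, float& bestRadius,
                         int iters = 200, float distTol = 2.0f)
{
    if (pts.size() < 3) return false;
    int bestInliers = 0;
    for (int it = 0; it < iters; ++it)
    {
        cv::Point2f a = pts[std::rand() % pts.size()];
        cv::Point2f b = pts[std::rand() % pts.size()];
        cv::Point2f c = pts[std::rand() % pts.size()];
        cv::Point2f center;
        float radius;
        if (!circleFrom3Points(a, b, c, center, radius)) continue;

        // Count points lying within distTol of the candidate circle
        int inliers = 0;
        for (size_t k = 0; k < pts.size(); ++k)
        {
            float dx = pts[k].x - center.x;
            float dy = pts[k].y - center.y;
            if (std::fabs(std::sqrt(dx * dx + dy * dy) - radius) < distTol)
                ++inliers;
        }
        if (inliers > bestInliers)
        {
            bestInliers = inliers;
            bestCenter = center;
            bestRadius = radius;
        }
    }
    // Accept the contour as a circle only if most of its points fit the model
    return bestInliers > 0.8 * (double)pts.size();
}

Run findContours on the red mask and keep only the contours for which ransacCircle returns true; those are your red circles.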

OpenCV background subtraction

I have an image of the background scene and an image of the same scene with objects in front. Now I want to create a mask of the foreground object using background subtraction. Both images are RGB.
I have already created the following code:
cv::Mat diff;
diff.create(orgImage.dims, orgImage.size, CV_8UC3);
diff = abs(orgImage - refImage);

cv::Mat mask(diff.rows, diff.cols, CV_8U, cv::Scalar(0,0,0));
//mask = (diff > 10);
for (int j = 0; j < diff.rows; j++) {
    // get the address of row j
    //uchar* dataIn = diff.ptr<uchar>(j);
    //uchar* dataOut = mask.ptr<uchar>(j);
    for (int i = 0; i < diff.cols; i++) {
        if (diff.at<cv::Vec3b>(j,i)[0] > 30 || diff.at<cv::Vec3b>(j,i)[1] > 30 || diff.at<cv::Vec3b>(j,i)[2] > 30)
            mask.at<uchar>(j,i) = 255;
    }
}
I don't know if I am doing this right.
Have a look at the inRange function from OpenCV. It allows you to set multiple thresholds at the same time for a 3-channel image.
So, to create the mask you were looking for, do the following:
inRange(diff, Scalar(30, 30, 30), Scalar(255, 255, 255), mask);
This should also be faster than trying to access each pixel yourself.
EDIT: If skin detection is what you are trying to do, I would first do the skin detection and only afterwards do background subtraction to remove the background. Otherwise, your skin detector will have to take into account the intensity shift caused by the subtraction.
Check out my other answer, about good techniques for skin detection.
EDIT :
Is this any faster?
#include <opencv2/opencv.hpp>
#include <vector>

using namespace cv;
using namespace std;

int main(int argc, char* argv[])
{
    Mat fg = imread("fg.jpg");
    Mat bg = imread("bg.jpg");
    cvtColor(fg, fg, CV_RGB2YCrCb);
    cvtColor(bg, bg, CV_RGB2YCrCb);

    Mat distance = Mat::zeros(fg.size(), CV_32F);
    vector<Mat> fgChannels;
    split(fg, fgChannels);
    vector<Mat> bgChannels;
    split(bg, bgChannels);
    for (size_t i = 0; i < fgChannels.size(); i++)
    {
        Mat temp = abs(fgChannels[i] - bgChannels[i]);
        temp.convertTo(temp, CV_32F);
        distance = distance + temp;
    }

    Mat mask;
    threshold(distance, mask, 35, 255, THRESH_BINARY);
    Mat kernel5x5 = getStructuringElement(MORPH_RECT, Size(5, 5));
    morphologyEx(mask, mask, MORPH_OPEN, kernel5x5);

    imshow("fg", fg);
    imshow("bg", bg);
    imshow("mask", mask);
    waitKey();
    return 0;
}
This code produces this mask based on your input imagery:
Finally, here is what I get using my simple thresholding method:
Mat diff = fgYcc - bgYcc;
vector<Mat> diffChannels;
split(diff, diffChannels);
// only operating on luminance for background subtraction...
threshold(diffChannels[0], bgfgMask, 1, 255.0, THRESH_BINARY_INV);
Mat kernel5x5 = getStructuringElement(MORPH_RECT, Size(5, 5));
morphologyEx(bgfgMask, bgfgMask, MORPH_OPEN, kernel5x5);
This produces the following mask:
I think I get the right results when doing it like this (in the YCrCb colorspace), but accessing each pixel is slow, so I need to find another approach:
cv::Mat mask(image.rows, image.cols, CV_8U, cv::Scalar(0,0,0));

cv::Mat_<cv::Vec3b>::const_iterator itImage = image.begin<cv::Vec3b>();
cv::Mat_<cv::Vec3b>::const_iterator itend   = image.end<cv::Vec3b>();
cv::Mat_<cv::Vec3b>::iterator       itRef   = refRoi.begin<cv::Vec3b>();
cv::Mat_<uchar>::iterator           itMask  = mask.begin<uchar>();

for (; itImage != itend; ++itImage, ++itRef, ++itMask) {
    int distance = abs((*itImage)[0] - (*itRef)[0]) +
                   abs((*itImage)[1] - (*itRef)[1]) +
                   abs((*itImage)[2] - (*itRef)[2]);
    if (distance < 30)
        *itMask = 0;
    else
        *itMask = 255;
}
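For reference, a sketch of the same L1 distance computed with whole-image operations instead of per-pixel iterators (assuming image and refRoi are same-sized CV_8UC3 Mats, as above):

// Per-channel absolute difference, summed in float to avoid 8-bit saturation
cv::Mat diff3;
cv::absdiff(image, refRoi, diff3);
diff3.convertTo(diff3, CV_32FC3);

std::vector<cv::Mat> channels;
cv::split(diff3, channels);
cv::Mat dist = channels[0] + channels[1] + channels[2];

// Same rule as the loop: distance >= 30 becomes foreground (255)
cv::Mat mask;
cv::threshold(dist, mask, 29.5, 255, cv::THRESH_BINARY);
mask.convertTo(mask, CV_8U);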
