I want to segment one solid blob for each object in the extracted foreground and bound each object with a box. But my code shows many boxes bounding random blobs on a single object, because the blob for one object is not solid, and there are many small blobs too.
Here is my code:
#include "stdafx.h"
#include <vector>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

int main(int argc, char *argv[])
{
    cv::Mat frame;
    cv::Mat fg;
    cv::Mat thresholded;
    cv::Mat thresholded2;
    cv::Mat result;
    cv::Mat bgmodel;
    cv::namedWindow("Frame");
    cv::namedWindow("Background Model");
    cv::VideoCapture cap(0);

    cv::BackgroundSubtractorMOG2 bgs;
    bgs.nmixtures = 2;
    bgs.history = 60;
    bgs.varThreshold = 15;
    bgs.bShadowDetection = true;
    bgs.nShadowDetection = 0;
    bgs.fTau = 0.5;

    std::vector<std::vector<cv::Point>> contours;

    for(;;)
    {
        cap >> frame;
        cv::blur(frame, frame, cv::Size(10, 10));
        bgs(frame, fg);
        bgs.getBackgroundImage(bgmodel);
        cv::erode(fg, fg, cv::Mat());
        cv::dilate(fg, fg, cv::Mat());
        cv::threshold(fg, thresholded, 70.0f, 255, CV_THRESH_BINARY);
        cv::threshold(fg, thresholded2, 70.0f, 255, CV_THRESH_BINARY);
        cv::findContours(thresholded, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
        cv::cvtColor(thresholded2, result, CV_GRAY2RGB);

        int cmin = 50;
        int cmax = 10000;
        std::vector<std::vector<cv::Point>>::iterator itc = contours.begin();
        while (itc != contours.end())
        {
            if ((int)itc->size() < cmin || (int)itc->size() > cmax)
            {
                itc = contours.erase(itc);
            }
            else
            {
                std::vector<cv::Point> pts = *itc;
                cv::Mat pointsMatrix = cv::Mat(pts);
                cv::Scalar color(0, 255, 0);
                cv::Rect r0 = cv::boundingRect(pointsMatrix);
                cv::rectangle(result, r0, color, 2);
                ++itc;
            }
        }

        cv::imshow("Frame", result);
        cv::imshow("Background Model", bgmodel);
        if(cv::waitKey(30) >= 0) break;
    }
    return 0;
}
And here is the result:
[screenshot: Frame]
So how can I segment one solid blob for each object found in the extracted foreground, and bound the objects one by one with boxes?
By a solid blob I mean a single solid white blob, like here: xxx
I'd appreciate any help.
NB: Sorry for my bad English. :)
=================
This is my edited code!
#include "stdafx.h"
#include <vector>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

int main(int argc, char *argv[])
{
    cv::Mat frame;
    cv::Mat fg;
    cv::Mat blurred;
    cv::Mat thresholded;
    cv::Mat thresholded2;
    cv::Mat result;
    cv::Mat bgmodel;
    cv::namedWindow("Frame");
    cv::namedWindow("Background Model");
    cv::VideoCapture cap(0);

    cv::BackgroundSubtractorMOG2 bgs;
    bgs.nmixtures = 2;
    bgs.history = 60;
    bgs.varThreshold = 15;
    bgs.bShadowDetection = true;
    bgs.nShadowDetection = 0;
    bgs.fTau = 0.5;

    std::vector<std::vector<cv::Point>> contours;

    for(;;)
    {
        cap >> frame;
        cv::blur(frame, blurred, cv::Size(10, 10));
        bgs(blurred, fg);
        bgs.getBackgroundImage(bgmodel);
        cv::threshold(fg, thresholded, 70.0f, 255, CV_THRESH_BINARY);
        cv::threshold(fg, thresholded2, 70.0f, 255, CV_THRESH_BINARY);
        cv::Mat element50(50, 50, CV_8U, cv::Scalar(1));
        cv::morphologyEx(thresholded, thresholded, cv::MORPH_CLOSE, element50);
        cv::morphologyEx(thresholded2, thresholded2, cv::MORPH_CLOSE, element50);
        cv::findContours(thresholded, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
        cv::cvtColor(thresholded2, result, CV_GRAY2RGB);

        int cmin = 50;
        int cmax = 10000;
        std::vector<std::vector<cv::Point>>::iterator itc = contours.begin();
        while (itc != contours.end())
        {
            if ((int)itc->size() < cmin || (int)itc->size() > cmax)
            {
                itc = contours.erase(itc);
            }
            else
            {
                std::vector<cv::Point> pts = *itc;
                cv::Mat pointsMatrix = cv::Mat(pts);
                cv::Scalar color(0, 255, 0);
                cv::Rect r0 = cv::boundingRect(pointsMatrix);
                cv::rectangle(result, r0, color, 2);
                ++itc;
            }
        }

        cv::imshow("Frame", result);
        cv::imshow("Background Model", bgmodel);
        if(cv::waitKey(30) >= 0) break;
    }
    return 0;
}
And here is the result:
[screenshot: FRAME]
Thanks to elactic. :)
You can try to merge blobs with morphological closing (which is the erosion of the dilation of a binary image). You can use the OpenCV functions erode and dilate for that.
This tutorial should help you.
I assume you will still have to filter blobs by size after that.
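For illustration, here is a minimal, untested sketch of that approach, assuming fg is the binary foreground mask and result is the BGR image used for drawing, as in the question; the 15x15 ellipse kernel and the 500-pixel area threshold are assumptions you would have to tune for your scene:
cv::Mat mask;
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(15, 15));
// closing = dilation followed by erosion; merges nearby fragments into one blob
cv::morphologyEx(fg, mask, cv::MORPH_CLOSE, kernel);
std::vector<std::vector<cv::Point>> contours;
cv::findContours(mask, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
for (size_t i = 0; i < contours.size(); i++)
{
    if (cv::contourArea(contours[i]) < 500.0) // drop small blobs by area
        continue;
    cv::rectangle(result, cv::boundingRect(contours[i]), cv::Scalar(0, 255, 0), 2);
}
Filtering on cv::contourArea is usually more robust than filtering on the number of contour points, since a small, ragged contour can still have many points.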
I know this topic has come up before, but I didn't find any solution to it.
I am trying to detect characters in a picture with the code below:
#include <tesseract/baseapi.h>
#include <leptonica/allheaders.h>
#include <opencv2/opencv.hpp>
#include <sstream>
#include <memory>
#include <iostream>
#define path "/home/jovan/Pictures/"
void resize(cv::Mat &img);
PIX *mat8ToPix(const cv::Mat *mat8);
cv::Mat pix8ToMat(PIX *pix8);
int main(int argc, char **argv)
{
    // Load image
    std::stringstream ss;
    ss << path;
    ss << argv[1];
    cv::Mat im = cv::imread(ss.str());
    if (im.empty())
    {
        std::cout << "Cannot open source image!" << std::endl;
        return EXIT_FAILURE;
    }
    resize(im);
    cv::Mat gray;
    cv::cvtColor(im, gray, CV_BGR2GRAY);

    // Pass it to the Tesseract API
    tesseract::TessBaseAPI tess;
    tess.Init(NULL, "eng", tesseract::OEM_DEFAULT);
    tess.SetPageSegMode(tesseract::PSM_SINGLE_BLOCK);
    tess.SetVariable("tessedit_char_whitelist", "QWERTYUIOPASDFGHJKLZXCVBNM");
    // mat8ToPix reads the Mat with at<uchar>, so it needs the single-channel
    // grayscale image, not the 3-channel colour one
    PIX *image = mat8ToPix(&gray);
    //tess.SetImage((uchar*)gray.data, gray.cols, gray.rows, 1, gray.cols);
    tess.SetImage(image);

    // Get the text
    char* out = tess.GetUTF8Text();
    if (out != nullptr)
        std::cout << "here it is: " << out << std::endl;

    cv::imshow("image", im);
    cv::imshow("gray", gray);
    cv::waitKey();
    return 0;
}
void resize(cv::Mat &img)
{
    while (img.size().width >= 500 && img.size().height >= 500)
        cv::resize(img, img, cv::Size(img.size().width / 2, img.size().height / 2));
}

PIX *mat8ToPix(const cv::Mat *mat8)
{
    PIX *pixd = pixCreate(mat8->size().width, mat8->size().height, 8);
    for (int y = 0; y < mat8->rows; y++)
        for (int x = 0; x < mat8->cols; x++)
            pixSetPixel(pixd, x, y, (l_uint32) mat8->at<uchar>(y, x));
    return pixd;
}

cv::Mat pix8ToMat(PIX *pix8)
{
    cv::Mat mat(cv::Size(pix8->w, pix8->h), CV_8UC1);
    uint32_t *line = pix8->data;
    for (uint32_t y = 0; y < pix8->h; ++y)
    {
        for (uint32_t x = 0; x < pix8->w; ++x)
            mat.at<uchar>(y, x) = GET_DATA_BYTE(line, x);
        line += pix8->wpl;
    }
    return mat;
}
Whatever picture I process, I get this in the terminal:
$: Warning: Invalid resolution 0 dpi. Using 70 instead.
Does anyone have a solution?
Thanks in advance.
If you know the input image's resolution, you can call pixSetResolution on the Leptonica Pix object.
Or use the Tesseract API to pass in the value. See
Tess4j - Pdf to Tiff to tesseract - "Warning: Invalid resolution 0 dpi. Using 70 instead."
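For example, a minimal sketch, assuming the PIX from the question is in image and the true resolution is 300 dpi (an assumed value):
pixSetResolution(image, 300, 300);           // set it on the Leptonica PIX
tess.SetVariable("user_defined_dpi", "300"); // or tell Tesseract directly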
Maybe this helps: I used EMGU & C#, but I think it must be the same in C++:
ocr.SetVariable("user_defined_dpi", "70");
... and the message should disappear ;)
I had a similar issue. I found out from here that a dark background in the image was the problem. Inverting the image colors worked.
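In OpenCV that inversion is a one-liner; for instance, on the grayscale image from the question above:
cv::bitwise_not(gray, gray); // light text on a dark background becomes dark on light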
I want to provide the center coordinates, i.e. (5,5), of a small 10x10 square, so that it is placed at coordinates (8,8) of a larger 20x20 square,
to get this image result:
Try this. I didn't test it though, so please comment if it doesn't work and I'll try to fix it =)
cv::Mat placeImageInImage(cv::Point center, cv::Mat src, cv::Mat dst_orig)
{
    cv::Mat dst = dst_orig.clone();
    cv::Rect targetRect = cv::Rect(center.x - src.cols/2, center.y - src.rows/2, src.cols, src.rows);
    cv::Rect srcRect = cv::Rect(0, 0, src.cols, src.rows);
    cv::Rect dstRect = cv::Rect(0, 0, dst.cols, dst.rows);
    // clip the target rect to the destination image bounds
    cv::Rect roi = targetRect & dstRect;
    srcRect.width = roi.width;
    srcRect.height = roi.height;
    // didn't test, maybe swap the order:
    srcRect.x += roi.x - targetRect.x;
    srcRect.y += roi.y - targetRect.y;
    src(srcRect).copyTo(dst(roi));
    return dst;
}
Using this code to test:
int main(int argc, char* argv[])
{
    cv::Mat input = cv::imread("C:/StackOverflow/Input/Lenna.png");
    cv::Mat input2;
    cv::resize(input, input2, cv::Size(256, 256));
    cv::Mat result = placeImageInImage(cv::Point(256, 256), input2, input);
    cv::imshow("input", input);
    cv::imshow("result", result);
    cv::waitKey(0);
    return 0;
}
I get:
I tried to perform EM-based background/foreground segmentation using the code below, which I also found on Stack Overflow. But there seems to be an error somewhere, as I never see the second printf statement execute: it never reaches the classification/clustering part of the code. The code is given below. Could someone help me with this?
#include <opencv2/opencv.hpp>
#include <opencv2/legacy/legacy.hpp>
char str1[60];
int main()
{
    cv::Mat source = cv::imread("C:\\Image Input\\part1.bmp");
    if (!source.data)
        printf(" No data \n");

    // output images
    cv::Mat meanImg(source.rows, source.cols, CV_32FC3);
    cv::Mat fgImg(source.rows, source.cols, CV_8UC3);
    cv::Mat bgImg(source.rows, source.cols, CV_8UC3);

    // convert the input image to float
    cv::Mat floatSource;
    source.convertTo(floatSource, CV_32F);

    // now convert the float image to a column vector
    cv::Mat samples(source.rows * source.cols, 3, CV_32FC1);
    int idx = 0;
    for (int y = 0; y < source.rows; y++) {
        cv::Vec3f* row = floatSource.ptr<cv::Vec3f>(y);
        for (int x = 0; x < source.cols; x++) {
            samples.at<cv::Vec3f>(idx++, 0) = row[x];
        }
    }
    printf(" After Loop \n");

    // we need just 2 clusters
    cv::EMParams params(2);
    cv::ExpectationMaximization em(samples, cv::Mat(), params);

    // the two dominating colors
    cv::Mat means = em.getMeans();
    // the weights of the two dominant colors
    cv::Mat weights = em.getWeights();
    // we define the foreground as the dominant color with the largest weight
    const int fgId = weights.at<float>(0) > weights.at<float>(1) ? 0 : 1;
    printf(" After Training \n");

    // now classify each of the source pixels
    idx = 0;
    for (int y = 0; y < source.rows; y++)
    {
        printf(" Now Classify\n");
        for (int x = 0; x < source.cols; x++)
        {
            // classify
            const int result = cvRound(em.predict(samples.row(idx++), NULL));
            // get the corresponding mean (dominant color)
            const double* ps = means.ptr<double>(result, 0);
            // set the corresponding mean value in the mean image
            float* pd = meanImg.ptr<float>(y, x);
            // float images need to be in the [0..1] range
            pd[0] = ps[0] / 255.0;
            pd[1] = ps[1] / 255.0;
            pd[2] = ps[2] / 255.0;
            // set either foreground or background
            if (result == fgId) {
                fgImg.at<cv::Point3_<uchar> >(y, x, 0) = source.at<cv::Point3_<uchar> >(y, x, 0);
            } else {
                bgImg.at<cv::Point3_<uchar> >(y, x, 0) = source.at<cv::Point3_<uchar> >(y, x, 0);
            }
        }
    }
    printf(" Show Images \n");
    cv::imshow("Means", meanImg);
    cv::imshow("Foreground", fgImg);
    cv::imshow("Background", bgImg);
    cv::waitKey(0);
    return 0;
}
The code works fine. I think you are using images that are too large, so learning takes a very long time. Try processing smaller images.
Just one correction: initialize the images with zeros:
// output images
cv::Mat meanImg = cv::Mat::zeros(source.rows, source.cols, CV_32FC3);
cv::Mat fgImg = cv::Mat::zeros(source.rows, source.cols, CV_8UC3);
cv::Mat bgImg = cv::Mat::zeros(source.rows, source.cols, CV_8UC3);
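If the image really is the bottleneck, a quick way to test that is to shrink the source right after loading it. A sketch, where the 0.25 scale factor is an arbitrary assumption:
// downscale so EM trains on 1/16 as many pixels
cv::resize(source, source, cv::Size(), 0.25, 0.25, cv::INTER_AREA);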
As input data I have a 24-bit RGB image and a palette with 2..20 fixed colours. These colours are in no way spread regularly over the full colour range.
Now I have to modify the colours of the input image so that only the colours of the given palette are used, using the colour from the palette that is closest to the original colour (not closest mathematically, but in terms of human visual impression). So what I need is an algorithm that takes an input colour and finds the colour in the target palette that visually fits it best. Please note: I'm not looking for a naive comparison/difference algorithm but for something that really incorporates the impression a colour makes on humans!
Since this is something that should already have been done, and because I do not want to reinvent the wheel: is there some example source code out there that does this job? In the best case it is really a piece of code and not a link to a disastrously huge library ;-)
(I'd guess OpenCV does not provide such a function?)
Thanks
You should look at the Lab color space. It was designed so that distance in the colour space matches perceptual distance. So once you have converted your image, you can compute the distances as you would have before, but should get a better result from a perceptual point of view. In OpenCV you can use the cvtColor(source, destination, CV_BGR2Lab) function.
Another idea would be to use dithering. The idea is to mix in missing colours using neighbouring pixels. A popular algorithm for this is Floyd-Steinberg dithering.
Here is an example of mine, where I combined an optimized palette computed with k-means, the Lab colour space, and Floyd-Steinberg dithering:
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
cv::Mat floydSteinberg(cv::Mat img, cv::Mat palette);
cv::Vec3b findClosestPaletteColor(cv::Vec3b color, cv::Mat palette);
int main(int argc, char** argv)
{
    // Number of clusters (colors in the result image)
    int nrColors = 18;
    cv::Mat imgBGR = imread(argv[1], 1);
    cv::Mat img;
    cvtColor(imgBGR, img, CV_BGR2Lab);

    cv::Mat colVec = img.reshape(1, img.rows*img.cols); // change to a Nx3 column vector
    cv::Mat colVecD;
    colVec.convertTo(colVecD, CV_32FC3, 1.0); // convert to floating point

    cv::Mat labels, centers;
    cv::kmeans(colVecD, nrColors, labels,
               cv::TermCriteria(CV_TERMCRIT_ITER, 100, 0.1),
               3, cv::KMEANS_PP_CENTERS, centers); // compute k-means centers

    // replace pixels by their corresponding cluster centers
    cv::Mat imgPosterized = img.clone();
    for (int i = 0; i < img.rows; i++)
        for (int j = 0; j < img.cols; j++)
            for (int k = 0; k < 3; k++)
                imgPosterized.at<Vec3b>(i,j)[k] = centers.at<float>(labels.at<int>(j+img.cols*i), k);

    // convert palette back to uchar
    cv::Mat palette;
    centers.convertTo(palette, CV_8UC3, 1.0);

    // call the Floyd-Steinberg dithering algorithm
    cv::Mat fs = floydSteinberg(img, palette);

    cv::Mat imgPosterizedBGR, fsBGR;
    cvtColor(imgPosterized, imgPosterizedBGR, CV_Lab2BGR);
    cvtColor(fs, fsBGR, CV_Lab2BGR);

    imshow("input", imgBGR);            // original image
    imshow("result", imgPosterizedBGR); // posterized image
    imshow("fs", fsBGR);                // Floyd-Steinberg dithering
    waitKey();
    return 0;
}
cv::Mat floydSteinberg(cv::Mat imgOrig, cv::Mat palette)
{
    cv::Mat img = imgOrig.clone();
    cv::Mat resImg = img.clone();
    for (int i = 0; i < img.rows; i++)
        for (int j = 0; j < img.cols; j++)
        {
            cv::Vec3b newpixel = findClosestPaletteColor(img.at<Vec3b>(i,j), palette);
            resImg.at<Vec3b>(i,j) = newpixel;
            // diffuse the quantization error to neighbouring pixels
            for (int k = 0; k < 3; k++)
            {
                int quant_error = (int)img.at<Vec3b>(i,j)[k] - newpixel[k];
                if (i+1 < img.rows)
                    img.at<Vec3b>(i+1,j)[k] = min(255, max(0, (int)img.at<Vec3b>(i+1,j)[k] + (7 * quant_error) / 16));
                if (i-1 >= 0 && j+1 < img.cols)
                    img.at<Vec3b>(i-1,j+1)[k] = min(255, max(0, (int)img.at<Vec3b>(i-1,j+1)[k] + (3 * quant_error) / 16));
                if (j+1 < img.cols)
                    img.at<Vec3b>(i,j+1)[k] = min(255, max(0, (int)img.at<Vec3b>(i,j+1)[k] + (5 * quant_error) / 16));
                if (i+1 < img.rows && j+1 < img.cols)
                    img.at<Vec3b>(i+1,j+1)[k] = min(255, max(0, (int)img.at<Vec3b>(i+1,j+1)[k] + (1 * quant_error) / 16));
            }
        }
    return resImg;
}
float vec3bDist(cv::Vec3b a, cv::Vec3b b)
{
    return sqrt(pow((float)a[0]-b[0], 2) + pow((float)a[1]-b[1], 2) + pow((float)a[2]-b[2], 2));
}
cv::Vec3b findClosestPaletteColor(cv::Vec3b color, cv::Mat palette)
{
    int minI = 0;
    float minDistance = vec3bDist(color, palette.at<Vec3b>(0));
    for (int i = 0; i < palette.rows; i++)
    {
        float distance = vec3bDist(color, palette.at<Vec3b>(i));
        if (distance < minDistance)
        {
            minDistance = distance;
            minI = i;
        }
    }
    return palette.at<Vec3b>(minI);
}
Try this algorithm (it reduces the number of colors, but it computes the palette by itself):
#include <opencv2/opencv.hpp>
#include "opencv2/legacy/legacy.hpp"
#include <vector>
#include <list>
#include <iostream>
using namespace cv;
using namespace std;
int main(void)
{
    // Number of clusters (colors in the result image)
    int NrGMMComponents = 32;
    // Source file name
    string fname = "D:\\ImagesForTest\\tools.jpg";
    cv::Mat SampleImg = imread(fname, 1);
    //cv::GaussianBlur(SampleImg, SampleImg, Size(5,5), 3);
    int SampleImgHeight = SampleImg.rows;
    int SampleImgWidth = SampleImg.cols;

    // Pick data points
    vector<Vec3d> ListSamplePoints;
    for (int y = 0; y < SampleImgHeight; y++)
    {
        for (int x = 0; x < SampleImgWidth; x++)
        {
            // Get the pixel color at that position
            Vec3b bgrPixel = SampleImg.at<Vec3b>(y, x);
            uchar b = bgrPixel.val[0];
            uchar g = bgrPixel.val[1];
            uchar r = bgrPixel.val[2];
            if (rand() % 25 == 0) // Pick not every pixel, but every 25th
            {
                ListSamplePoints.push_back(Vec3d(b, g, r));
            }
        } // for (x)
    } // for (y)

    // Form the training matrix
    Mat labels;
    int NrSamples = ListSamplePoints.size();
    Mat samples(NrSamples, 3, CV_32FC1);
    for (int s = 0; s < NrSamples; s++)
    {
        Vec3d v = ListSamplePoints.at(s);
        samples.at<float>(s, 0) = (float)v[0];
        samples.at<float>(s, 1) = (float)v[1];
        samples.at<float>(s, 2) = (float)v[2];
    }
    cout << "Learning to represent the sample distributions with " << NrGMMComponents << " Gaussians." << endl;

    // Algorithm parameters
    CvEMParams params;
    params.covs = NULL;
    params.means = NULL;
    params.weights = NULL;
    params.probs = NULL;
    params.nclusters = NrGMMComponents;
    params.cov_mat_type = CvEM::COV_MAT_GENERIC; // DIAGONAL, GENERIC, SPHERICAL
    params.start_step = CvEM::START_AUTO_STEP;
    params.term_crit.max_iter = 1500;
    params.term_crit.epsilon = 0.001;
    params.term_crit.type = CV_TERMCRIT_ITER | CV_TERMCRIT_EPS;
    //params.term_crit.type = CV_TERMCRIT_ITER;

    // Train
    cout << "Started GMM training" << endl;
    CvEM em_model;
    em_model.train(samples, Mat(), params, &labels);
    cout << "Finished GMM training" << endl;

    // Result image
    Mat img = Mat::zeros(Size(SampleImgWidth, SampleImgHeight), CV_8UC3);
    // Ask the classifier for each pixel
    Mat sample(1, 3, CV_32FC1);
    Mat means;
    means = em_model.getMeans();
    for (int i = 0; i < img.rows; i++)
    {
        for (int j = 0; j < img.cols; j++)
        {
            Vec3b v = SampleImg.at<Vec3b>(i, j);
            sample.at<float>(0, 0) = (float)v[0];
            sample.at<float>(0, 1) = (float)v[1];
            sample.at<float>(0, 2) = (float)v[2];
            int response = cvRound(em_model.predict(sample));
            img.at<Vec3b>(i, j)[0] = means.at<double>(response, 0);
            img.at<Vec3b>(i, j)[1] = means.at<double>(response, 1);
            img.at<Vec3b>(i, j)[2] = means.at<double>(response, 2);
        }
    }
    img.convertTo(img, CV_8UC3);
    imshow("result", img);
    waitKey();
    // Save the result
    cv::imwrite("result.png", img);
    return 0;
}
PS: For perceptual colour distance measurement it's better to use the L*a*b colour space. There is a converter in OpenCV for this purpose. For clustering you can use k-means with predefined cluster centers (your palette entries). After clustering you'll get points with the indexes of the palette entries.
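A minimal, untested sketch of that per-pixel mapping against a fixed palette; paletteLab is assumed to be an Nx1 CV_8UC3 Mat holding your palette colours already converted to Lab:
cv::Mat quantizeToPalette(const cv::Mat& bgr, const cv::Mat& paletteLab)
{
    cv::Mat lab;
    cv::cvtColor(bgr, lab, CV_BGR2Lab);
    for (int y = 0; y < lab.rows; y++)
        for (int x = 0; x < lab.cols; x++)
        {
            cv::Vec3b p = lab.at<cv::Vec3b>(y, x);
            int bestI = 0;
            double best = 1e30;
            for (int i = 0; i < paletteLab.rows; i++)
            {
                cv::Vec3b q = paletteLab.at<cv::Vec3b>(i);
                // squared Euclidean distance in Lab approximates perceptual difference
                double d = 0;
                for (int k = 0; k < 3; k++)
                    d += double(p[k] - q[k]) * double(p[k] - q[k]);
                if (d < best) { best = d; bestI = i; }
            }
            lab.at<cv::Vec3b>(y, x) = paletteLab.at<cv::Vec3b>(bestI);
        }
    cv::Mat res;
    cv::cvtColor(lab, res, CV_Lab2BGR);
    return res;
}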
Good day. I'm new to OpenCV and right now I'm trying to do fingertip detection using colour tracking and background subtraction methods. I got the colour tracking part working, but I have no idea how to subtract the background and leave only the fingertips.
Here is my code.
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include <iostream>
using namespace std;
IplImage* GetThresholdedImage(IplImage* img, CvScalar& lowerBound, CvScalar& upperBound)
{
    // Convert the image into an HSV image
    IplImage* imgHSV = cvCreateImage(cvGetSize(img), 8, 3);
    cvCvtColor(img, imgHSV, CV_BGR2HSV);
    IplImage* imgThreshed = cvCreateImage(cvGetSize(img), 8, 1);
    cvInRangeS(imgHSV, lowerBound, upperBound, imgThreshed);
    cvReleaseImage(&imgHSV);
    return imgThreshed;
}
int main()
{
    int lineThickness = 2;
    CvScalar lowerBound = cvScalar(20, 100, 100);
    CvScalar upperBound = cvScalar(30, 255, 255);
    int b, g, r;
    lowerBound = cvScalar(0, 58, 89);
    upperBound = cvScalar(25, 173, 229);

    CvCapture* capture = 0;
    capture = cvCaptureFromCAM(1);
    if (!capture)
    {
        printf("Could not initialize capturing...\n");
        return -1;
    }
    cvNamedWindow("video");
    cvNamedWindow("thresh");

    // This image holds the "scribble" data...
    // the tracked positions of the object
    IplImage* imgScribble = NULL;

    while (true)
    {
        IplImage* frame = 0;
        frame = cvQueryFrame(capture);
        if (!frame)
            break;

        // If this is the first frame, we need to initialize it
        if (imgScribble == NULL)
        {
            imgScribble = cvCreateImage(cvGetSize(frame), 8, 3);
        }

        // Holds the thresholded image (tracked color -> white, the rest -> black)
        IplImage* imgThresh = GetThresholdedImage(frame, lowerBound, upperBound);

        // Calculate the moments to estimate the position of the object
        CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
        cvMoments(imgThresh, moments, 1);

        // The actual moment values
        double moment10 = cvGetSpatialMoment(moments, 1, 0);
        double moment01 = cvGetSpatialMoment(moments, 0, 1);
        double area = cvGetCentralMoment(moments, 0, 0);

        // Holding the last and current positions
        static int posX = 0;
        static int posY = 0;
        int lastX = posX;
        int lastY = posY;
        posX = moment10 / area;
        posY = moment01 / area;
        cout << "position = " << posX << " " << posY << endl;

        // We want to draw a line only if it's a valid position
        if (lastX > 0 && lastY > 0 && posX > 0 && posY > 0)
        {
            // Draw a yellow line from the previous point to the current point
            cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), upperBound, lineThickness);
        }

        // Add the scribbling image and the frame...
        cvAdd(frame, imgScribble, frame);
        cvShowImage("thresh", imgThresh);
        cvShowImage("video", frame);

        int c = cvWaitKey(10);
        if (c == 27) // ESC key
        {
            break;
        }
        cvReleaseImage(&imgThresh);
        free(moments); // was allocated with malloc, so free it (not delete)
    }
    cvReleaseCapture(&capture);
    return 0;
}
I don't know if I understand you right, but I think you need to add the following:
cvErode(imgThreshed, imgThreshed, NULL, 1);
cvDilate(imgThreshed, imgThreshed, NULL, 1);
in GetThresholdedImage to get less noise! But after all, I think it would be better for you to use OpenCV's cv::Mat API ;)
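A rough cv::Mat equivalent of that thresholding-plus-cleanup step could look like this (a sketch only, untested; the HSV bounds are copied from the question):
cv::Mat GetThresholdedMask(const cv::Mat& frameBGR)
{
    cv::Mat hsv, mask;
    cv::cvtColor(frameBGR, hsv, CV_BGR2HSV);
    cv::inRange(hsv, cv::Scalar(0, 58, 89), cv::Scalar(25, 173, 229), mask);
    cv::erode(mask, mask, cv::Mat());  // one pass with the default 3x3 kernel
    cv::dilate(mask, mask, cv::Mat());
    return mask;
}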
Try the BGS library; I used it before and liked it. You can get it here: http://code.google.com/p/bgslibrary/