I'm trying to create an HSV histogram using the following code:
Mat image = new Mat(file, LoadImageType.Color);
int hBins = 16;
int sBins = 16;
int vBins = 16;
RangeF hRange = new RangeF(0F, 180F);
RangeF sRange = new RangeF(0f, 255f);
RangeF vRange = new RangeF(0f, 255f);
Image<Bgr, Byte> imageSource = new Image<Bgr, Byte>(image.Bitmap);
Image<Hsv, Byte> imageHsv = imageSource.Convert<Hsv, Byte>();
DenseHistogram hist = new DenseHistogram(new int[] { hBins, sBins, vBins }, new RangeF[] { hRange, sRange, vRange });
hist.Calculate<byte>(imageHsv.Split(), false, null);
The problem, though, is that when I call hist.GetBinValues(), all of the bin values are zero.
Computing the histogram channel by channel seems to give the expected output:
Mat image = new Mat(file, LoadImageType.Color);
int hBins = 16;
int sBins = 16;
int vBins = 16;
RangeF hRange = new RangeF(0F, 180F);
RangeF sRange = new RangeF(0f, 256f);
RangeF vRange = new RangeF(0f, 256f);
var imageSource = image.ToImage<Bgr, Byte>();
Image<Hsv, Byte> imageHsv = imageSource.Convert<Hsv, Byte>();
var hsvChannels = imageHsv.Split();
DenseHistogram hHist = new DenseHistogram(hBins, hRange);
DenseHistogram sHist = new DenseHistogram(sBins, sRange);
DenseHistogram vHist = new DenseHistogram(vBins, vRange);
hHist.Calculate<byte>(new Image<Gray, Byte>[] { hsvChannels[0] }, false, null);
sHist.Calculate<byte>(new Image<Gray, Byte>[] { hsvChannels[1] }, false, null);
vHist.Calculate<byte>(new Image<Gray, Byte>[] { hsvChannels[2] }, false, null);
var hVals = hHist.GetBinValues();
var sVals = sHist.GetBinValues();
var vVals = vHist.GetBinValues();
I can't say why your method does not work, though. Note that GetBinValues() on the 3D histogram returns an array of 16^3 values (one count per combined H-S-V bin), rather than the 16 * 3 values you might expect from three separate 16-bin histograms.
After battling with this for a few days, I dumped EMGU and just used OpenCV from C++, and that gives me the correct HSV bins.
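For reference, here is a minimal sketch of the same combined histogram computed directly with OpenCV's calcHist. It uses the plain OpenCV Java bindings rather than EMGU or C++, purely as an illustration; the file name is a placeholder, and the ranges follow the usual 0-180 / 0-256 convention for 8-bit HSV.
// Requires org.opencv.core.*, org.opencv.imgproc.Imgproc and org.opencv.imgcodecs.Imgcodecs.
Mat bgr = Imgcodecs.imread("input.png");                 // placeholder file name
Mat hsv = new Mat();
Imgproc.cvtColor(bgr, hsv, Imgproc.COLOR_BGR2HSV);
MatOfInt channels = new MatOfInt(0, 1, 2);               // H, S and V together
MatOfInt histSize = new MatOfInt(16, 16, 16);            // 16 bins per channel
MatOfFloat ranges = new MatOfFloat(0f, 180f, 0f, 256f, 0f, 256f);
Mat hist = new Mat();
Imgproc.calcHist(java.util.Arrays.asList(hsv), channels, new Mat(), hist, histSize, ranges);
// hist now holds 16 * 16 * 16 = 4096 bin counts, one per combined H-S-V bin.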
I'm new to OpenCV and would like to use it to crop portions of an image and then read them with Tesseract. I'm not sure what the best way is to crop all of the boxes I need.
Here is a simple example of the document I need to transform:
Any advice on the best approach?
I tried ORB with the following image as the template:
But without success.
On the template, some lines are selected as keypoints, but on the image I want to process the keypoints land mainly on the text rather than on the lines. Is it a bad template? Do I need to pre-process the image first?
And here is my code:
Feature2D f2d = ORB.create(5000); // SIFT.create(1000);
MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
Mat descriptors1 = new Mat();
Mat mask1 = new Mat();
f2d.detectAndCompute(img1, mask1, keypoints1, descriptors1);
MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
Mat descriptors2 = new Mat();
Mat mask2 = new Mat();
f2d.detectAndCompute(img2, mask2, keypoints2, descriptors2);
DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
MatOfDMatch matches = new MatOfDMatch();
matcher.match(descriptors1, descriptors2, matches);
Mat outputImg = new Mat();
MatOfByte drawnMatches = new MatOfByte();
Features2d.drawMatches(img1, keypoints1, img2, keypoints2, matches, outputImg, new Scalar(0, 255, 0), new Scalar(255, 0, 0), drawnMatches, Features2d.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS);
I could get good results by using a template that contains all of the text that never changes in the form. Furthermore, creating two templates (one per page) and using SIFT instead of ORB helped a lot too.
Here is my solution:
public static Mat matchTEmplateSIFT(Mat img1, Mat template, boolean showKeypoints, boolean drawMatchs) {
Feature2D f2d = SIFT.create(15000);
DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_SL2); // or FLANNBASED for better performance
return matchTEmplate(img1, template, f2d, matcher);
}
public static Mat matchTEmplate(Mat baseImage, Mat template, Feature2D f2d, DescriptorMatcher matcher) {
int dilateSize = 5;
Mat scene = dilateBitwise(dilateSize, baseImage.clone());
template = dilateBitwise(dilateSize, template.clone());
MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
Mat descriptors1 = new Mat();
f2d.detectAndCompute(scene, new Mat(), keypoints1, descriptors1);
MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
Mat descriptors2 = new Mat();
f2d.detectAndCompute(template, new Mat(), keypoints2, descriptors2);
List<MatOfDMatch> matches = new ArrayList<>();
matcher.knnMatch(descriptors1, descriptors2, matches, 2);
MatOfDMatch goodMatches = getBestMatches(matches);
Mat result = transformAndWarp(baseImage, template, keypoints1, keypoints2, goodMatches);
return result;
}
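// Note: dilateBitwise is not shown in the original post. A plausible sketch, assuming it
// simply dilates the image with a square kernel of the given size (a guess for
// completeness, not the author's actual implementation):
private static Mat dilateBitwise(int size, Mat img) {
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(size, size));
    Imgproc.dilate(img, img, kernel);
    return img;
}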
private static Mat transformAndWarp(Mat baseImage, Mat template, MatOfKeyPoint keypoints1, MatOfKeyPoint keypoints2, MatOfDMatch goodMatches) {
Mat H = findHomographyMatrix(keypoints1, keypoints2, goodMatches);
perspectiveTransform(template, H);
Mat result = new Mat();
Imgproc.warpPerspective(baseImage, result, H, new Size(template.cols(), template.rows()));
return result;
}
private static void perspectiveTransform(Mat template, Mat H) {
Mat obj_corners = new Mat(4, 1, CvType.CV_32FC2);
obj_corners.put(0, 0, new double[]{0, 0});
obj_corners.put(1, 0, new double[]{template.cols(), 0});
obj_corners.put(2, 0, new double[]{template.cols(), template.rows()});
obj_corners.put(3, 0, new double[]{0, template.rows()});
Mat scene_corners = new Mat(4, 1, CvType.CV_32FC2);
Core.perspectiveTransform(obj_corners, scene_corners, H);
}
private static Mat findHomographyMatrix(MatOfKeyPoint keypoints1, MatOfKeyPoint keypoints2, MatOfDMatch goodMatches) {
LinkedList<Point> templateList = new LinkedList<>();
LinkedList<Point> sceneList = new LinkedList<>();
List<KeyPoint> templateKeyPoints = keypoints1.toList();
List<KeyPoint> sceneKeypoints = keypoints2.toList();
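// Note: as called from matchTEmplate, keypoints1 holds the scene keypoints and keypoints2
// the template keypoints, so these local names are effectively swapped; queryIdx indexes
// keypoints1 and trainIdx indexes keypoints2.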
for (int i = 0; i < goodMatches.toList().size(); i++) {
templateList.addLast(templateKeyPoints.get(goodMatches.toList().get(i).queryIdx).pt);
sceneList.addLast(sceneKeypoints.get(goodMatches.toList().get(i).trainIdx).pt);
}
MatOfPoint2f templateMat = new MatOfPoint2f();
templateMat.fromList(templateList);
MatOfPoint2f sceneMat = new MatOfPoint2f();
sceneMat.fromList(sceneList);
return Calib3d.findHomography(templateMat, sceneMat, Calib3d.RANSAC);
}
// https://docs.opencv.org/3.4/d5/d6f/tutorial_feature_flann_matcher.html
private static MatOfDMatch getBestMatches(List<MatOfDMatch> knnMatches) {
//-- Filter matches using the Lowe's ratio test
float ratioThresh = 0.5f;
List<DMatch> listOfGoodMatches = new ArrayList<>();
for (int i = 0; i < knnMatches.size(); i++) {
if (knnMatches.get(i).rows() > 1) {
DMatch[] matches = knnMatches.get(i).toArray();
if (matches[0].distance < ratioThresh * matches[1].distance) {
listOfGoodMatches.add(matches[0]);
}
}
}
MatOfDMatch matOfDMatch = new MatOfDMatch();
matOfDMatch.fromList(listOfGoodMatches);
return matOfDMatch;
}
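For completeness, a rough usage sketch of the helpers above; the file names are placeholders, not from the original post:
// Requires org.opencv.imgcodecs.Imgcodecs for reading and writing the images.
Mat scene = Imgcodecs.imread("scanned_form.png");        // placeholder path
Mat template = Imgcodecs.imread("template_page1.png");   // placeholder path
Mat aligned = matchTEmplateSIFT(scene, template, false, false);
Imgcodecs.imwrite("aligned_form.png", aligned);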
I want to calculate the perimeter of a white blob in a 512x512 binary image. The image will have only one blob. I used the following code earlier with OpenCV 3, but it doesn't work in OpenCV 4.2: IplImage is deprecated in the latest version, and I cannot pass a Mat object directly to the cvFindContours function. I am new to OpenCV and I don't know how it works. Other related questions regarding the perimeter are still unanswered.
To summarize, the following works in OpenCV 3 but does not work in the current OpenCV version (4.2).
int getPerimeter(unsigned char* inImagePtr, int inW, int inH)
{
int sumEven = 0; int sumOdd = 0;
int sumCorner = 0; int prevCode = 0;
//create a mat input Image
cv::Mat inImage(inH, inW, CV_8UC1, inImagePtr);
//create four connected structuring element
cv::Mat element = cv::Mat::zeros(3, 3, CV_8UC1);
element.data[1] = 1; element.data[3] = 1;
element.data[4] = 1; element.data[5] = 1;
element.data[7] = 1;
//erode input image
cv::Mat erodeImage;
erode(inImage, erodeImage, element);
//Invert eroded Image
cv::threshold(erodeImage, erodeImage, 0, 255, THRESH_BINARY_INV);
//multiply with original binary Image to get the edge Image
cv::Mat edge = erodeImage.mul(inImage);
//Get chain code of the blob
CvChain* chain = 0;
CvMemStorage* storage = 0;
storage = cvCreateMemStorage(0);
auto temp = new IplImage(edge);
cvFindContours(temp, storage, (CvSeq**)(&chain), sizeof(*chain), CV_RETR_EXTERNAL, CV_CHAIN_CODE);
delete temp;
for (; chain != NULL; chain = (CvChain*)chain->h_next)
{
CvSeqReader reader;
int i, total = chain->total;
cvStartReadSeq((CvSeq*)chain, &reader, 0);
for (i = 0; i < total; i++)
{
char code;
CV_READ_SEQ_ELEM(code, reader);
if (code % 2 == 0)
sumEven++;
else
sumOdd++;
if (i > 0) {
if (code != prevCode)
sumCorner++;
}
prevCode = code;
}
}
float perimeter = (float)sumEven*0.980 + (float)sumOdd*1.406 - (float)sumCorner*0.091;
return (roundf(perimeter));
}
This worked just fine for me!
int getPerimeter(unsigned char* inImagePtr, int inW, int inH) {
// create a mat input Image
cv::Mat inImage(inH, inW, CV_8UC1, inImagePtr);
// create four connected structuring element
cv::Mat element = cv::Mat::zeros(3, 3, CV_8UC1);
element.data[1] = 1;
element.data[3] = 1;
element.data[4] = 1;
element.data[5] = 1;
element.data[7] = 1;
// erode input image
cv::Mat erodeImage;
erode(inImage, erodeImage, element);
// Invert eroded Image
cv::threshold(erodeImage, erodeImage, 0, 255, THRESH_BINARY_INV);
// multiply with original binary Image to get the edge Image
cv::Mat edge = erodeImage.mul(inImage);
vector<vector<Point>> contours;
findContours(edge, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE); // Retrieve only external contour
int preValue[2];
int nextValue[2];
int sumEven = 0;
int sumOdd = 0;
//vector<Point>::iterator itr;
for (int ii = 0; ii < contours[0].size(); ii++) {
Point pt = contours[0].at(ii);
preValue[0] = pt.x;
preValue[1] = pt.y;
if (ii != contours[0].size() - 1) {
Point pt_next = contours[0].at(ii + 1);
nextValue[0] = pt_next.x;
nextValue[1] = pt_next.y;
} else {
Point pt_next = contours[0].at(0);
nextValue[0] = pt_next.x;
nextValue[1] = pt_next.y;
}
if ((preValue[0] == nextValue[0]) or (preValue[1] == nextValue[1])) {
sumEven = sumEven + abs(nextValue[0] - preValue[0]) + abs(nextValue[1] - preValue[1]);
} else {
sumOdd = sumOdd + abs(nextValue[0] - preValue[0]);
}
}
int sumCorner = contours[0].size() - 1;
float perimeter = round(sumEven * 0.980 + sumOdd * 1.406 - sumCorner * 0.091);
return (roundf(perimeter));
}
I have the following .fx file:
Texture2D texture2d;
SamplerState linearSampler
{
Filter = MIN_MAG_MIP_LINEAR;
AddressU = Clamp;
AddressV = Clamp;
};
struct VS_IN
{
float4 pos : POSITION;
float3 uvq : TEXCOORD0;
};
struct PS_IN
{
float4 pos : SV_POSITION;
float3 uvq : TEXCOORD0;
};
PS_IN VS(VS_IN input)
{
PS_IN output = (PS_IN)0;
output.pos = float4(input.pos.xyz, 1.0);
output.uvq = input.uvq;
return output;
}
float4 PS(PS_IN input) : SV_Target
{
return texture2d.Sample(linearSampler, input.uvq.xy);
}
technique10 Render
{
pass P0
{
SetVertexShader(CompileShader(vs_4_0, VS()));
SetGeometryShader(NULL);
SetPixelShader(CompileShader(ps_4_0, PS()));
}
}
(Note: uvq is float3 for a reason; q will be used later, but it's just ignored for now.)
When I use this effect to texture map a quad (texture image to the left), I get the rendering to the right: the pixels come from the texture all right but they are rather strangely sampled and repeated in stripes.
The coordinates are nothing special; they simply cover the full texture:
public Vertex[] vertices = {
new Vertex() { Position = new D3.Vector4F(-0.7f, -0.5f, 0, 0), UVQ = new D3.Vector3F(0f, 1f, 0) },
new Vertex() { Position = new D3.Vector4F(-0.4f, 0.6f, 0, 0), UVQ = new D3.Vector3F(0f, 0f, 0) },
new Vertex() { Position = new D3.Vector4F( 0.5f, -0.5f, 0, 0), UVQ = new D3.Vector3F(1f, 1f, 0) },
new Vertex() { Position = new D3.Vector4F( 0.3f, 0.9f, 0, 0), UVQ = new D3.Vector3F(1f, 0f, 0) },
};
The texture has been read using the TextureLoader.cs helper routines as provided in the Code Pack API tutorial.
var TextureVariable = DxEffect.GetVariableByName("texture2d").AsShaderResource;
var stream = Application.GetResourceStream(MakePackUri("Resources/image.png"));
TextureVariable.Resource = DXUtil.TextureLoader.LoadTexture(DxDevice, stream.Stream);
Input layout is as follows:
var layout = new D3D.InputElementDescription[] {
new D3D.InputElementDescription() { SemanticName = "POSITION", SemanticIndex = 0, Format = GX.Format.R32G32B32A32Float, AlignedByteOffset = 0, InputSlot = 0, InputSlotClass = D3D.InputClassification.PerVertexData, InstanceDataStepRate = 0 },
new D3D.InputElementDescription() { SemanticName = "TEXCOORD", SemanticIndex = 0, Format = GX.Format.R32G32B32Float, AlignedByteOffset = 16, InputSlot = 0, InputSlotClass = D3D.InputClassification.PerVertexData, InstanceDataStepRate = 0 }
};
var DxPassDesc = DxTechnique.GetPassByIndex(0).Description;
var DxLayout = DxDevice.CreateInputLayout(layout, DxPassDesc.InputAssemblerInputSignature, DxPassDesc.InputAssemblerInputSignatureSize);
DxDevice.IA.InputLayout = DxLayout;
var vertex = new VertexArray();
var DxVertexBufferDescription = new D3D.BufferDescription() { BindingOptions = D3D.BindingOptions.VertexBuffer, CpuAccessOptions = D3D.CpuAccessOptions.None, MiscellaneousResourceOptions = D3D.MiscellaneousResourceOptions.None, Usage = D3D.Usage.Default, ByteWidth = (uint)Marshal.SizeOf(vertex) };
IntPtr vertexData = Marshal.AllocCoTaskMem(Marshal.SizeOf(vertex));
Marshal.StructureToPtr(vertex, vertexData, false);
var DxVertexBufferInitData = new D3D.SubresourceData() { SystemMemory = vertexData, SystemMemoryPitch = 0, SystemMemorySlicePitch = 0 };
var DxVertices = DxDevice.CreateBuffer(DxVertexBufferDescription, DxVertexBufferInitData);
uint stride = (uint)Marshal.SizeOf(typeof(Vertex));
uint offset = 0;
DxDevice.IA.SetVertexBuffers(0, new D3D.D3DBuffer[] { DxVertices }, new uint[] { stride }, new uint[] { offset });
Marshal.FreeCoTaskMem(vertexData);
Solved. The maximum texture size seems to be 512 (although descriptions at MS seem to suggest larger values for DX10 Texture2D). Well...
I am new to this website, so please let me know if I have made any mistakes in my post.
I have some questions about calculating and drawing a histogram in JavaCV. Below is the code I have written, based on information I found:
This is the error I get: OpenCV Error: One of arguments' values is out of range (index is out of range) in unknown function, file ......\src\opencv\modules\core\src\array.cpp, line 1691
private CvHistogram getHistogram(IplImage image) {//get histogram data, input has been converted to grayscale beforehand
IplImage[] hsvImage1 = {image};
//bins and value-range
int numberOfBins = 256;
float minRange = 0.0f;
float maxRange = 255.0f;
// Allocate histogram object
int dims = 1;
int[] sizes = new int[]{numberOfBins};
int histType = CV_HIST_ARRAY;
float[] minMax = new float[]{minRange, maxRange};
float[][] ranges = new float[][]{minMax};
CvHistogram hist = cvCreateHist(dims, sizes, histType, ranges, 1);
cvCalcHist(hsvImage1, hist, 0, null);
return hist;
}
private IplImage DrawHistogram(CvHistogram hist, IplImage image) {//draw histogram
int scaleX = 1;
int scaleY = 1;
int i;
float[] max_value = {0};
int[] int_value = {0};
cvGetMinMaxHistValue(hist, max_value, max_value, int_value, int_value);//get min and max value for histogram
IplImage imgHist = cvCreateImage(cvSize(256, image.height() ),IPL_DEPTH_8U,1);//create image to store histogram
cvZero(imgHist);
CvPoint pts = new CvPoint(5);
for (i = 0; i < 256; i++) {//draw the histogram
float value = opencv_legacy.cvQueryHistValue_1D(hist, i);
float nextValue = opencv_legacy.cvQueryHistValue_1D(hist, i + 1);
pts.position(0).x(i * scaleX).y(image.height() * scaleY);
pts.position(1).x(i * scaleX + scaleX).y(image.height() * scaleY);
pts.position(2).x(i * scaleX + scaleX).y((int)((image.height() - nextValue * image.height() /max_value[0]) * scaleY));
pts.position(3).x(i * scaleX).y((int)((image.height() - value * image.height() / max_value[0]) * scaleY));
pts.position(4).x(i * scaleX).y(image.height() * scaleY);
cvFillConvexPoly(imgHist, pts.position(0), 5, CvScalar.RED, CV_AA, 0);
}
return imgHist;
}
I have tried the few links I provided at the bottom; however, each of them is in a different language, so I am not sure I have converted them to Java correctly. To be honest, there are a few things I doubt, and I would be glad for any advice, for example:
float[] max_value = {0}; // I found this on the internet and it gets past the syntax error in cvGetMinMaxHistValue(), but I am not sure whether it will cause a logic error
pts.position(3).x(i * scaleX).y((int)((image.height() - value * image.height() / max_value[0]) * scaleY)); // I cast to int to match the type that pts expects; also, max_value[0] starts at 0, so I wonder whether the division will cause a logical error
Links used:
//use this
public CvHistogram getHistogram(IplImage image) {//get histogram data, input has been converted to grayscale beforehand
IplImageArray hsvImage1 = splitChannels(image);
//bins and value-range
int numberOfBins = 256;
float minRange = 0.0f;
float maxRange = 255.0f;
// Allocate histogram object
int dims = 1;
int[] sizes = new int[]{numberOfBins};
int histType = CV_HIST_ARRAY;
float[] minMax = new float[]{minRange, maxRange};
float[][] ranges = new float[][]{minMax};
CvHistogram hist = cvCreateHist(dims, sizes, histType, ranges, 1);
cvCalcHist(hsvImage1, hist, 0, null);
return hist;
}
private IplImageArray splitChannels(IplImage hsvImage) {
CvSize size = hsvImage.cvSize();
int depth = hsvImage.depth();
IplImage channel0 = cvCreateImage(size, depth, 1);
IplImage channel1 = cvCreateImage(size, depth, 1);
IplImage channel2 = cvCreateImage(size, depth, 1);
cvSplit(hsvImage, channel0, channel1, channel2, null);
return new IplImageArray(channel0, channel1, channel2);
}
Your error is in this part:
for (i = 0; i < 256; i++) {//draw the histogram
float value = opencv_legacy.cvQueryHistValue_1D(hist, i);
float nextValue = opencv_legacy.cvQueryHistValue_1D(hist, i + 1);
You use i + 1, which causes the out-of-range error; you can run your for loop only up to 255 to correct it.
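A minimal sketch of that correction, keeping the rest of the drawing code from the question unchanged:
for (i = 0; i < 255; i++) { // stop at 254 so i + 1 never reads past the last bin
    float value = opencv_legacy.cvQueryHistValue_1D(hist, i);
    float nextValue = opencv_legacy.cvQueryHistValue_1D(hist, i + 1);
    // ... build pts and call cvFillConvexPoly exactly as before ...
}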
I hope I helped you. GL
I have developed a program to detect motion using JavaCV. Up to now I have completed cvFindContours on the processed image. The source code is given below:
public class MotionDetect {
public static void main(String args[]) throws Exception, InterruptedException {
//FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(new File("D:/pool.avi"));
OpenCVFrameGrabber grabber = new OpenCVFrameGrabber("D:/2.avi");
final CanvasFrame canvas = new CanvasFrame("My Image");
final CanvasFrame canvas2 = new CanvasFrame("ROI");
canvas.setDefaultCloseOperation(javax.swing.JFrame.EXIT_ON_CLOSE);
grabber.start();
IplImage frame = grabber.grab();
CvSize imgsize = cvGetSize(frame);
IplImage grayImage = cvCreateImage(imgsize, IPL_DEPTH_8U, 1);
IplImage ROIFrame = cvCreateImage(cvSize((265 - 72), (214 - 148)), IPL_DEPTH_8U, 1);
IplImage colorImage;
IplImage movingAvg = cvCreateImage(imgsize, IPL_DEPTH_32F, 3);
IplImage difference = null;
IplImage temp = null;
IplImage motionHistory = cvCreateImage(imgsize, IPL_DEPTH_8U, 3);
CvRect bndRect = cvRect(0, 0, 0, 0);
CvPoint pt1 = new CvPoint(), pt2 = new CvPoint();
CvFont font = null;
//Capture the movie frame by frame.
int prevX = 0;
int numPeople = 0;
char[] wow = new char[65];
int avgX = 0;
//Indicates whether this is the first time in the loop of frames.
boolean first = true;
//Indicates the contour which was closest to the left boundary before the object
//entered the region between the buildings.
int closestToLeft = 0;
//Same as above, but for the right.
int closestToRight = 320;
while (true) {
colorImage = grabber.grab();
if (colorImage != null) {
if (first) {
difference = cvCloneImage(colorImage);
temp = cvCloneImage(colorImage);
cvConvertScale(colorImage, movingAvg, 1.0, 0.0);
first = false;
//cvShowImage("My Window1", difference);
} //else, make a running average of the motion.
else {
cvRunningAvg(colorImage, movingAvg, 0.020, null);
}
//Convert the scale of the moving average.
cvConvertScale(movingAvg, temp, 1.0, 0.0);
//Minus the current frame from the moving average.
cvAbsDiff(colorImage, temp, difference);
//Convert the image to grayscale.
cvCvtColor(difference, grayImage, CV_RGB2GRAY);
//canvas.showImage(grayImage);
//Convert the image to black and white.
cvThreshold(grayImage, grayImage, 70, 255, CV_THRESH_BINARY);
//Dilate and erode to get people blobs
cvDilate(grayImage, grayImage, null, 18);
cvErode(grayImage, grayImage, null, 10);
canvas.showImage(grayImage);
ROIFrame = cvCloneImage(grayImage);
cvSetImageROI(ROIFrame, cvRect(72, 148, (265 - 72), (214 - 148)));
//cvOr(outFrame, tempFrame, outFrame);
cvShowImage("ROI Frame", ROIFrame);
cvRectangle(colorImage, /* the dest image */
cvPoint(72, 148), /* top left point */
cvPoint(265, 214), /* bottom right point */
cvScalar(255, 0, 0, 0), /* the color; blue */
1, 8, 0);
CvMemStorage storage = cvCreateMemStorage(0);
CvSeq contour = new CvSeq(null);
cvFindContours(grayImage, storage, contour, Loader.sizeof(CvContour.class), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
}
//Show the frame.
cvShowImage("My Window", colorImage);
//Wait for the user to see it.
cvWaitKey(10);
}
//If this is the first time, initialize the images.
//Thread.sleep(50);
}
}
}
In this code, for ROIFrame I need to calculate the area of the white contours, or the number of white pixels. Is there a way I can proceed with this?
Use the function cvContourArea(); see the OpenCV documentation.
In your code, after cvFindContours, loop over all of your contours like this:
CvSeq* curr_contour = contour;
while (curr_contour != NULL) {
    double area = fabs(cvContourArea(curr_contour, CV_WHOLE_SEQ, 0));
    curr_contour = curr_contour->h_next;
}
Don't forget to store the area somewhere.
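Since the question uses JavaCV, the same loop written against the JavaCV wrappers might look roughly like this (a sketch, assuming the static imports from opencv_core and opencv_imgproc that the question already uses):
double totalArea = 0;
CvSeq currContour = contour;
while (currContour != null && !currContour.isNull()) {
    totalArea += Math.abs(cvContourArea(currContour, CV_WHOLE_SEQ, 0));
    currContour = currContour.h_next();
}
// If you only need the number of white pixels in the ROI image, cvCountNonZero(ROIFrame) gives that directly.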