Copying pixel by pixel in OpenCV

I have code that copies a video to another video. When I copy it, somehow the angle changes.
Here's a link to a picture of the result:
http://i24.photobucket.com/albums/c22/Klifford_Kho/wrongpixel_zpsloshtqqy.png
Mat frame;
Mat processedImage;
void copy()
{
for (int i = 0; i<400; i++)
{
for (int j = 0; j<200; j++)
{
int b = frame.at<cv::Vec3b>(i, j)[0];
int g = frame.at<cv::Vec3b>(i, j)[1];
int r = frame.at<cv::Vec3b>(i, j)[2];
processedImage.at<cv::Vec3b>(i, j)[0] = b;
processedImage.at<cv::Vec3b>(i, j)[1] = g;
processedImage.at<cv::Vec3b>(i, j)[2] = r;
}
}
}
int main()
{
VideoCapture cap(0); // get first cam
while (cap.isOpened())
{
if (!cap.read(frame)) // cam might need some warmup
continue;
processedImage = cv::Mat(frame.size().height, frame.size().width, CV_8UC1);
processedImage.setTo(cv::Scalar::all(0));
copy();
imshow("Original", frame);
imshow("Processed", processedImage);
if (waitKey(10) == 27)
break;
}
return 0;
}
P.S. I didn't use frame.cols and frame.rows in the loop conditions because they generated an error.
Here's a picture of the error:
http://i24.photobucket.com/albums/c22/Klifford_Kho/wrongpixel_zpswze5qjrr.png

It is because you create a single-channel destination image:
processedImage = cv::Mat(frame.size().height, frame.size().width, CV_8UC1);
Change CV_8UC1 to CV_8UC3; that should also fix the error mentioned at the end of the question.
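As a concrete illustration, here is a minimal sketch of the corrected allocation and copy loop, assuming the rest of the program stays as posted; the 3-channel allocation makes the Vec3b accesses valid, and iterating over frame.rows and frame.cols covers the whole frame:
processedImage = cv::Mat(frame.rows, frame.cols, CV_8UC3, cv::Scalar::all(0)); // 3 channels for BGR
for (int i = 0; i < frame.rows; i++)        // rows
{
    for (int j = 0; j < frame.cols; j++)    // columns
    {
        // copy the whole BGR pixel in one assignment
        processedImage.at<cv::Vec3b>(i, j) = frame.at<cv::Vec3b>(i, j);
    }
}
With a 3-channel destination, frame.copyTo(processedImage) does the same thing without the explicit loops.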

Related

How to calculate perimeter of a binary image using OpenCV 4.2 in C++

I want to calculate the perimeter of a white blob in a 512*512 binary image. The image will have only one blob. I used the following code earlier in OpenCV 3, but somehow it doesn't work in OpenCV 4.2: IplImage is deprecated in the latest version, and I cannot pass a Mat object directly to the cvFindContours function. I am new to OpenCV and I don't know how it works. Other related questions regarding perimeter are still unanswered.
To summarize, the following works in OpenCV 3 but does not work in the current OpenCV version (4.2).
int getPerimeter(unsigned char* inImagePtr, int inW, int inH)
{
int sumEven = 0; int sumOdd = 0;
int sumCorner = 0; int prevCode = 0;
//create a mat input Image
cv::Mat inImage(inH, inW, CV_8UC1, inImagePtr);
//create four connected structuring element
cv::Mat element = cv::Mat::zeros(3, 3, CV_8UC1);
element.data[1] = 1; element.data[3] = 1;
element.data[4] = 1; element.data[5] = 1;
element.data[7] = 1;
//erode input image
cv::Mat erodeImage;
erode(inImage, erodeImage, element);
//Invert eroded Image
cv::threshold(erodeImage, erodeImage, 0, 255, THRESH_BINARY_INV);
//multiply with original binary Image to get the edge Image
cv::Mat edge = erodeImage.mul(inImage);
//Get chain code of the blob
CvChain* chain = 0;
CvMemStorage* storage = 0;
storage = cvCreateMemStorage(0);
auto temp = new IplImage(edge);
cvFindContours(temp, storage, (CvSeq**)(&chain), sizeof(*chain), CV_RETR_EXTERNAL, CV_CHAIN_CODE);
delete temp;
for (; chain != NULL; chain = (CvChain*)chain->h_next)
{
CvSeqReader reader;
int i, total = chain->total;
cvStartReadSeq((CvSeq*)chain, &reader, 0);
for (i = 0; i < total; i++)
{
char code;
CV_READ_SEQ_ELEM(code, reader);
if (code % 2 == 0)
sumEven++;
else
sumOdd++;
if (i > 0) {
if (code != prevCode)
sumCorner++;
}
prevCode = code;
}
}
float perimeter = (float)sumEven*0.980 + (float)sumOdd*1.406 - (float)sumCorner*0.091;
return (roundf(perimeter));
}
This worked just fine for me!
int getPerimeter(unsigned char* inImagePtr, int inW, int inH) {
// create a mat input Image
cv::Mat inImage(inH, inW, CV_8UC1, inImagePtr);
// create four connected structuring element
cv::Mat element = cv::Mat::zeros(3, 3, CV_8UC1);
element.data[1] = 1;
element.data[3] = 1;
element.data[4] = 1;
element.data[5] = 1;
element.data[7] = 1;
// erode input image
cv::Mat erodeImage;
erode(inImage, erodeImage, element);
// Invert eroded Image
cv::threshold(erodeImage, erodeImage, 0, 255, THRESH_BINARY_INV);
// multiply with original binary Image to get the edge Image
cv::Mat edge = erodeImage.mul(inImage);
vector<vector<Point>> contours;
findContours(edge, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE); // Retrieve only external contour
int preValue[2];
int nextValue[2];
int sumEven = 0;
int sumOdd = 0;
//vector<Point>::iterator itr;
for (int ii = 0; ii < contours[0].size(); ii++) {
Point pt = contours[0].at(ii);
preValue[0] = pt.x;
preValue[1] = pt.y;
if (ii != contours[0].size() - 1) {
Point pt_next = contours[0].at(ii + 1);
nextValue[0] = pt_next.x;
nextValue[1] = pt_next.y;
} else {
Point pt_next = contours[0].at(0);
nextValue[0] = pt_next.x;
nextValue[1] = pt_next.y;
}
if ((preValue[0] == nextValue[0]) or (preValue[1] == nextValue[1])) {
sumEven = sumEven + abs(nextValue[0] - preValue[0]) + abs(nextValue[1] - preValue[1]);
} else {
sumOdd = sumOdd + abs(nextValue[0] - preValue[0]);
}
}
int sumCorner = contours[0].size() - 1;
float perimeter = round(sumEven * 0.980 + sumOdd * 1.406 - sumCorner * 0.091);
return (roundf(perimeter));
}
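For completeness, a hypothetical usage sketch (the file name blob.png and the 0/255 binary convention are assumptions, not from the original post): load the blob image as a single-channel Mat and hand its pixel buffer and dimensions to getPerimeter:
cv::Mat binary = cv::imread("blob.png", cv::IMREAD_GRAYSCALE); // 512x512 binary image with one white blob
if (!binary.empty() && binary.isContinuous())
{
    int perimeter = getPerimeter(binary.data, binary.cols, binary.rows);
    std::cout << "perimeter: " << perimeter << std::endl;
}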

Error running face detection source in OpenCV 2.4.9 and VS 2012

I use a pre-built OpenCV 2.4.9. I tested image display with OpenCV 2.4.9 and it works, but this source gives errors. Are the errors because I use a pre-built OpenCV? What should I do?
I copied the XML file into the current folder. My hardware is a Core i5 with a Radeon ATI graphics card.
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <opencv/ml.h>
void doMosaic(IplImage* in, int x, int y,
int width, int height, int size);
int main (int argc, char **argv)
{
int i, c;
IplImage *src_img = 0, *src_gray = 0;
const char *cascade_name = "haarcascade_frontalface_alt.xml";
CvHaarClassifierCascade *cascade = 0;
CvMemStorage *storage = 0;
CvSeq *faces;
cascade = (CvHaarClassifierCascade *) cvLoad (cascade_name, 0, 0, 0);
cvNamedWindow ("Capture", CV_WINDOW_AUTOSIZE);
CvCapture *capture = cvCreateCameraCapture(0);
assert(capture != NULL);
while (1) {
src_img = cvQueryFrame (capture);
src_gray = cvCreateImage (cvGetSize(src_img), IPL_DEPTH_8U, 1);
storage = cvCreateMemStorage (0);
cvClearMemStorage (storage);
cvCvtColor (src_img, src_gray, CV_BGR2GRAY);
cvEqualizeHist (src_gray, src_gray);
faces = cvHaarDetectObjects (src_gray, cascade, storage,
1.11, 4, 0, cvSize (40, 40));
for (i = 0; i < (faces ? faces->total : 0); i++) {
CvRect *r = (CvRect *) cvGetSeqElem (faces, i);
doMosaic(src_img, r->x, r->y, r->width, r->height, 20);
}
cvShowImage("Capture", src_img);
cvReleaseImage(&src_gray);
c = cvWaitKey (2);
if (c == '\x1b')
break;
}
cvReleaseCapture (&capture);
cvDestroyWindow ("Capture");
return 0;
}
void doMosaic(IplImage* in, int x0, int y0,
int width, int height, int size)
{
int b, g, r, col, row;
int xMin = size*(int)floor((double)x0/size);
int yMin = size*(int)floor((double)y0/size);
int xMax = size*(int)ceil((double)(x0+width)/size);
int yMax = size*(int)ceil((double)(y0+height)/size);
for(int y=yMin; y<yMax; y+=size){
for(int x=xMin; x<xMax; x+=size){
b = g = r = 0;
for(int i=0; i<size; i++){
if( y+i > in->height ){
break;
}
row = i;
for(int j=0; j<size; j++){
if( x+j > in->width ){
break;
}
b += (unsigned char)in->imageData[in->widthStep*(y+i)+(x+j)*3];
g += (unsigned char)in->imageData[in->widthStep*(y+i)+(x+j)*3+1];
r += (unsigned char)in->imageData[in->widthStep*(y+i)+(x+j)*3+2];
col = j;
}
}
row++;
col++;
for(int i=0;i<row;i++){
for(int j=0;j<col;j++){
in->imageData[in->widthStep*(y+i)+(x+j)*3] = cvRound((double)b/(row*col));
in->imageData[in->widthStep*(y+i)+(x+j)*3+1] = cvRound((double)g/(row*col));
in->imageData[in->widthStep*(y+i)+(x+j)*3+2] = cvRound((double)r/(row*col));
}
}
}
}
}
The error is a break in Microsoft Visual Studio. Please help me, thanks very much.
First-chance exception at 0x75C4B727 in opencv.exe: Microsoft C++ exception: cv::Exception at memory location 0x003CF678.
If there is a handler for this exception, the program may be safely continued.
Finally I succeeded. First, for the PDB errors, I set Tools > Options > Debugging > Output Window > Module Load Messages to Off; then I unchecked Tools > Options > Debugging > Symbols > Microsoft Symbol Servers; and then I changed capture.open( -1 ); to capture.open( 0 );. With these settings OpenCV now works, thanks very much.
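Not the poster's exact fix, but a defensive sketch of the same setup (using the 2.4.x C API from the question) that checks the cascade and the first frame before using them; a NULL cascade or a NULL frame is a common cause of exactly this kind of cv::Exception break:
CvHaarClassifierCascade *cascade =
    (CvHaarClassifierCascade *) cvLoad("haarcascade_frontalface_alt.xml", 0, 0, 0);
if (cascade == NULL) {
    fprintf(stderr, "Could not load the cascade XML - check the working directory\n");
    return -1;
}
CvCapture *capture = cvCreateCameraCapture(0); // 0 = first camera, as in the accepted setting
if (capture == NULL) {
    fprintf(stderr, "Could not open the camera\n");
    return -1;
}
IplImage *src_img = cvQueryFrame(capture);     // first frame may be NULL while the camera warms up
if (src_img == NULL) {
    fprintf(stderr, "Camera returned an empty frame\n");
    return -1;
}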

Editing a matrix in OpenCV?

I am trying to obtain the first frame, make some modifications pixel by pixel, and write the result to a second image.
int main(int argc, char* argv[])
{
namedWindow("Frame");
testVid("D:\\Penguins.mp4");
destroyAllWindows();
return EXIT_SUCCESS;
}
void testVid(char* videoFilename)
{
VideoCapture capture(videoFilename);
Vec3b pixel;
if (!capture.read(frame))
return;
Mat finalframe(frame.rows,frame.cols, CV_8UC3, Scalar(0, 0, 0));
for (int i = 0; i < frame.rows; i++)
for (int j = 0; j < frame.cols; j++)
{
pixel = frame.at<Vec3b>(Point(j, i));
finalframe.at<Vec3b>(Point(j, i)) = pixel;
}
imshow("Frame", finalframe);
keyboard = waitKey(30);
capture.release();
getchar();
}
However, when I display the second image using imshow(), a blank grey image is shown.
Even if I do not make any modifications, I face similar problems when I try second = first;
The only thing that seems to work is first.copyTo(second);
Any reason why the second image refuses to show anything?
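For reference (not part of the original post), this is the distinction behind the last two observations, shown with an illustrative Mat named after the description (the file name is hypothetical): plain assignment copies only the header and shares the pixel buffer, while copyTo()/clone() allocate an independent copy:
cv::Mat first = cv::imread("Penguins.jpg"); // hypothetical example image
cv::Mat shared = first;        // header copy: both Mats point at the same pixels
cv::Mat deep;
first.copyTo(deep);            // deep copy: independent pixel buffer
// equivalently: cv::Mat deep = first.clone();
first.setTo(cv::Scalar(0, 0, 255));
// 'shared' now shows the all-red image as well; 'deep' still holds the original pixels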

opencv r6010 abort() has been called error in visual studio 2013

I have some code to draw a line between two points on an image, which are selected by mouse, and then to display a histogram.
However, when I press q as required by the code, I get an error saying "R6010 abort() has been called" along with a VC++ runtime error.
Please advise me how I can find this error.
#include <vector>
#include "opencv2/highgui/highgui.hpp"
#include <opencv\cv.h>
#include <iostream>
#include<conio.h>
using namespace cv;
using namespace std;
struct Data_point
{
int x;
unsigned short int y;
};
int PlotMeNow(unsigned short int *values, unsigned int nSamples)
{
std::vector<Data_point> graph(nSamples);
for (unsigned int i = 0; i < nSamples; i++)
{
graph[i].x = i;
graph[i].y = values[i];
}
cv::Size imageSize(5000, 500); // your window size
cv::Mat image(imageSize, CV_8UC1);
if (image.empty()) //check whether the image is valid or not
{
std::cout << "Error : Image cannot be created..!!" << std::endl;
system("pause"); //wait for a key press
return 0;
}
else
{
std::cout << "Good job : Image created successfully..!!" << std::endl;
}
// try to do some offsetting so the graph does not hide on the x or y axis
Data_point dataOffset;
dataOffset.x = 20;
// we have to mirror the y axis!
dataOffset.y = 5000;
for (unsigned int i = 0; i<nSamples; ++i)
{
graph[i].x = (graph[i].x + dataOffset.x) * 3;
graph[i].y = (graph[i].y + dataOffset.y) / 200;
}
// draw the samples
for (unsigned int i = 0; i<nSamples - 1; ++i)
{
cv::Point2f p1;
p1.x = graph[i].x;
p1.y = graph[i].y;
cv::Point2f p2;
p2.x = graph[i + 1].x;
p2.y = graph[i + 1].y;
cv::line(image, p1, p2, 'r', 1, 4, 0);
}
cv::namedWindow("MyWindow1", CV_WINDOW_AUTOSIZE); //create a window with the name "MyWindow"
cv::imshow("MyWindow1", image); //display the image which is stored in the 'img' in the "MyWindow" window
while (true)
{
char c = cv::waitKey(10);
if (c == 'q')
break;
}
destroyWindow("MyWindow1");
destroyWindow("MyWindow"); //destroy the window with the name, "MyWindow"
return 0;
}
void IterateLine(const Mat& image, vector<ushort>& linePixels, Point p2, Point p1, int* count1)
{
LineIterator it(image, p2, p1, 8);
for (int i = 0; i < it.count; i++, it++)
{
linePixels.push_back(image.at<ushort>(it.pos())); //doubt
}
*count1 = it.count;
}
//working line with mouse
void onMouse(int evt, int x, int y, int flags, void* param)
{
if (evt == CV_EVENT_LBUTTONDOWN)
{
std::vector<cv::Point>* ptPtr = (std::vector<cv::Point>*)param;
ptPtr->push_back(cv::Point(x, y));
}
}
void drawline(Mat image, std::vector<Point>& points)
{
cv::namedWindow("Output Window");
cv::setMouseCallback("Output Window", onMouse, (void*)&points);
int X1 = 0, Y1 = 0, X2 = 0, Y2 = 0;
while (1)
{
cv::imshow("Output Window", image);
if (points.size() > 1) //we have 2 points
{
for (auto it = points.begin(); it != points.end(); ++it)
{
}
break;
}
waitKey(10);
}
//just for testing that we are getting pixel values
X1 = points[0].x;
X2 = points[1].x;
Y1 = points[0].y;
Y2 = points[1].y;
// Draw a line
line(image, Point(X1, Y1), Point(X2, Y2), 'r', 2, 8);
cv::imshow("Output Window", image);
//exit image window
while (true)
{
char c = cv::waitKey(10);
if (c == 'q')
break;
}
destroyWindow("Output Window");
}
void show_histogram_image(Mat img1)
{
int sbins = 65536;
int histSize[] = { sbins };
float sranges[] = { 0, 65536 };
const float* ranges[] = { sranges };
cv::MatND hist;
int channels[] = { 0 };
cv::calcHist(&img1, 1, channels, cv::Mat(), // do not use mask
hist, 1, histSize, ranges,
true, // the histogram is uniform
false);
double maxVal = 0;
minMaxLoc(hist, 0, &maxVal, 0, 0);
int xscale = 10;
int yscale = 10;
cv::Mat hist_image;
hist_image = cv::Mat::zeros(65536, sbins*xscale, CV_16UC1);
for (int s = 0; s < sbins; s++)
{
float binVal = hist.at<float>(s, 0);
int intensity = cvRound(binVal * 65535 / maxVal);
rectangle(hist_image, cv::Point(s*xscale, hist_image.rows),
cv::Point((s + 1)*xscale - 1, hist_image.rows - intensity),
cv::Scalar::all(65535), 1);
}
imshow("Histogram", hist_image);
waitKey(0);
}
int main()
{
vector<Point> points1;
vector<ushort>linePixels;
Mat img = cvLoadImage("desert.jpg");
if (img.empty()) //check whether the image is valid or not
{
cout << "Error : Image cannot be read..!!" << endl;
system("pause"); //wait for a key press
return -1;
}
//Draw the line
drawline(img, points1);
//now check the collected points
Mat img1 = cvLoadImage("desert.jpg");
if (img1.empty()) //check whether the image is valid or not
{
cout << "Error : Image cannot be read..!!" << endl;
system("pause"); //wait for a key press
return -1;
}
int *t = new int;
IterateLine( img1, linePixels, points1[1], points1[0], t );
PlotMeNow(&linePixels[0], t[0]);
show_histogram_image(img);
delete t;
_getch();
return 0;
}
This is one of the bad smells in your code:
void IterateLine(const Mat& image, vector<ushort>& linePixels, Point p2, Point p1, int* count1)
{
...
linePixels.push_back(image.at<ushort>(it.pos())); //doubt
Now image is a CV_8UC3 image (from Mat img1 = cvLoadImage("desert.jpg");), but you are accessing it here as if it were CV_16UC1, so what gets put in linePixels is garbage. This will almost certainly cause PlotMeNow() to draw outside its image and corrupt something, which is probably why your code is crashing.
Since it is very unclear what your code is trying to do, I can't suggest what you should have here instead.
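If the goal is simply to sample grey levels along the chosen line, one option (a sketch under that assumption, not necessarily what the poster intends) is to convert the image to a single-channel 8-bit Mat first and read uchar values, so the element type matches the access type:
void IterateLine(const cv::Mat& image, std::vector<ushort>& linePixels,
                 cv::Point p2, cv::Point p1, int* count1)
{
    cv::Mat grey;
    cv::cvtColor(image, grey, CV_BGR2GRAY);             // CV_8UC3 -> CV_8UC1
    cv::LineIterator it(grey, p2, p1, 8);
    for (int i = 0; i < it.count; i++, ++it)
        linePixels.push_back(grey.at<uchar>(it.pos())); // uchar matches CV_8UC1
    *count1 = it.count;
}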
I have just managed to get this working; you only have to put "-1" in your loop limit:
for (unsigned int i = 0; i < nSamples-1; i++)
{
graph[i].x = i;
graph[i].y = values[i];
}

EM clustering based background/foreground segmentation in OpenCV

I tried to perform EM-based background/foreground segmentation using the code below, which I also found on Stack Overflow. But it seems there is an error somewhere, as I never see the second printf statement executed; basically it never reaches the classification/clustering part of the code. The code is given below. Could someone help me with this?
#include <opencv2/opencv.hpp>
#include <opencv2/legacy/legacy.hpp>
char str1[60];
int main()
{
cv::Mat source = cv::imread("C:\\Image Input\\part1.bmp" );
if(!source.data)
printf(" No data \n");
//ouput images
cv::Mat meanImg(source.rows, source.cols, CV_32FC3);
cv::Mat fgImg(source.rows, source.cols, CV_8UC3);
cv::Mat bgImg(source.rows, source.cols, CV_8UC3);
//convert the input image to float
cv::Mat floatSource;
source.convertTo(floatSource, CV_32F);
//now convert the float image to column vector
cv::Mat samples(source.rows * source.cols, 3, CV_32FC1);
int idx = 0;
for (int y = 0; y < source.rows; y++) {
cv::Vec3f* row = floatSource.ptr<cv::Vec3f > (y);
for (int x = 0; x < source.cols; x++) {
samples.at<cv::Vec3f > (idx++, 0) = row[x];
}
}
printf(" After Loop \n");
//we need just 2 clusters
cv::EMParams params(2);
cv::ExpectationMaximization em(samples, cv::Mat(), params);
//the two dominating colors
cv::Mat means = em.getMeans();
//the weights of the two dominant colors
cv::Mat weights = em.getWeights();
//we define the foreground as the dominant color with the largest weight
const int fgId = weights.at<float>(0) > weights.at<float>(1) ? 0 : 1;
printf(" After Training \n");
//now classify each of the source pixels
idx = 0;
for (int y = 0; y < source.rows; y++)
{
printf(" Now Classify\n");
for (int x = 0; x < source.cols; x++)
{
//classify
const int result = cvRound(em.predict(samples.row(idx++), NULL));
//get the according mean (dominant color)
const double* ps = means.ptr<double>(result, 0);
//set the according mean value to the mean image
float* pd = meanImg.ptr<float>(y, x);
//float images need to be in [0..1] range
pd[0] = ps[0] / 255.0;
pd[1] = ps[1] / 255.0;
pd[2] = ps[2] / 255.0;
//set either foreground or background
if (result == fgId) {
fgImg.at<cv::Point3_<uchar> >(y, x, 0) = source.at<cv::Point3_<uchar> >(y, x, 0);
} else {
bgImg.at<cv::Point3_<uchar> >(y, x, 0) = source.at<cv::Point3_<uchar> >(y, x, 0);
}
}
}
printf(" Show Images \n");
cv::imshow("Means", meanImg);
cv::imshow("Foreground", fgImg);
cv::imshow("Background", bgImg);
cv::waitKey(0);
return 0;
}
The code works fine. I think you are using images that are too large, so learning takes too long. Try processing smaller images.
Just one correction: initialize the output images with zeros:
//ouput images
cv::Mat meanImg=Mat::zeros(source.rows, source.cols, CV_32FC3);
cv::Mat fgImg=Mat::zeros(source.rows, source.cols, CV_8UC3);
cv::Mat bgImg=Mat::zeros(source.rows, source.cols, CV_8UC3);
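A small sketch of the "process smaller images" suggestion (the 0.25 scale factor is just an example, not from the original answer): downscale the source right after loading and run the rest of the pipeline on the reduced image, so EM training finishes in a reasonable time:
cv::Mat source = cv::imread("C:\\Image Input\\part1.bmp");
cv::Mat small;
cv::resize(source, small, cv::Size(), 0.25, 0.25, cv::INTER_AREA); // quarter size in each dimension
// ... then build 'samples' from 'small' and train cv::ExpectationMaximization on that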
