I am working on an iOS project that is integrated with OpenCV. My desired output is something like this: how can I detect the upper body portion (i.e. from below the neck down to the legs)?
What I have done so far to achieve body detection is the following. If anyone has done this before, please help me.
-(void)processImage:(Mat&)image
{
    std::vector<cv::Rect> bodies;
    Mat grayscaleFrame;

    // Convert to grayscale and equalize the histogram before running the cascade
    cvtColor(image, grayscaleFrame, CV_BGR2GRAY);
    equalizeHist(grayscaleFrame, grayscaleFrame);

    // detectMultiScale(image, objects, scaleFactor, minNeighbors, flags, minSize)
    upperBodyCascade.detectMultiScale(grayscaleFrame, bodies, 1.1, 2, HaarOptions, cv::Size(30, 30));

    // Draw a rectangle around each detected body
    for (size_t i = 0; i < bodies.size(); i++)
    {
        rectangle(image, bodies[i], Scalar(255, 0, 255));
    }
}
You can use a Haar cascade classifier by loading haarcascade_upperbody.xml.
You can find an example here. You just need to change the loaded classifier.
The code is below:
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;

/** Function Headers */
void detectAndDisplay(Mat frame);

/** Global variables */
String upper_body_cascade_name = "path\\to\\haarcascade_upperbody.xml";
CascadeClassifier upper_body_cascade;
string window_name = "Capture - Upper Body detection";
RNG rng(12345);

/** @function main */
int main(int argc, const char** argv)
{
    VideoCapture capture(0);
    Mat frame;

    //-- 1. Load the cascades
    if (!upper_body_cascade.load(upper_body_cascade_name)){ printf("--(!)Error loading\n"); return -1; }

    //-- 2. Read the video stream
    if (capture.isOpened())
    {
        while (true)
        {
            capture >> frame;
            //-- 3. Apply the classifier to the frame
            if (!frame.empty())
            {
                detectAndDisplay(frame);
            }
            else
            {
                printf(" --(!) No captured frame -- Break!"); break;
            }
            int c = waitKey(10);
            if ((char)c == 'c') { break; }
        }
    }
    return 0;
}

/** @function detectAndDisplay */
void detectAndDisplay(Mat frame)
{
    std::vector<Rect> bodies;
    Mat frame_gray;

    cvtColor(frame, frame_gray, CV_BGR2GRAY);
    equalizeHist(frame_gray, frame_gray);

    //-- Detect upper bodies
    upper_body_cascade.detectMultiScale(frame_gray, bodies, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
    for (size_t i = 0; i < bodies.size(); i++)
    {
        rectangle(frame, bodies[i], Scalar(255, 0, 255));
    }
    //-- Show what you got
    imshow(window_name, frame);
}
Hello, and thanks for your help.
I would like to test the use of shapes for matching in OpenCV, and I have managed to do the matching part.
To locate the rotated shape, I thought the AffineTransformer class would be the right choice. As I don't know how the matching works internally, it would be nice if someone has a link where the procedure is described.
As shawshank mentioned, the following code throws an "Assertion failed" error because the variable matches is empty when it is passed to the estimateTransformation function. Does anybody know how to use this function correctly, and what it actually does?
#include <opencv2/opencv.hpp>
#include <algorithm>
#include <iostream>
#include <string>
#include <opencv2/highgui/highgui.hpp>
using namespace std;
using namespace cv;

bool rotateImage(Mat src, Mat &dst, double angle)
{
    // get rotation matrix for rotating the image around its center
    cv::Point2f center(src.cols/2.0, src.rows/2.0);
    cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
    // determine bounding rectangle
    cv::Rect bbox = cv::RotatedRect(center, src.size(), angle).boundingRect();
    // adjust transformation matrix
    rot.at<double>(0,2) += bbox.width/2.0 - center.x;
    rot.at<double>(1,2) += bbox.height/2.0 - center.y;
    cv::warpAffine(src, dst, rot, bbox.size());
    return true;
}

static vector<Point> sampleContour( const Mat& image, int n=300 )
{
    vector<vector<Point>> contours;
    vector<Point> all_points;
    findContours(image, contours, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);
    for (size_t i=0; i<contours.size(); i++)
    {
        for (size_t j=0; j<contours[i].size(); j++)
        {
            all_points.push_back(contours[i][j]);
        }
    }
    // if too few points were found, pad by repeating points from the start
    int dummy=0;
    for (int add=(int)all_points.size(); add<n; add++)
    {
        all_points.push_back(all_points[dummy++]);
    }
    // shuffle
    random_shuffle(all_points.begin(), all_points.end());
    vector<Point> sampled;
    for (int i=0; i<n; i++)
    {
        sampled.push_back(all_points[i]);
    }
    return sampled;
}

int main(void)
{
    Mat img1, img2;
    vector<Point> img1Points, img2Points;
    float distSC, distHD;

    // read images
    string img1Path = "testimage.jpg";
    img1 = imread(img1Path, IMREAD_GRAYSCALE);
    rotateImage(img1, img2, 45);
    imshow("original", img1);
    imshow("transformed", img2);
    waitKey();

    // Contours
    img1Points = sampleContour(img1);
    img2Points = sampleContour(img2);

    // Calculate distances
    Ptr<ShapeContextDistanceExtractor> mysc = createShapeContextDistanceExtractor();
    Ptr<HausdorffDistanceExtractor> myhd = createHausdorffDistanceExtractor();
    distSC = mysc->computeDistance( img1Points, img2Points );
    distHD = myhd->computeDistance( img1Points, img2Points );
    cout << distSC << endl << distHD << endl;

    vector<DMatch> matches;
    Ptr<AffineTransformer> transformerHD = createAffineTransformer(0);
    transformerHD->estimateTransformation(img1Points, img2Points, matches);
    return 0;
}
I have used the AffineTransformer class on a 2D image. Below is basic code which will give you an idea of what it does.
// My OpenCV AffineTransformer demo code
// I have tested this on a 500 x 500 resolution image
#include <iostream>
#include "opencv2/opencv.hpp"
#include <vector>
using namespace cv;
using namespace std;

int arrSize = 10;
int sourcePx[] = {154,155,159,167,182,209,238,265,295,316};
int sourcePy[] = {190,222,252,285,314,338,344,340,321,290};
int tgtPx[] = {120,127,137,150,188,230,258,285,305,313};
int tgtPy[] = {207,245,275,305,336,345,342,332,305,274};

int main()
{
    // Prepare 'vector of points' from above hardcoded points
    int sInd = 0, eInd = arrSize;
    vector<Point2f> sourceP; for (int i = sInd; i < eInd; i++) sourceP.push_back(Point2f(sourcePx[i], sourcePy[i]));
    vector<Point2f> tgtP;    for (int i = sInd; i < eInd; i++) tgtP.push_back(Point2f(tgtPx[i], tgtPy[i]));

    // Create object of AffineTransformer
    bool fullAffine = true; // change its value and see the difference in the result
    auto aft = cv::createAffineTransformer(fullAffine);

    // Prepare vector<cv::DMatch> - this is just a mapping of corresponding point indices
    std::vector<cv::DMatch> matches;
    for (int i = 0; i < (int)sourceP.size(); ++i) matches.push_back(cv::DMatch(i, i, 0));

    // Read image
    Mat srcImg = imread("image1.jpg");
    Mat tgtImg;

    // estimate points transformation
    aft->estimateTransformation(sourceP, tgtP, matches);
    // apply transformation
    aft->applyTransformation(sourceP, tgtP);
    // warp image
    aft->warpImage(srcImg, tgtImg);

    // show generated output
    imshow("warped output", tgtImg);
    waitKey(0);
    return 0;
}
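Applied to the question's code, the missing step is filling matches before calling estimateTransformation. A minimal sketch under two assumptions: that point i of the first sampled contour should correspond to point i of the second (only a rough assumption after the random shuffling), and that the shape transformers want 32-bit float points rather than integer Point:

// Sketch: replace the last three lines of main() with something like this.
std::vector<cv::DMatch> matches;
for (int i = 0; i < (int)img1Points.size(); ++i)
    matches.push_back(cv::DMatch(i, i, 0)); // queryIdx, trainIdx, distance

// Convert Point -> Point2f before handing the points to the transformer.
std::vector<cv::Point2f> pts1(img1Points.begin(), img1Points.end());
std::vector<cv::Point2f> pts2(img2Points.begin(), img2Points.end());

cv::Ptr<cv::AffineTransformer> transformerHD = cv::createAffineTransformer(false);
transformerHD->estimateTransformation(pts1, pts2, matches);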
I have recently used this piece of code to save frame data from a webcam:
#include <opencv2/opencv.hpp>
#include <fstream>
#include <vector>
using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
    VideoCapture cap(0); // open the default camera
    if (!cap.isOpened()) // check if we succeeded
        return -1;
    cap.set(CV_CAP_PROP_FPS, 15);
    Mat edges;
    namedWindow("image", 1);
    std::vector<cv::Mat> images(100);
    for (int i = 0; i < 100; ++i) {
        // this is optional, preallocation so there's no allocation
        // during capture
        images[i].create(480, 640, CV_8UC3);
    }
    for (int i = 0; i < 100; ++i)
    {
        Mat frame;
        cap >> frame; // get a new frame from camera
        frame.copyTo(images[i]);
    }
    cap.release();
    for (int i = 0; i < 100; ++i)
    {
        imshow("image", images[i]);
        if (waitKey(30) >= 0) break;
    }
    return 0;
}
After this, I want to use imread to analyse the newly split frames. However, I cannot think of a way to accomplish this.
I tried: Mat colorImage = imread(images[i]);
However, it leads to:
error C2664: 'cv::Mat cv::imread(const cv::String &,int)': cannot convert argument 1 from 'std::vector<cv::Mat,std::allocator<_Ty>>' to 'const cv::String &'
with
[
_Ty=cv::Mat
]
Thanks a lot in advance :)
The imread function is used to load an image from disk.
You already have a vector of images, so you can just do:
Mat colorImage = images[i];
And by the way, there is no need for this:
for (int i = 0; i < 100; ++i) {
    // this is optional, preallocation so there's no allocation
    // during capture
    images[i].create(480, 640, CV_8UC3);
}
because you are allocating new space anyway, unless you capture the frames directly like this:
cap >> images[i];
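If you actually want the frames to go through imread, they first have to exist as files on disk. A minimal sketch of that round trip with imwrite and imread (the frame_*.png naming scheme is just a hypothetical example, and std::to_string needs C++11):

// Sketch: write each captured frame to disk, then load one back by filename.
for (int i = 0; i < 100; ++i)
{
    std::string name = "frame_" + std::to_string(i) + ".png"; // hypothetical naming
    cv::imwrite(name, images[i]);
}

// imread takes a filename (a string), never a Mat:
cv::Mat colorImage = cv::imread("frame_0.png");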
Why is webcam image processing very slow while using Xcode for this OpenCV project? Only one of the three windows is working (the similar spaces and HSV windows are not turning up), and everything is very slow. How can I increase the speed of execution of the program?
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace cv;
using namespace std;

Mat img, hsv, res;
const char *win1 = "RGB";
const char *win2 = "HSV";
const char *win3 = "similar spaces";
uchar thresh = 5;

void setColor(uchar hval){
    int i, j;
    for (i = 0; i < res.rows; ++i){
        for (j = 0; j < res.cols; ++j){
            if( hsv.at<Vec3b>(i,j)[0] <= hval+thresh
                && hsv.at<Vec3b>(i,j)[0] >= hval-thresh)
                res.at<uchar>(i,j) = 255;
            else res.at<uchar>(i,j) = 0;
        }
    }
    imshow(win3, res);
}

void MouseCallBackFunc(int event, int x, int y, int flags, void* userdata){
    if(event==EVENT_LBUTTONDOWN){
        cout<<"\t x,y : "<<x<<','<<y<<endl;
        cout<<'\t'<<img.at<Vec3b>(y,x)<<endl;
        setColor(hsv.at<Vec3b>(y,x)[0]);
    }
}

int main()
{
    img = imread("/usr/share/opencv/samples/cpp/stuff.jpg", CV_LOAD_IMAGE_COLOR);
    hsv = Mat::zeros(img.size(), CV_8UC3);
    res = Mat::zeros(img.size(), CV_8UC1);
    char c;
    int i, j;
    namedWindow(win2, CV_WINDOW_NORMAL);
    namedWindow(win3, CV_WINDOW_NORMAL);
    cvtColor(img, hsv, CV_RGB2HSV);
    imshow(win1, img);
    imshow(win2, hsv);
    imshow(win3, res);
    setMouseCallback(win1, MouseCallBackFunc, NULL);

    // VideoCapture stream(0); // 0 is the id of the video device; 0 if you have only one camera
    // if (!stream.isOpened()) { // check if the video device has been initialised
    //     cout << "cannot open camera";
    // }
    // while (true) {
    //     Mat cameraFrame;
    //     stream.read(cameraFrame);
    //     imshow("test", cameraFrame);
    //     c = waitKey(30);
    //     if (c == 27)
    //         break;
    // }

    while((c=waitKey(300))!=27){}
    return 0;
}
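Regarding the speed question: the per-pixel double loop in setColor is a likely bottleneck. A minimal sketch of the same hue test using cv::split and cv::inRange, which replaces the loop with vectorized calls (same hval/thresh semantics, and like the original it does not handle hue wrap-around near 0):

// Sketch: vectorized version of setColor using inRange on the hue plane.
void setColorFast(uchar hval)
{
    std::vector<cv::Mat> channels;
    cv::split(hsv, channels); // channels[0] holds the hue values

    // Mark pixels whose hue lies in [hval - thresh, hval + thresh] as 255, others as 0.
    cv::inRange(channels[0], cv::Scalar(hval - thresh), cv::Scalar(hval + thresh), res);
    imshow(win3, res);
}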
I have code for detection of a face. How do I save the video which is being captured? I'm posting the code for face detection below.
Where do I insert the code which saves the video?
This code works for detecting a face with Ubuntu and OpenCV. Please do help!
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
VideoCapture cap;
cv::VideoWriter output_cap("/home/unifyadmin/Documents/MyVideo.avi",
                           input_cap.get(CV_CAP_PROP_FOURCC),
                           input_cap.get(CV_CAP_PROP_FPS),
                           cv::Size(input_cap.get(CV_CAP_PROP_FRAME_WIDTH),
                                    input_cap.get(CV_CAP_PROP_FRAME_HEIGHT)));
if (!output_cap.isOpened())
{
    std::cout << "!!! Output video could not be opened" << std::endl;
    return;
}
// This part would be similar to your imshow part of your code
while (true)
{
    if (!input_cap.read(frame))
        break;
    output_cap.write(frame);
}
/** Function Headers */
void detectAndDisplay( Mat frame );

/** Global variables */
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);

/** @function main */
int main( int argc, const char** argv )
{
    CvCapture* capture;
    Mat frame;

    //-- 1. Load the cascades
    if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; }
    if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; }

    //-- 2. Read the video stream
    capture = cvCaptureFromCAM( 0 );
    if( capture )
    {
        while( true )
        {
            frame = cvQueryFrame( capture );
            //-- 3. Apply the classifier to the frame
            if( !frame.empty() )
            { detectAndDisplay( frame ); }
            else
            { printf(" --(!) No captured frame -- Break!"); break; }
            int c = waitKey(10);
            if( (char)c == 'c' ) { break; }
        }
    }
    return 0;
}

/** @function detectAndDisplay */
void detectAndDisplay( Mat frame )
{
    std::vector<Rect> faces;
    Mat frame_gray;

    cvtColor( frame, frame_gray, CV_BGR2GRAY );
    equalizeHist( frame_gray, frame_gray );

    //-- Detect faces
    face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
    for( size_t i = 0; i < faces.size(); i++ )
    {
        Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
        ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
        Mat faceROI = frame_gray( faces[i] );
        std::vector<Rect> eyes;

        //-- In each face, detect eyes
        eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
        for( size_t j = 0; j < eyes.size(); j++ )
        {
            Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
            int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
            circle( frame, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
        }
    }
    //-- Show what you got
    imshow( window_name, frame );
}
There are three library methods that you require:
VideoWriter::VideoWriter(const string& filename, int fourcc, double fps, Size frameSize, bool isColor=true)
VideoWriter::open(const string& filename, int fourcc, double fps, Size frameSize, bool isColor=true)
void VideoWriter::write(const Mat& image)
Here is a short example on how to use them:
// Do this near the start of your code
cv::VideoWriter output_cap("C:/MyVideo.avi",
                           input_cap.get(CV_CAP_PROP_FOURCC),
                           input_cap.get(CV_CAP_PROP_FPS),
                           cv::Size(input_cap.get(CV_CAP_PROP_FRAME_WIDTH),
                                    input_cap.get(CV_CAP_PROP_FRAME_HEIGHT)));
if (!output_cap.isOpened())
{
    std::cout << "!!! Output video could not be opened" << std::endl;
    return;
}

// This part would be similar to the imshow part of your code
while (true)
{
    if (!input_cap.read(frame))
        break;
    output_cap.write(frame);
}
Edit - my last contribution to this question:
You need to move the creation of your objects into the main function; that includes the VideoWriter.
I'm not sure how any of this code even works:
VideoCapture cap;
cv::VideoWriter output_cap("/home/unifyadmin/Documents/MyVideo.avi",
                           input_cap.get(CV_CAP_PROP_FOURCC),
                           input_cap.get(CV_CAP_PROP_FPS),
                           cv::Size(input_cap.get(CV_CAP_PROP_FRAME_WIDTH),
                                    input_cap.get(CV_CAP_PROP_FRAME_HEIGHT)));
if (!output_cap.isOpened())
{
    std::cout << "!!! Output video could not be opened" << std::endl;
    return;
}
// This part would be similar to your imshow part of your code
while (true)
{
    if (!input_cap.read(frame))
        break;
    output_cap.write(frame);
}
You need to think about what you are actually coding and where you are writing the code. Don't just take examples from the internet and try to stick them together.
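To make that concrete, here is a minimal sketch of the corrected structure, with both the capture and the writer created inside main (the output path is taken from the question; note that CV_CAP_PROP_FOURCC often returns 0 for live cameras, in which case a fixed codec such as CV_FOURCC('M','J','P','G') may be needed):

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    cv::VideoCapture input_cap(0); // open the default camera
    if (!input_cap.isOpened())
    {
        std::cout << "!!! Input video could not be opened" << std::endl;
        return -1;
    }

    cv::VideoWriter output_cap("/home/unifyadmin/Documents/MyVideo.avi",
                               input_cap.get(CV_CAP_PROP_FOURCC),
                               input_cap.get(CV_CAP_PROP_FPS),
                               cv::Size(input_cap.get(CV_CAP_PROP_FRAME_WIDTH),
                                        input_cap.get(CV_CAP_PROP_FRAME_HEIGHT)));
    if (!output_cap.isOpened())
    {
        std::cout << "!!! Output video could not be opened" << std::endl;
        return -1;
    }

    cv::Mat frame;
    while (input_cap.read(frame))
    {
        output_cap.write(frame);
        // detectAndDisplay(frame) would be called here as well
        if ((char)cv::waitKey(10) == 'c')
            break;
    }
    return 0;
}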
Hi, I am working on a project to do 3D reconstruction from images. I am at the phase of calibrating the camera, which is taking a long time to do. But when I compile the code and display the checkerboard in front of the camera, it goes straight to an unhandled exception error.
When the board is not in the frame there is no error; as soon as it gets into the frame, the unhandled error occurs, and I don't know why.
I have asked a lot of people; nobody seems to be able to help.
Here is my code:
#include <cv.h>
#include <highgui.h>
#include <vector>
#include <stdlib.h>
#include <stdio.h>
using namespace cv;
using namespace std;

int main()
{
    int numBoards = 0;
    int numCornersHor;
    int numCornersVer;

    printf("Enter number of corners along width: ");
    scanf("%d", &numCornersHor);
    printf("Enter number of corners along height: ");
    scanf("%d", &numCornersVer);
    printf("Enter number of boards: ");
    scanf("%d", &numBoards);

    int numSquares = numCornersHor * numCornersVer;
    Size board_sz = Size(numCornersHor, numCornersVer);
    VideoCapture capture = VideoCapture(0);

    vector<vector<Point3d>> object_points;
    vector<vector<Point2d>> image_points;
    vector<Point2d> corners;
    int successes = 0;

    Mat image;
    Mat gray_image;
    capture >> image;

    vector<Point3d> obj;
    for (int j = 0; j < numSquares; j++)
        obj.push_back(Point3d(j / numCornersHor, j % numCornersHor, 0.0f));

    while (successes < numBoards)
    {
        cvtColor(image, gray_image, CV_BGR2GRAY);
        bool found = findChessboardCorners(image, board_sz, corners, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
        if (found)
        {
            cornerSubPix(gray_image, corners, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1));
            drawChessboardCorners(gray_image, board_sz, corners, found);
        }
        imshow("win1", image);
        imshow("win2", gray_image);
        capture >> image;

        int key = waitKey(1);
        if (key == 27)
            return 0;
        if (key == ' ' && found != 0)
        {
            image_points.push_back(corners);
            object_points.push_back(obj);
            printf("Snap stored!\n");
            successes++;
            if (successes >= numBoards)
                break;
        }
    }

    Mat intrinsic = Mat(3, 3, CV_32FC1);
    Mat distCoeffs;
    vector<Mat> rvecs;
    vector<Mat> tvecs;
    intrinsic.ptr<float>(0)[0] = 1;
    intrinsic.ptr<float>(1)[1] = 1;

    calibrateCamera(object_points, image_points, image.size(), intrinsic, distCoeffs, rvecs, tvecs);

    Mat imageUndistorted;
    while (1)
    {
        capture >> image;
        undistort(image, imageUndistorted, intrinsic, distCoeffs);
        imshow("win1", image);
        imshow("win2", imageUndistorted);
        waitKey(1);
    }
    capture.release();
    return 0;
}
The error I get on the console is:
OpenCV ERROR: Assertion failed (ncorners >= 0 && corners.depth() == CV_32F) in unknown function, file .....\src\opencv\modules\imgproc\src\cornersubpix.cpp, line 257.
and the error dialog says:
Unhandled exception at 0x769afc16 in basiccalibration.exe: Microsoft C++ exception: cv::Exception at memory location 0x0021f51c..
Help would be appreciated.
Thanks
Use Point2f and Point3f instead of Point2d and Point3d. Please read the assertion text: it demands a CV_32F depth structure.
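A minimal sketch of the corrected declarations from the question (the rest of the code stays the same):

// findChessboardCorners/cornerSubPix fill 32-bit float points (CV_32F),
// so the containers must use Point2f/Point3f rather than Point2d/Point3d.
vector<vector<Point3f>> object_points;
vector<vector<Point2f>> image_points;
vector<Point2f> corners;

vector<Point3f> obj;
for (int j = 0; j < numSquares; j++)
    obj.push_back(Point3f(j / numCornersHor, j % numCornersHor, 0.0f));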