I'm pretty new to OpenCV and wanted to implement Hough lines for a project. I pulled houghlines.cpp from the OpenCV docs. When I build the source file I get an error. I'm running Visual Studio 2015 with OpenCV 3.1. I don't know much about CUDA and have only just been introduced to the world of OpenCV, so I do need fairly thorough guidance. Thank you.
#include <cmath>
#include <iostream>
#include "opencv2/core.hpp"
#include <opencv2/core/utility.hpp>
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/cudaimgproc.hpp"
using namespace std;
using namespace cv;
using namespace cv::cuda;
static void help()
{
cout << "This program demonstrates line finding with the Hough transform." << endl;
cout << "Usage:" << endl;
cout << "./gpu-example-houghlines <image_name>, Default is ../data/pic1.png\n" << endl;
}
int main(int argc, const char* argv[])
{
const string filename = argc >= 2 ? argv[1] : "../data/pic1.png";
Mat src = imread(filename, IMREAD_GRAYSCALE);
if (src.empty())
{
help();
cout << "can not open " << filename << endl;
return -1;
}
Mat mask;
cv::Canny(src, mask, 100, 200, 3);
Mat dst_cpu;
cv::cvtColor(mask, dst_cpu, COLOR_GRAY2BGR);
Mat dst_gpu = dst_cpu.clone();
vector<Vec4i> lines_cpu;
{
const int64 start = getTickCount();
cv::HoughLinesP(mask, lines_cpu, 1, CV_PI / 180, 50, 60, 5);
const double timeSec = (getTickCount() - start) / getTickFrequency();
cout << "CPU Time : " << timeSec * 1000 << " ms" << endl;
cout << "CPU Found : " << lines_cpu.size() << endl;
}
for (size_t i = 0; i < lines_cpu.size(); ++i)
{
Vec4i l = lines_cpu[i];
line(dst_cpu, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0, 0, 255), 3, LINE_AA);
}
GpuMat d_src(mask);
GpuMat d_lines;
{
const int64 start = getTickCount();
Ptr<cuda::HoughSegmentDetector> hough = cuda::createHoughSegmentDetector(1.0f, (float)(CV_PI / 180.0f), 50, 5);
hough->detect(d_src, d_lines);
const double timeSec = (getTickCount() - start) / getTickFrequency();
cout << "GPU Time : " << timeSec * 1000 << " ms" << endl;
cout << "GPU Found : " << d_lines.cols << endl;
}
vector<Vec4i> lines_gpu;
if (!d_lines.empty())
{
lines_gpu.resize(d_lines.cols);
Mat h_lines(1, d_lines.cols, CV_32SC4, &lines_gpu[0]);
d_lines.download(h_lines);
}
for (size_t i = 0; i < lines_gpu.size(); ++i)
{
Vec4i l = lines_gpu[i];
line(dst_gpu, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0, 0, 255), 3, LINE_AA);
}
imshow("source", src);
imshow("detected lines [CPU]", dst_cpu);
imshow("detected lines [GPU]", dst_gpu);
waitKey();
return 0;
}
Error LNK2019
unresolved external symbol "struct cv::Ptr<class cv::cuda::HoughSegmentDetector> __cdecl cv::cuda::createHoughSegmentDetector(float,float,int,int,int)" (?createHoughSegmentDetector@cuda@cv@@YA?AU?$Ptr@VHoughSegmentDetector@cuda@cv@@@2@MMHHH@Z) referenced in function main
An additional library must be linked when compiling.
On Windows, the library name is opencv_cudaimgproc310.lib. If one is using Visual Studio, the library name must be added under [Configuration Properties] -> [Linker] -> [Input] -> [Additional Dependencies].
On Linux, it is typically libopencv_cudaimgproc.so, which is a symbolic link to libopencv_cudaimgproc.so.3.1, which in turn is a symbolic link to libopencv_cudaimgproc.so.3.1.0, the actual library. If one is using g++, -lopencv_cudaimgproc must be added to the g++ command.
I'm assuming that, in both environments, the library search path is set properly, that is, it contains the path to the OpenCV libraries.
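For example, a minimal g++ invocation on Linux might look like the following sketch (the /usr/local install prefix and the exact set of other OpenCV modules are assumptions; adjust them to your build):

g++ houghlines.cpp -o houghlines \
    -I/usr/local/include \
    -L/usr/local/lib \
    -lopencv_core -lopencv_imgproc -lopencv_imgcodecs -lopencv_highgui -lopencv_cudaimgproc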
I used OpenCV dnn classification, but the results do not match the Caffe prediction. What confused me is that some images get a result similar to Caffe's, while a small number of images do not. When I changed BGR to RGB, most of the results were wrong.
With the default parameters, blobFromImage(norm_img, 1.0, cv::Size(64, 64)) changes BGR to RGB, and the results were wrong. So I used it like this instead: blobFromImage(norm_img, 1.0, cv::Size(64, 64), cv::Scalar(), false); With that, most of the results matched the Caffe prediction. Why do a small number of images still not match?
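For clarity, a minimal sketch of the two variants (this assumes the OpenCV 3.x dnn signature, where the fourth argument is the mean to subtract and the fifth is swapRB):

// Default parameters: swapRB defaults to true in this version,
// so the blob is converted from BGR to RGB before it reaches the network.
Mat blobSwapped = blobFromImage(norm_img, 1.0, cv::Size(64, 64));

// swapRB = false keeps the BGR channel order the Caffe model was trained on.
Mat blobBGR = blobFromImage(norm_img, 1.0, cv::Size(64, 64), cv::Scalar(), false);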
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core/utils/trace.hpp>
using namespace cv;
using namespace cv::dnn;
#include <fstream>
#include <iostream>
#include <cstdlib>
using namespace std;
/* Find best class for the blob (i. e. class with maximal probability) */
static void getMaxClass(const Mat &probBlob, int *classId, double *classProb)
{
Mat probMat = probBlob.reshape(1, 1); // reshape the blob to a 1xN row vector (N = number of classes)
Point classNumber;
minMaxLoc(probMat, NULL, classProb, NULL, &classNumber);
*classId = classNumber.x;
}
static std::vector<String> readClassNames(const char *filename = "./config/type.txt")
{
std::vector<String> classNames;
std::ifstream fp(filename);
if (!fp.is_open())
{
std::cerr << "File with classes labels not found: " << filename << std::endl;
exit(-1);
}
std::string name;
while (!fp.eof())
{
std::getline(fp, name);
if (name.length())
classNames.push_back(name.substr(name.find(' ') + 1));
}
fp.close();
return classNames;
}
int main(int argc, char **argv)
{
CV_TRACE_FUNCTION();
String modelTxt = "./config/HCCR3755_res20_deploy.prototxt";
String modelBin = "./config/HCCR3755-res20_iter_790000.caffemodel";
String imageFile = "./config/b9.jpg";
Net net = dnn::readNetFromCaffe(modelTxt, modelBin);
if (net.empty())
{
std::cerr << "Can't load network by using the following files: " << std::endl;
std::cerr << "prototxt: " << modelTxt << std::endl;
std::cerr << "caffemodel: " << modelBin << std::endl;
exit(-1);
}
Mat img = imread(imageFile);
FileStorage fs("./config/mean.xml", FileStorage::READ);
Mat _mean;
fs["vocabulary"] >> _mean;
if (img.empty())
{
std::cerr << "Can't read image from the file: " << imageFile << std::endl;
exit(-1);
}
cv::Mat img_resize;
resize(img, img_resize, Size(64, 64));
cv::Mat img_float;
img_resize.convertTo(img_float, CV_32FC3);
cv::Mat norm_img;
cv::subtract(img_float, _mean, norm_img);
Mat inputBlob = blobFromImage(norm_img, 1.0, cv::Size(64, 64), cv::Scalar(),false); //Convert Mat to batch of images
Mat prob;
cv::TickMeter t;
for (int i = 0; i < 1; i++)
{
CV_TRACE_REGION("forward");
//! [Set input blob]
net.setInput(inputBlob, "data"); //set the network input
//! [Set input blob]
t.start();
//! [Make forward pass]
prob = net.forward("prob");
//std::cout << prob << std::endl;//compute output
//! [Make forward pass]
t.stop();
}
int classId;
double classProb;
getMaxClass(prob, &classId, &classProb);//find the best class
//! [Gather output]
//! [Print results]
std::vector<String> classNames = readClassNames();
std::cout << "Best class: #" << classId << " '" << classNames.at(classId) << "'" << std::endl;
std::cout << "Probability: " << classProb * 100 << "%" << std::endl;
//! [Print results]
std::cout << "Time: " << (double)t.getTimeMilli() / t.getCounter() << " ms (average from " << t.getCounter() << " iterations)" << std::endl;
getchar();
return 0;
} //main
Is there a way of initializing an OpenCV cv::Mat using a vector<float> object?
Or do I need to loop over every entry of the vector and write it into the cv::Mat object?
I wrote the following test code (incorporating @Miki's comment) to understand this in detail.
You will understand it well when you test it yourself.
#include <opencv2/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char* argv[])
{
vector<float> vec{ 0.1f, 0.9f, 0.2f, 0.8f, 0.3f, 0.7f, 0.4f, 0.6f, 0.5f, 1.0f };
Mat m1( vec );
imshow("m1",m1);
waitKey();
Mat m2( 1,vec.size(), CV_32FC1,vec.data());
imshow("m2",m2);
waitKey();
Mat1f m3( vec.size(), 1, vec.data());
imshow("m3",m3);
waitKey();
Mat1f m4( 1, vec.size(), vec.data());
imshow("m4",m4);
waitKey();
cout << "as seen below all Mat and vector use same data" << endl;
cout << vec[0] << endl;
m1 *= 2;
cout << vec[0] << endl;
m2 *= 2;
cout << vec[0] << endl;
m3 *= 2;
cout << vec[0] << endl;
m4 *= 2;
cout << vec[0] << endl;
return 0;
}
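One follow-up note (my own addition, not part of the test above): every constructor used here wraps the vector's buffer without copying it, which is exactly why the *= 2 operations change vec. If the Mat should own its data, a sketch would be:

Mat m5(vec, true);           // copyData = true: deep copy, modifying m5 leaves vec untouched
Mat m6 = Mat(vec).clone();   // same effect via clone()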
I am running the Canny edge example in Visual Studio 2015 and I got this error.
The application was unable to start correctly (0xc000007b).
And then Visual Studio shows this error.
Unhandled exception at 0x77A2D5B2 (ntdll.dll) in Canny Edge.exe: 0xC000007B: %hs is either not designed to run on Windows or it contains an error. Try installing the program again using the original installation media or contact your system administrator or the software vendor for support. Error status 0x.
I'm quite sure this code works, as I ran it before in Visual Studio 2013. Here is my code.
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <algorithm>
using namespace cv;
using namespace std;
void help()
{
cout << "\nThis program demonstrates line finding with the Hough transform.\n"
"Usage:\n"
"./houghlines <image_name>, Default is pic1.jpg\n" << endl;
}
bool less_by_y(const cv::Point& lhs, const cv::Point& rhs)
{
return lhs.y < rhs.y;
}
int main(int argc, char** argv)
{
const char* filename = argc >= 2 ? argv[1] : "pic1.jpg";
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
Rect roi;
Mat src = imread("test_4_1.png");
if (src.empty())
{
help();
cout << "can not open " << filename << endl;
return -1;
}
Mat dst, cdst;
Canny(src, dst, 50, 200, 3);
cvtColor(dst, cdst, CV_GRAY2BGR);
findContours(dst, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
//vector<Vec2f> lines;
//HoughLines(dst, lines, 1, CV_PI / 180, 50, 0, 0);
//for (size_t i = 0; i < lines.size(); i++)
//{
// float rho = lines[i][0], theta = lines[i][1];
// Point pt1, pt2;
// double a = cos(theta), b = sin(theta);
// double x0 = a*rho, y0 = b*rho;
// pt1.x = cvRound(x0 + 1000 * (-b));
// pt1.y = cvRound(y0 + 1000 * (a));
// pt2.x = cvRound(x0 - 1000 * (-b));
// pt2.y = cvRound(y0 - 1000 * (a));
// line(cdst, pt1, pt2, Scalar(0, 0, 255), 1, CV_AA);
// cout << pt1 << " " << pt2 << endl;
//}
vector<Vec4i> lines;
HoughLinesP(dst, lines, 1, CV_PI / 180, 30, 50, 10);
for (size_t i = 0; i < lines.size(); i++)
{
Vec4i l = lines[i];
line(cdst, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0, 0, 255), 1, CV_AA);
cout << l << endl;
}
cout << endl << lines.size() << endl;
cout << arcLength(contours[0], true) << endl;
cout << dst.size() << endl << endl;
for (int a = 0; a < contours[0].size(); a++){
cout << contours[0][a] << " ";
}
vector<Point> test = contours[0];
auto mmx = std::minmax_element(test.begin(), test.end(), less_by_y);
cout << endl << *mmx.first._Ptr << endl << *mmx.second._Ptr;
vector<Point> test2 = contours[1];
auto mmx_1 = std::minmax_element(test2.begin(), test2.end(), less_by_y);
cout << endl << *mmx_1.first._Ptr << endl << *mmx_1.second._Ptr;
imshow("source", src);
imshow("detected lines", cdst);
/* ROI by creating mask for the parallelogram */
Mat mask = cvCreateMat(dst.size().height, dst.size().width, CV_8UC1);
// Create black image with the same size as the original
for (int i = 0; i < mask.cols; i++)
for (int j = 0; j < mask.rows; j++)
mask.at<uchar>(Point(i, j)) = 0;
cout <<endl<<endl<< *mmx.first._Ptr << *mmx.second._Ptr << *mmx_1.first._Ptr << *mmx_1.second._Ptr << endl;
// Create Polygon from vertices
vector<Point> ROI_Vertices = { *mmx.first._Ptr, *mmx.second._Ptr, *mmx_1.first._Ptr, *mmx_1.second._Ptr};
vector<Point> ROI_Poly;
approxPolyDP(ROI_Vertices, ROI_Poly, 1.0, false);
// Fill polygon white
fillConvexPoly(mask, &ROI_Poly[0], ROI_Poly.size(), 255, 8, 0);
cout << ROI_Poly.size() << endl;
// Create new image for result storage
Mat imageDest = cvCreateMat(dst.size().height, dst.size().width, CV_8UC3);
// Cut out ROI and store it in imageDest
src.copyTo(imageDest, mask);
imshow("mask", mask);
imshow("image", imageDest);
waitKey();
return 0;
}
Actually my comment is the answer, with some additions:
Which OpenCV libs are you linking to? Are you linking against the vc12 (Visual Studio 2013) builds? Because you need libraries built with the vc14 toolset for MSVS 2015.
OpenCV doesn't come with Visual Studio 2015 pre-builds, so you need to build OpenCV yourself for VS2015.
This person seems to have had a similar problem and talks you through how to compile OpenCV for VS2015.
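To illustrate the toolset mismatch (the layout below is an assumption based on the standard pre-built OpenCV packages, not taken from the question): the import libraries live in folders named after the compiler toolset, and they must match the Visual Studio version you build with; if there is no folder for your toolset, you build OpenCV yourself.

opencv/build/x64/vc12/lib   -- built with Visual Studio 2013 (toolset v120)
opencv/build/x64/vc14/lib   -- built with Visual Studio 2015 (toolset v140)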
I am working on face detection in OpenCV with the Haar classifier. Here is my code:
#include "stdafx.h"
#include <opencv2\objdetect\objdetect.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <opencv\cv.h>
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
void detectAndDisplay(Mat frame);
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eye_cascade;
string window_name = "Capture- Face detection";
int _tmain(int argc, _TCHAR* argv[])
{
Mat frame = imread("C:/Users/Public/Pictures/Sample Pictures/lena.png");
imshow("original picture", frame);
if (face_cascade.load(face_cascade_name))
{
cout << "\n Error loading " << endl;
}
if (eye_cascade.load(eyes_cascade_name))
{
cout << "\n Error Loading " << endl;
}
if (!frame.empty())
{
detectAndDisplay(frame);
}
waitKey(0);
return 0;
}
void detectAndDisplay(Mat frame)
{
vector<Rect>faces;
imshow("lena.png", frame);
Mat frame_gray;
cvtColor(frame, frame_gray, CV_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);
imshow("Gray Color Image", frame_gray);
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(20, 20));
int k = faces.size();
for (size_t i = 0; i <faces.size(); i++)
{
Point center(faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5);
ellipse(frame, center, Size(faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar(255, 0, 0));
Mat faceROI = frame_gray(faces[i]);
vector<Rect>eyes;
eye_cascade.detectMultiScale(faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
for (int j = 0; j < eyes.size(); j++)
{
Point center(faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5);
int radius = cvRound((eyes[j].width + eyes[j].height)*0.25);
circle(frame, center, radius, Scalar(0, 0,255), 4, 8, 0);
}
}
imshow(window_name, frame);
}
Here my code is not working: no face is being detected. I am checking int k = faces.size(), and it gets the value 0. What could be the possible error?
That is because the if-conditions you use to load the cascades have their logic inverted:
Yours is:
if (face_cascade.load(face_cascade_name))
{
cout << "\n Error loading " << endl;
}
if (eye_cascade.load(eyes_cascade_name))
{
cout << "\n Error Loading " << endl;
}
But it should be:
if (!face_cascade.load(face_cascade_name))
{
cout << "\n Error loading " << endl;
}
if (!eye_cascade.load(eyes_cascade_name))
{
cout << "\n Error Loading " << endl;
}
You forgot the '!' in the conditions.
face_cascade.load(face_cascade_name) returns false when the classifier fails to load, not when it loads successfully. You can see this usage in one of the samples provided with the OpenCV library, facedetect.cpp.
This is why you don't see that the loading process failed and that you provided a wrong path to the Haar cascade file.
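As a small hardening step on top of that (a sketch of mine, not from the original post): abort as soon as a cascade fails to load and print the offending path, so a wrong file location is impossible to miss.

if (!face_cascade.load(face_cascade_name))
{
    cerr << "Error loading cascade: " << face_cascade_name << endl;
    return -1; // no point running detection without the classifier
}
if (!eye_cascade.load(eyes_cascade_name))
{
    cerr << "Error loading cascade: " << eyes_cascade_name << endl;
    return -1;
}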
I am a newbie to OpenCV, so please bear with me. I am trying to dump the histogram Mat object for a given image. It fails with the error below. Any help is appreciated.
The first cout in the program below, i.e. of the loaded image, prints successfully, while the second cout, of the image's histogram, fails with the error below.
OpenCV Error: Assertion failed (m.dims <= 2) in FormattedImpl, file /mycode/ws/opencv/opencv-3.0.0-beta/modules/core/src/out.cpp, line 86
libc++abi.dylib: terminating with uncaught exception of type cv::Exception: /mycode/ws/opencv/opencv-3.0.0-beta/modules/core/src/out.cpp:86: error: (-215) m.dims <= 2 in function FormattedImpl
Here is the complete code
#include <stdio.h>
#include <string>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main(int argc, char** argv) {
if (argc != 2) {
printf("usage: opencv.out <Image_Path>\n");
return -1;
}
string imagePath = (argv[1]);
cout << "loading image..." << imagePath << endl;
Mat image = imread(imagePath, 1);
Mat hist;
int imgCount = 1;
int dims = 3;
const int histSizes[] = {4, 4, 4};
const int channels[] = {0, 1, 2};
float rRange[] = {0, 256};
float gRange[] = {0, 256};
float bRange[] = {0, 256};
const float *ranges[] = {rRange, gRange, bRange};
Mat mask = Mat();
calcHist(&image, imgCount, channels, mask, hist, dims, histSizes, ranges);
cout << image << "Loaded image..." << endl;
cout << "Hist of image..." << hist;
return 0;
}
Based on the OpenCV 2.4.9 source code:
static inline std::ostream& operator << (std::ostream& out, const Mat& mtx)
{
Formatter::get()->write(out, mtx);
return out;
}
This is the function you are calling when using the << operator. Formatter::get() returns the appropriate formatter class based on the programming language you are using.
The write() function basically calls:
static void writeMat(std::ostream& out, const Mat& m, char rowsep, char elembrace, bool singleLine)
{
CV_Assert(m.dims <= 2);
int type = m.type();
char crowbrace = getCloseBrace(rowsep);
char orowbrace = crowbrace ? rowsep : '\0';
if( orowbrace || isspace(rowsep) )
rowsep = '\0';
for( int i = 0; i < m.rows; i++ )
{
if(orowbrace)
out << orowbrace;
if( m.data )
writeElems(out, m.ptr(i), m.cols, type, elembrace);
if(orowbrace)
out << crowbrace << (i+1 < m.rows ? ", " : "");
if(i+1 < m.rows)
{
if(rowsep)
out << rowsep << (singleLine ? " " : "");
if(!singleLine)
out << "\n ";
}
}
}
As you can see, if your Mat's dimensionality is greater than 2, the assertion is triggered, just as in your code (CV_Assert(m.dims <= 2)).
calcHist() with the parameters you gave produces a 3-dimensional Mat, and thus it cannot be displayed using the << operator.
By calling calcHist() that way you are getting a 3-dimensional histogram, and I don't see a simple solution to visualize that in OpenCV (which doesn't mean it can't be done). If it's something you must do, I would suggest looking into OpenGL for 3D data visualization. If not, you could simply call this function for each channel separately; you will get 3 one-dimensional histograms which you can print using the << operator.
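As a rough sketch of that last suggestion (the variable names are mine, and it assumes image is the 3-channel BGR Mat loaded above): split the image and compute one 1-D, 4-bin histogram per channel; each result is a 4x1 Mat, so the << operator works.

vector<Mat> planes;
split(image, planes);                 // one single-channel Mat per B, G, R plane
int histSize = 4;                     // same bin count as in the question
float range[] = {0, 256};
const float* histRange = range;
for (int c = 0; c < 3; c++)
{
    Mat hist1d;
    calcHist(&planes[c], 1, 0, Mat(), hist1d, 1, &histSize, &histRange);
    cout << "channel " << c << " histogram:" << endl << hist1d << endl;
}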