Is there a way of initializing an OpenCV cv::Mat using a vector<float> object?
Or do I need to loop over every entry of the vector and write it into the cv::Mat object?
I wrote the following test code (incorporating #Miki's comment) to understand this in detail.
You will see what happens when you run it.
#include <opencv2/highgui.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

int main(int argc, char* argv[])
{
    vector<float> vec{ 0.1f, 0.9f, 0.2f, 0.8f, 0.3f, 0.7f, 0.4f, 0.6f, 0.5f, 1.0f };

    // Wraps the vector's data (copyData defaults to false)
    Mat m1(vec);
    imshow("m1", m1);
    waitKey();

    // 1 x N header over the same buffer
    Mat m2(1, (int)vec.size(), CV_32FC1, vec.data());
    imshow("m2", m2);
    waitKey();

    // N x 1 typed header over the same buffer
    Mat1f m3((int)vec.size(), 1, vec.data());
    imshow("m3", m3);
    waitKey();

    // 1 x N typed header over the same buffer
    Mat1f m4(1, (int)vec.size(), vec.data());
    imshow("m4", m4);
    waitKey();

    cout << "as seen below, all Mats and the vector use the same data" << endl;
    cout << vec[0] << endl;
    m1 *= 2; cout << vec[0] << endl;
    m2 *= 2; cout << vec[0] << endl;
    m3 *= 2; cout << vec[0] << endl;
    m4 *= 2; cout << vec[0] << endl;

    return 0;
}
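Note that every *= 2 above also doubles vec[0]: Mat m1(vec) wraps the vector's buffer because that constructor's copyData parameter defaults to false, and m2, m3 and m4 are explicitly built on top of vec.data(). If you want the Mat to own an independent copy of the data instead, pass copyData = true; a minimal sketch:

#include <opencv2/core.hpp>
#include <iostream>
#include <vector>
using namespace cv;
using namespace std;

int main()
{
    vector<float> vec{ 0.1f, 0.9f, 0.2f };
    Mat shared(vec);        // copyData defaults to false: wraps vec's buffer
    Mat owned(vec, true);   // copyData = true: deep copy owning its own buffer
    owned *= 2;
    cout << vec[0] << endl; // still 0.1 - the copy left vec untouched
    shared *= 2;
    cout << vec[0] << endl; // now 0.2 - the wrapper modified vec directly
    return 0;
}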
I'm pretty new to OpenCV and I wanted to implement HoughLines for a project. I pulled houghlines.cpp from the OpenCV docs, but when I build the source file I get an error. I'm running Visual Studio 2015 with OpenCV 3.1. I don't really know much about CUDA and have just been introduced to the world of OpenCV, so I do require more thorough guidance. Thank you.
#include <cmath>
#include <iostream>

#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/cudaimgproc.hpp"

using namespace std;
using namespace cv;
using namespace cv::cuda;

static void help()
{
    cout << "This program demonstrates line finding with the Hough transform." << endl;
    cout << "Usage:" << endl;
    cout << "./gpu-example-houghlines <image_name>, Default is ../data/pic1.png\n" << endl;
}

int main(int argc, const char* argv[])
{
    const string filename = argc >= 2 ? argv[1] : "../data/pic1.png";

    Mat src = imread(filename, IMREAD_GRAYSCALE);
    if (src.empty())
    {
        help();
        cout << "can not open " << filename << endl;
        return -1;
    }

    Mat mask;
    cv::Canny(src, mask, 100, 200, 3);

    Mat dst_cpu;
    cv::cvtColor(mask, dst_cpu, COLOR_GRAY2BGR);
    Mat dst_gpu = dst_cpu.clone();

    vector<Vec4i> lines_cpu;
    {
        const int64 start = getTickCount();
        cv::HoughLinesP(mask, lines_cpu, 1, CV_PI / 180, 50, 60, 5);
        const double timeSec = (getTickCount() - start) / getTickFrequency();
        cout << "CPU Time : " << timeSec * 1000 << " ms" << endl;
        cout << "CPU Found : " << lines_cpu.size() << endl;
    }

    for (size_t i = 0; i < lines_cpu.size(); ++i)
    {
        Vec4i l = lines_cpu[i];
        line(dst_cpu, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0, 0, 255), 3, LINE_AA);
    }

    GpuMat d_src(mask);
    GpuMat d_lines;
    {
        const int64 start = getTickCount();
        Ptr<cuda::HoughSegmentDetector> hough = cuda::createHoughSegmentDetector(1.0f, (float)(CV_PI / 180.0f), 50, 5);
        hough->detect(d_src, d_lines);
        const double timeSec = (getTickCount() - start) / getTickFrequency();
        cout << "GPU Time : " << timeSec * 1000 << " ms" << endl;
        cout << "GPU Found : " << d_lines.cols << endl;
    }

    vector<Vec4i> lines_gpu;
    if (!d_lines.empty())
    {
        lines_gpu.resize(d_lines.cols);
        Mat h_lines(1, d_lines.cols, CV_32SC4, &lines_gpu[0]);
        d_lines.download(h_lines);
    }

    for (size_t i = 0; i < lines_gpu.size(); ++i)
    {
        Vec4i l = lines_gpu[i];
        line(dst_gpu, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0, 0, 255), 3, LINE_AA);
    }

    imshow("source", src);
    imshow("detected lines [CPU]", dst_cpu);
    imshow("detected lines [GPU]", dst_gpu);

    waitKey();
    return 0;
}
Error LNK2019
unresolved external symbol "struct cv::Ptr<class cv::cuda::HoughSegmentDetector> __cdecl cv::cuda::createHoughSegmentDetector(float,float,int,int,int)" (?createHoughSegmentDetector@cuda@cv@@YA?AU?$Ptr@VHoughSegmentDetector@cuda@cv@@@2@MMHHH@Z) referenced in function main
An additional library must be linked when compiling.
On Windows, the library name is opencv_cudaimgproc310.lib. If you are using Visual Studio, add it under [Configuration Properties] -> [Linker] -> [Input] -> [Additional Dependencies].
On Linux, it is typically libopencv_cudaimgproc.so, which is a symbolic link to libopencv_cudaimgproc.so.3.1, which in turn is a symbolic link to libopencv_cudaimgproc.so.3.1.0, the actual library. If you are using g++, add -lopencv_cudaimgproc to the g++ command.
I'm assuming that, in both environments, the library search path is set properly, that is, that it contains the path to the OpenCV libraries.
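For example, a Linux build line might look like the following (a sketch only; the exact set of -l flags depends on which OpenCV modules your program uses and on where your libraries are installed):

g++ houghlines.cpp -o houghlines -lopencv_core -lopencv_imgproc -lopencv_imgcodecs -lopencv_highgui -lopencv_cudaimgproc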
I used OpenCV dnn for classification, but the result does not match the Caffe prediction. What confused me was that some images get a result similar to Caffe's, while a small number do not. When the conversion swapped BGR to RGB, most of the results were wrong.
(Example images of a matching result and of a differing result omitted.)
With default parameters, blobFromImage(norm_img, 1.0, cv::Size(64, 64)) swaps BGR to RGB, and the results were wrong, so I call it as blobFromImage(norm_img, 1.0, cv::Size(64, 64), cv::Scalar(), false). Most results then match the Caffe prediction, but why do a small number of images still not match?
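For reference, the overload in question is declared approximately like this in the OpenCV 3.x dnn module (later versions append further defaulted parameters, such as crop):

Mat blobFromImage(InputArray image, double scalefactor = 1.0,
                  const Size& size = Size(), const Scalar& mean = Scalar(),
                  bool swapRB = true);

Leaving swapRB at its default of true swaps the first and last channels, whereas passing false keeps the BGR channel order that a Caffe model is typically trained on.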
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core/utils/trace.hpp>
using namespace cv;
using namespace cv::dnn;

#include <fstream>
#include <iostream>
#include <cstdlib>
using namespace std;

/* Find the best class for the blob (i.e. the class with maximal probability) */
static void getMaxClass(const Mat &probBlob, int *classId, double *classProb)
{
    Mat probMat = probBlob.reshape(1, 1); // reshape the blob to a 1xN matrix
    Point classNumber;
    minMaxLoc(probMat, NULL, classProb, NULL, &classNumber);
    *classId = classNumber.x;
}

static std::vector<String> readClassNames(const char *filename = "./config/type.txt")
{
    std::vector<String> classNames;

    std::ifstream fp(filename);
    if (!fp.is_open())
    {
        std::cerr << "File with classes labels not found: " << filename << std::endl;
        exit(-1);
    }

    std::string name;
    while (std::getline(fp, name))
    {
        if (name.length())
            classNames.push_back(name.substr(name.find(' ') + 1));
    }

    fp.close();
    return classNames;
}

int main(int argc, char **argv)
{
    CV_TRACE_FUNCTION();

    String modelTxt = "./config/HCCR3755_res20_deploy.prototxt";
    String modelBin = "./config/HCCR3755-res20_iter_790000.caffemodel";
    String imageFile = "./config/b9.jpg";

    Net net = dnn::readNetFromCaffe(modelTxt, modelBin);
    if (net.empty())
    {
        std::cerr << "Can't load network by using the following files: " << std::endl;
        std::cerr << "prototxt: " << modelTxt << std::endl;
        std::cerr << "caffemodel: " << modelBin << std::endl;
        exit(-1);
    }

    Mat img = imread(imageFile);

    FileStorage fs("./config/mean.xml", FileStorage::READ);
    Mat _mean;
    fs["vocabulary"] >> _mean;

    if (img.empty())
    {
        std::cerr << "Can't read image from the file: " << imageFile << std::endl;
        exit(-1);
    }

    cv::Mat img_resize;
    resize(img, img_resize, Size(64, 64));

    cv::Mat img_float;
    img_resize.convertTo(img_float, CV_32FC3);

    cv::Mat norm_img;
    cv::subtract(img_float, _mean, norm_img);

    Mat inputBlob = blobFromImage(norm_img, 1.0, cv::Size(64, 64), cv::Scalar(), false); // Convert Mat to batch of images

    Mat prob;
    cv::TickMeter t;
    for (int i = 0; i < 1; i++)
    {
        CV_TRACE_REGION("forward");
        //! [Set input blob]
        net.setInput(inputBlob, "data"); // set the network input
        //! [Set input blob]
        t.start();
        //! [Make forward pass]
        prob = net.forward("prob"); // compute output
        //std::cout << prob << std::endl;
        //! [Make forward pass]
        t.stop();
    }

    int classId;
    double classProb;
    getMaxClass(prob, &classId, &classProb); // find the best class
    //! [Gather output]

    //! [Print results]
    std::vector<String> classNames = readClassNames();
    std::cout << "Best class: #" << classId << " '" << classNames.at(classId) << "'" << std::endl;
    std::cout << "Probability: " << classProb * 100 << "%" << std::endl;
    //! [Print results]
    std::cout << "Time: " << (double)t.getTimeMilli() / t.getCounter() << " ms (average from " << t.getCounter() << " iterations)" << std::endl;

    getchar();
    return 0;
} // main
I have a vector that has lots of NaNs for x,y positions that I want to remove (doing some OpenCV work). I cannot figure out how to use remove_if (in conjunction with erase) to remove the NaNs. I've seen lots of examples where the vector holds float or int, but not Point2f. Any simple example would be very helpful. Thanks.
You can use a lambda function, a functor, or a function pointer. This is an example with a lambda function:
#include <opencv2/opencv.hpp>
#include <algorithm>
#include <iostream>
#include <cmath>
using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
    vector<Point2f> pts{ Point2f(1.f, 2.f), Point2f(3.f, sqrt(-1.0f)), Point2f(2.f, 3.f) };

    cout << "Before" << endl;
    for (const auto& p : pts) {
        cout << p << " ";
    }
    cout << endl;

    pts.erase(remove_if(pts.begin(), pts.end(), [](const Point2f& p)
    {
        // Check if a coordinate is NaN
        return isnan(p.x) || isnan(p.y);
    }), pts.end());

    cout << "After" << endl;
    for (const auto& p : pts) {
        cout << p << " ";
    }
    cout << endl;

    return 0;
}
That will print:
Before
[1, 2] [3, -1.#IND] [2, 3]
After
[1, 2] [2, 3]
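If you prefer a named predicate over a lambda (the functor / function-pointer options mentioned above), the same erase-remove call can take a free function; a small self-contained sketch:

#include <opencv2/opencv.hpp>
#include <algorithm>
#include <cmath>

// Predicate: true for points with at least one NaN coordinate
static bool hasNan(const cv::Point2f& p)
{
    return std::isnan(p.x) || std::isnan(p.y);
}

int main()
{
    std::vector<cv::Point2f> pts{ {1.f, 2.f}, {3.f, std::sqrt(-1.f)}, {2.f, 3.f} };
    pts.erase(std::remove_if(pts.begin(), pts.end(), hasNan), pts.end());
    // pts now holds only [1, 2] and [2, 3]
    return 0;
}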
I am trying to classify my images by whether characters are printed correctly on a surface or not.
To do this, I first extract SURF features from real images and from real images I manually defected, build a bag-of-words vocabulary, save it to an XML file, and then try to predict.
However, unless I use a completely different image or a heavily cropped one, my SVM classifier predicts it as correct.
These are the images I used for training:
https://www.dropbox.com/sh/xked9ywnibzv3tt/AADC0lP4WYAo3ddEDgvHpFhha/negative?dl=0
Here is my code.
#include <stdio.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>

#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <opencv2/ml/ml.hpp>

using namespace cv;
using namespace std;

Ptr<FeatureDetector> detector = FeatureDetector::create("SURF");
Ptr<DescriptorExtractor> descriptors = DescriptorExtractor::create("SURF");

string to_string(const int val) {
    int i = val;
    std::string s;
    std::stringstream out;
    out << i;
    s = out.str();
    return s;
}

Mat compute_features(Mat image) {
    vector<KeyPoint> keypoints;
    Mat features;

    detector->detect(image, keypoints);
    KeyPointsFilter::retainBest(keypoints, 1500);
    descriptors->compute(image, keypoints, features);

    return features;
}

BOWKMeansTrainer addFeaturesToBOWKMeansTrainer(String dir, BOWKMeansTrainer& bowTrainer) {
    DIR *dp;
    struct dirent *dirp;
    struct stat filestat;

    dp = opendir(dir.c_str());

    Mat features;
    Mat img;
    string filepath;

    #pragma loop(hint_parallel(4))
    for (; (dirp = readdir(dp));) {
        filepath = dir + dirp->d_name;

        cout << "Reading... " << filepath << endl;
        if (stat(filepath.c_str(), &filestat)) continue;
        if (S_ISDIR(filestat.st_mode)) continue;

        img = imread(filepath, 0);
        features = compute_features(img);
        bowTrainer.add(features);
    }

    return bowTrainer;
}

void computeFeaturesWithBow(string dir, Mat& trainingData, Mat& labels, BOWImgDescriptorExtractor& bowDE, int label) {
    DIR *dp;
    struct dirent *dirp;
    struct stat filestat;

    dp = opendir(dir.c_str());

    vector<KeyPoint> keypoints;
    Mat features;
    Mat img;
    string filepath;

    #pragma loop(hint_parallel(4))
    for (; (dirp = readdir(dp));) {
        filepath = dir + dirp->d_name;

        cout << "Reading: " << filepath << endl;
        if (stat(filepath.c_str(), &filestat)) continue;
        if (S_ISDIR(filestat.st_mode)) continue;

        img = imread(filepath, 0);

        detector->detect(img, keypoints);
        bowDE.compute(img, keypoints, features);

        trainingData.push_back(features);
        labels.push_back((float)label);
    }

    cout << string(100, '\n');
}

int main() {
    initModule_nonfree();

    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");

    TermCriteria tc(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 10, 0.001);
    int dictionarySize = 1000;
    int retries = 1;
    int flags = KMEANS_PP_CENTERS;

    BOWKMeansTrainer bowTrainer(dictionarySize, tc, retries, flags);
    BOWImgDescriptorExtractor bowDE(descriptors, matcher);

    string dir = "/positive/", filepath;
    DIR *dp;
    struct dirent *dirp;
    struct stat filestat;

    cout << "Add Features to KMeans" << endl;
    addFeaturesToBOWKMeansTrainer("/positive/", bowTrainer);
    addFeaturesToBOWKMeansTrainer("/negative/", bowTrainer);

    cout << endl << "Clustering..." << endl;
    Mat dictionary = bowTrainer.cluster();
    bowDE.setVocabulary(dictionary);

    Mat labels(0, 1, CV_32FC1);
    Mat trainingData(0, dictionarySize, CV_32FC1);

    cout << endl << "Extract bow features" << endl;
    computeFeaturesWithBow("/positive/", trainingData, labels, bowDE, 1);
    computeFeaturesWithBow("/negative/", trainingData, labels, bowDE, 0);

    CvSVMParams params;
    params.kernel_type = CvSVM::LINEAR;
    params.svm_type = CvSVM::C_SVC;
    params.gamma = 5;
    params.C = 100;
    params.term_crit = cvTermCriteria(CV_TERMCRIT_NUMBER, 100, 0.000001);

    CvSVM svm;

    cout << endl << "Begin training" << endl;
    bool res = svm.train(trainingData, labels, cv::Mat(), cv::Mat(), params);
    svm.save("classifier.xml");

    //CvSVM svm;
    svm.load("classifier.xml");

    vector<KeyPoint> cameraKeyPoints;
    Mat rotated = imread("test.jpg", 0);
    Mat featuresFromimage;

    detector->detect(rotated, cameraKeyPoints);
    bowDE.compute(rotated, cameraKeyPoints, featuresFromimage);

    cout << "anar:" << svm.predict(featuresFromimage) << endl;

    imshow("edges", rotated);
    cvWaitKey(0);
    return 0;
}
Question 1: Since these images are so similar, how can I make the prediction behave like the pseudocode below? (One possible approach is sketched right after it.)
if similarity > 80%
    "correct"
else
    "defected"
Question 2: Since this kind of character defect is very rare in the factory, it is going to be very tough to collect many defected images for training. Is manually creating defects on these images a correct solution? If not, what can I actually do?
Question 3: What kind of preprocessing could I apply to these images to increase the accuracy of the SVM?
Thank you.
I am having trouble understanding certain code. I'm sorry if this comes off as stupid, but I have code that captures video from my webcam, and I want to get the RGB values from a frame. If this is impossible, would I have to save a frame as a picture and then get the values from it?
#include <windows.h>        // COLORREF, GetPixel, GetRValue & co. (win32, not OpenCV)
#include <stdio.h>
#include <iostream>
#include <opencv/highgui.h> // old C API (CvCapture, IplImage)
using namespace std;

const char window_name[] = "Webcam";

int main(int argc, char* argv[])
{
    /* attempt to capture from any connected device */
    CvCapture *capture = cvCaptureFromCAM(CV_CAP_ANY);
    if (!capture)
    {
        printf("Failed to initialise webcam\n");
        return -1;
    }

    /* create the output window */
    cvNamedWindow(window_name, CV_WINDOW_NORMAL);

    do
    {
        /* attempt to grab a frame */
        IplImage *frame = cvQueryFrame(capture);
        if (!frame)
        {
            printf("Failed to get frame\n");
            break;
        }

        COLORREF myColAbsolute = GetPixel(frame, 10, 10); // error here, saying frame is not compatible with HDC
        cout << "Red - "   << (int)GetRValue(myColAbsolute) << endl;
        cout << "Green - " << (int)GetGValue(myColAbsolute) << endl;
        cout << "Blue - "  << (int)GetBValue(myColAbsolute) << endl;

        /* show the frame */
        cvShowImage(window_name, frame);
    } while (cvWaitKey(30) < 0);

    cvReleaseCapture(&capture);
    return 0;
}
Ha! (obviously caught with a copy & paste bummer)
GetPixel() is a Windows function, not an OpenCV one; same for GetRValue() and its sisters.
You'd use them in the native win32 API to get a pixel from an HDC, but that won't work with opencv/highgui, since neither HDC nor HWND are exposed.
Since you're obviously a beginner (nothing wrong with that, again!), let me also try to talk you out of using the old OpenCV 1.0 API (IplImage, cv*Functions);
you should be using the new one (cv::Mat, functions in namespace cv) instead:
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;
using namespace std;
int main()
{
Mat frame;
namedWindow("video", 1);
VideoCapture cap(0);
while ( cap.isOpened() )
{
cap >> frame;
if(frame.empty()) break;
int x=3, y=5;
// Ladies and Gentlemen, the PIXEL!
Vec3b pixel = frame.at<Vec3b>(y,x); // row,col, not x,y!
cerr << "b:" << int(pixel[0]) << " g:" << int(pixel[1]) << " r:" << int(pixel[2]) << endl;
imshow("video", frame);
if(waitKey(30) >= 0) break;
}
return 0;
}
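And for the second half of your question (saving a frame as a picture): that is a single call in the new API, although, as shown above, you do not need to go through a file just to read pixel values.

imwrite("frame.png", frame); // encodes by file extension and writes the current frame to disk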