I'm using OpenCV to detect features and compute descriptors.
For feature detection I'm using FAST:
cv::Ptr<cv::FeatureDetector> _detector = cv::FastFeatureDetector::create(_configuration.threshold,
_configuration.nonmaxSuppression);
For descriptors I'm using BRIEF:
cv::Ptr<cv::DescriptorExtractor> _descriptor_extractor = cv::xfeatures2d::BriefDescriptorExtractor::create();
After that, I'd like to order keypoints based on their response and store just a certain number of them:
typedef std::map<float,cv::KeyPoint,std::greater<float> > ResponseKeypointMap;
// keypoint buffer
std::vector<cv::KeyPoint> keypoints;
cv::Mat descriptors;
// detect keypoints
_detector->detect(rgb_image_, keypoints);
const int keypoints_size = keypoints.size();
if(!keypoints_size){
std::cerr << "warning: [PointDetector] found 0 keypoints!\n";
return;
}
ResponseKeypointMap keypoints_map;
for(int i=0; i < keypoints_size; ++i){
keypoints_map.insert(std::make_pair(keypoints[i].response,keypoints[i]));
}
int iterations = std::min(_configuration.max_keypoints_size,keypoints_size);
std::vector<cv::KeyPoint> filtered_keypoints;
filtered_keypoints.resize(iterations);
int k=0;
for(ResponseKeypointMap::iterator it = keypoints_map.begin();
it != keypoints_map.end();
++it){
filtered_keypoints[k] = it->second;
k++;
if(k>=iterations)
break;
}
std::cerr << "filtered keypoints size: " << filtered_keypoints.size() << std::endl;
_descriptor_extractor->compute(rgb_image_, filtered_keypoints, descriptors);
std::cerr << "Computed " << descriptors.rows << "x" << descriptors.cols << " descriptors" << std::endl;
I don't know why, but when I give 100 keypoints to the DescriptorExtractor, I receive only 55 descriptors.
I'd be very grateful if you could explain to me what is happening.
Thanks.
According to the OpenCV documentation (https://docs.opencv.org/2.4/modules/features2d/doc/common_interfaces_of_descriptor_extractors.html):
DescriptorExtractor::compute(const Mat& image, vector& keypoints, Mat& descriptors)
...
keypoints – Input collection of keypoints. Keypoints for which a
descriptor cannot be computed are removed and the remaining ones may
be reordered. Sometimes new keypoints can be added, for example: SIFT
duplicates a keypoint with several dominant orientations (for each
orientation).
...
So, after execution of the compute method, your filtered_keypoints vector is altered and you have a new pair of keypoints and descriptors, both of size 55.
In particular, FAST examines a diameter-7 ring of pixels around each candidate point to decide whether it is a keypoint, but BRIEF samples 256 point pairs in a larger patch around it. I don't know offhand whether BRIEF uses a square or a circular area but, either way, it is bigger, so FAST may find keypoints that are too close to the image boundary for BRIEF to be able to compute the descriptor.
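As a side note (not the cause of the 55 descriptors): the std::map keyed on response in your filtering code will silently drop keypoints that happen to share the same response value. If the goal is just to keep the N strongest keypoints, OpenCV's cv::KeyPointsFilter::retainBest does that directly. A minimal sketch, reusing the variable names from your question:
// keep only the strongest max_keypoints_size keypoints (culled by response)
cv::KeyPointsFilter::retainBest(keypoints, _configuration.max_keypoints_size);
// compute() may still drop keypoints that are too close to the image border
_descriptor_extractor->compute(rgb_image_, keypoints, descriptors);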
I'm trying to get OpenCV camera calibration working but am having trouble getting it to output valid data. I have an uncalibrated camera that I would like to calibrate, but to test my code I am using an Azure Kinect camera (the color camera), since the SDK supplies the correct intrinsics for it and I can verify them. I've collected 30 images of a chessboard from slightly different angles, which I understand should be sufficient, and run the calibration function, but no matter what flags I pass in I get values for fx and fy that are pretty different from the correct fx and fy, and distortion coefficients that are WILDLY different. Am I doing something wrong? Do I need more or better data?
A sample of the images I'm using can be found here: https://www.dropbox.com/sh/9pa94uedoe5mlxz/AABisSvgWwBT-bY65lfzp2N3a?dl=0
Save them in c:\calibration_test to run the code below.
#include <filesystem>
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
using namespace std;
namespace fs = std::filesystem;
static bool extractCorners(cv::Mat colorImage, vector<cv::Point3f>& corners3d, vector<cv::Point2f>& corners)
{
// Each square is 20x20mm
const float kSquareSize = 0.020f;
const cv::Size boardSize(7, 9);
const cv::Point3f kCenterOffset((float)(boardSize.width - 1) * kSquareSize, (float)(boardSize.height - 1) * kSquareSize, 0.f);
cv::Mat image;
cv::cvtColor(colorImage, image, cv::COLOR_BGRA2GRAY);
int chessBoardFlags = cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_NORMALIZE_IMAGE;
if (!cv::findChessboardCorners(image, boardSize, corners, chessBoardFlags))
{
return false;
}
cv::cornerSubPix(image, corners, cv::Size(11, 11), cv::Size(-1, -1),
cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 30, 0.1));
// Construct the corners
for (int i = 0; i < boardSize.height; ++i)
for (int j = 0; j < boardSize.width; ++j)
corners3d.push_back(cv::Point3f(j * kSquareSize, i * kSquareSize, 0) - kCenterOffset);
return true;
}
int main()
{
vector<cv::Mat> frames;
for (const auto& p : fs::directory_iterator("c:\\calibration_test\\"))
{
frames.push_back(cv::imread(p.path().string()));
}
int numFrames = (int)frames.size();
vector<vector<cv::Point2f>> corners(numFrames);
vector<vector<cv::Point3f>> corners3d(numFrames);
int framesWithCorners = 0;
for (int i = 0; i < numFrames; ++i)
{
if (extractCorners(frames[i], corners3d[framesWithCorners], corners[framesWithCorners]))
{
++framesWithCorners;
}
}
numFrames = framesWithCorners;
corners.resize(numFrames);
corners3d.resize(numFrames);
// Camera intrinsics come from the Azure Kinect API
cv::Matx33d cameraMatrix(
914.111755f, 0.f, 960.887390f,
0.f, 913.880615f, 551.566528f,
0.f, 0.f, 1.f);
vector<float> distCoeffs = { 0.576340079f, -2.71203661f, 0.000563957903f, -0.000239689150f, 1.54344523f, 0.454746544f, -2.53860712f, 1.47272563f };
cv::Size imageSize = frames[0].size();
vector<cv::Point3d> rotations;
vector<cv::Point3d> translations;
int flags = cv::CALIB_USE_INTRINSIC_GUESS | cv::CALIB_FIX_PRINCIPAL_POINT | cv::CALIB_RATIONAL_MODEL;
double result = cv::calibrateCamera(corners3d, corners, imageSize, cameraMatrix, distCoeffs, rotations, translations,
flags);
// After this call, cameraMatrix has different values for fx and fy, and WILDLY different distortion coefficients.
cout << "fx: " << cameraMatrix(0, 0) << endl;
cout << "fy: " << cameraMatrix(1, 1) << endl;
cout << "cx: " << cameraMatrix(0, 2) << endl;
cout << "cy: " << cameraMatrix(1, 2) << endl;
for (size_t i = 0; i < distCoeffs.size(); ++i)
{
cout << "d" << i << ": " << distCoeffs[i] << endl;
}
return 0;
}
Some sample output is:
fx: 913.143
fy: 917.965
cx: 960.887
cy: 551.567
d0: 0.327596
d1: -73.1837
d2: -0.00125972
d3: 0.002805
d4: -7.93086
d5: 0.295437
d6: -73.481
d7: -3.25043
d8: 0
d9: 0
d10: 0
d11: 0
d12: 0
d13: 0
Any idea what I'm doing wrong?
Bonus question: Why do I get 14 distortion coefficients back instead of 8? If I leave off CALIB_RATIONAL_MODEL then I only get 5 (three radial and two tangential).
You need to take images over the whole field of view of the camera to correctly capture the lens distortion characteristics. The images you provide only show the chessboard in one position, slightly angled.
Ideally you should have images of the chessboard evenly distributed over the x and y axes of the image plane, right up to the edges of the image. Make sure a sufficient white border around the board is always visible, though, for detection robustness.
You should also try to capture images where the chessboard is nearer to the camera and farther away, not just at a uniform distance. The different angles you provide look good, on the other hand.
You can find an extensive guide on how to ensure good calibration results in this answer: How to verify the correctness of calibration of a webcam?
Comparing your camera matrix to the one coming from the Azure Kinect API, it doesn't look so bad. The principal point is pretty much spot on and the focal length is in a reasonable range. If you improve the quality of the input with my tips and the SO answer I linked, the results should be even closer. Comparing sets of distortion coefficients by their distance doesn't really work that well: the error function is not convex, so there can be lots of local minima that produce relatively good results but are far from the global minimum that would yield the best ones. I hope that explanation makes sense.
Regarding your bonus question: I only see 8 non-zero values in the output you show; the rest are 0 and so have no influence. I'm not sure whether that function is expected to output anything different.
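If you want a more quantitative check than comparing coefficients directly, you can compute the per-view reprojection error yourself with cv::projectPoints. A rough sketch, assuming the variables from your code (corners3d, corners, cameraMatrix, distCoeffs) and that calibrateCamera filled your rotations/translations vectors:
double totalErr = 0;
size_t totalPoints = 0;
for (size_t i = 0; i < corners3d.size(); ++i)
{
    vector<cv::Point2f> projected;
    cv::projectPoints(corners3d[i], cv::Mat(rotations[i]), cv::Mat(translations[i]),
        cameraMatrix, distCoeffs, projected);
    // L2 distance between detected and reprojected corners for this view
    double err = cv::norm(corners[i], projected, cv::NORM_L2);
    cout << "view " << i << " RMS error: " << std::sqrt(err * err / projected.size()) << endl;
    totalErr += err * err;
    totalPoints += projected.size();
}
cout << "overall RMS reprojection error: " << std::sqrt(totalErr / totalPoints) << endl;
Note that the return value of calibrateCamera (your result variable) is already this overall RMS error; the per-view breakdown mainly helps spot individual bad frames.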
My goal is to use an SVM with HOG features to classify vehicles in traffic as sedans or SUVs.
I've used various kernels (RBF, LINEAR, POLY) and each gives different results, but they give the same results no matter which parameters are changed. For example, if I am using a POLY kernel and the degree is greater than or equal to 0.65 it classifies everything as an SUV; if it's less than 0.65 it classifies all my testing images as sedans.
With a LINEAR kernel, the only parameter to change is C. No matter what C is, I always get 8/10 images classified as sedans and the same 2 classified as SUVs.
Now, I only have about 70 training images and 10 testing images; I haven't been able to find a good dataset of vehicles seen from the rear and above, like from a bridge, which is the viewpoint I'll be using this for. Could the problem be due to this small dataset, or the parameters, or something else? Also, I see that my number of support vectors is usually very high, like 58 out of the 70 training images, so could that indicate a problem with the dataset? Is there a way for me to visualize the training points somehow? In the SVM examples there is always a nice 2D plot of points with a line drawn through it, but is there a way to plot those points together with the images so I can see whether my data is linearly separable and make adjustments accordingly? Are my HOG parameters reasonable for a 150x200 image of a car?
Also note that when I use testing images that are the same as the training images, the SVM model predicts perfectly, but obviously that's cheating.
The following image shows the result, along with an example of a testing image.
Here is my code; I didn't include most of it because I'm not sure the code is the problem. First I take the positive images, extract HOG features and load them into the training Mat, and then do the same for the negative images, in the same way as in the testing part shown below.
//Set SVM Parameters (not sure about these values, but just wanna see something)
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setKernel(SVM::POLY);
svm->setC(50);
svm->setGamma(100);
svm->setDegree(.65);
//svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
cout << "Parameters Set..." << endl;
svm->train(HOGFeat_train, ROW_SAMPLE, labels_mat);
Mat SV = svm->getSupportVectors();
Mat USV = svm->getUncompressedSupportVectors();
cout << "Support Vectors: " << SV.rows << endl;
cout << "Uncompressed Support Vectors: " << USV.rows << endl;
cout << "Training Successful" << endl;
waitKey(0);
//TESTING PORTION
cout << "Begin Testing..." << endl;
int num_test_images = 10;
Mat HOGFeat_test(1, derSize, CV_32FC1); //Creates a 1 x descriptorSize Mat to house the HoG features from the test image
for (int file_count = 1; file_count < (num_test_images + 1); file_count++)
{
test << nameTest << file_count << type; //'Test_1.jpg' ... 'Test_2.jpg' ... etc ...
string filenameTest = test.str();
test.str("");
Mat test_image = imread(filenameTest, 0); //Read the test image as grayscale
HOGDescriptor hog_test;// (Size(64, 64), Size(32, 32), Size(16, 16), Size(32, 32), 9, 1, -1, 0, .2, 1, 64, false);
vector<float> descriptors_test;
vector<Point> locations_test;
hog_test.compute(test_image, descriptors_test, Size(64, 64), Size(0, 0), locations_test);
for (int i = 0; i < descriptors_test.size(); i++)
HOGFeat_test.at<float>(0, i) = descriptors_test.at(i);
namedWindow("Test Image", CV_WINDOW_NORMAL);
imshow("Test Image", test_image);
//Should return a 1 if its an SUV, or a -1 if its a sedan
float result = svm->predict(HOGFeat_test);
if (result <= 0)
cout << "Sedan" << endl;
else
cout << "SUV" << endl;
cout << "Result: " << result << endl;
waitKey(0);
}
Two things solved this issue:
1) I got a larger dataset of vehicles. I used about 400 SUV images and 400 sedan images for the training portion and then another 50 images for the testing portion.
2) In Mat HOGFeat_test(1, derSize, CV_32FC1), I had derSize wrong by about an order of magnitude: the actual descriptor size was 15120, but I had given the Mat 113400 columns. Thus I filled only about 10% of the testing Mat with useful feature data, so it was much harder for the SVM to tell any difference between SUVs and sedans (see the sketch below for a way to avoid hard-coding this).
Now it works great with both the linear and poly kernels (C = 10), and my accuracy is better than I expected, at a whopping 96%.
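To avoid getting derSize wrong again, a simple trick is not to hard-code it at all: compute the HOG vector for one sample image first and let its length define the width of the feature Mat. A small sketch using the same compute() call as in my test loop (the file name is just an example):
HOGDescriptor hog;
vector<float> firstDesc;
Mat firstImage = imread("Test_1.jpg", 0); // any sample image, grayscale
hog.compute(firstImage, firstDesc, Size(64, 64), Size(0, 0));
int descriptorSize = (int)firstDesc.size(); // 15120 for my 150x200 images
Mat HOGFeat_test(1, descriptorSize, CV_32FC1);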
I'm using Haar-Cascade Classifier in order to detect faces.
I'm currently facing some problems with the following function:
void ImageManager::detectAndDisplay(Mat frame, CascadeClassifier face_cascade){
string window_name = "Capture - Face detection";
string filename;
std::vector<Rect> faces;
std::vector<Rect> eyes;
Mat frame_gray;
Mat crop;
Mat res;
Mat gray;
string text;
stringstream sstm;
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);
// Detect faces
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
// Set Region of Interest
cv::Rect roi_b;
cv::Rect roi_c;
size_t ic = 0; // ic is index of current element
for (ic = 0; ic < faces.size(); ic++) // Iterate through all current elements (detected faces)
{
roi_c.x = faces[ic].x;
roi_c.y = faces[ic].y;
roi_c.width = (faces[ic].width);
roi_c.height = (faces[ic].height);
crop = frame_gray(roi_c);
faces_img.push_back(crop);
rectangle(frame, Point(roi_c.x, roi_c.y), Point(roi_c.x + roi_c.width, roi_c.y + roi_c.height), Scalar(0,0,255), 2);
}
imshow("test", frame);
waitKey(0);
cout << faces_img.size();
}
The frame is the photo I'm trying to scan.
The face_cascade is the classifier.
Internally, the CascadeClassifier performs many individual detections and then groups those.
minNeighbors (in the detectMultiScale call) is the number of detections in roughly the same place that are necessary to count as a valid detection, so increase that from your current 2 to maybe 5 or so, until you start to miss positives.
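For example, only the fourth argument of your call changes:
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 5, 0 | CASCADE_SCALE_IMAGE, Size(30, 30)); // minNeighbors raised from 2 to 5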
As an addition to berak's answer: it's not only about decreasing or increasing the detectMultiScale parameters if you're doing this on more than a single image. In a live application you will also face performance problems that keep the user from actually using it.
Performance problems come down to unnecessary computation, and the only way to know what a computation really costs is to measure it.
If you are not trying to get the best results under different lighting conditions (since this is view-dependent information), you should scale the input image down before passing it to detectMultiScale. Once detection is complete, scale the results back up to the original size (this can be done by scaling the rectangles that detectMultiScale returns).
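A rough sketch of what I mean, based on the detectMultiScale call from your function (the 0.5 scale factor is just an example; pick whatever your hardware allows):
// detect on a half-size image, then scale the resulting rectangles back up
Mat halfFrame;
resize(frame_gray, halfFrame, Size(), 0.5, 0.5);
face_cascade.detectMultiScale(halfFrame, faces, 1.1, 5, 0 | CASCADE_SCALE_IMAGE, Size(15, 15)); // 15x15 here ~ 30x30 in the full image
for (size_t i = 0; i < faces.size(); ++i)
{
    faces[i].x *= 2;
    faces[i].y *= 2;
    faces[i].width *= 2;
    faces[i].height *= 2;
}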
I am trying to subtract the background from depth images acquired with a Kinect. When I learned what Otsu thresholding is, I thought it could help. By converting the depth image to grayscale I can hopefully apply Otsu's threshold to binarize the image.
However, when I implemented (tried to implement) this with OpenCV 2.3, it was in vain. The output image is binarized, but very unexpectedly. I did the thresholding continuously (i.e. printing the result to the screen for each frame) and saw that for some frames the threshold is found to be around 160 and for others it is found to be 0. I couldn't quite understand why this is happening. Could it be due to the high number of 0's in the depth image returned by the Kinect, which correspond to pixels that cannot be measured? Is there a way to tell the algorithm to ignore pixels with the value 0? Or is Otsu thresholding just not good for what I am trying to do?
Here are some outputs and a segment of the related code. You may notice that the second screenshot looks like it could do some good binarization; however, I want to achieve one that distinctly differentiates between the pixels corresponding to the chair in the scene and the background.
Thanks.
cv::Mat1s depthcv(depth->getHeight(), depth->getWidth());
cv::Mat1b depthcv8(depth->getHeight(), depth->getWidth());
cv::Mat1b depthcv8_th(depth->getHeight(), depth->getWidth());
depthcv.data =(uchar*) depth->getDepthMetaData().Data();
depthcv.convertTo(depthcv8,CV_8U,255/5000.f);
//apply otsu thresholding
cv::threshold(depthcv8, depthcv8_th, 128, 255, CV_THRESH_BINARY|CV_THRESH_OTSU);
std::ofstream output;
output.open("output.txt");
//output << "M = "<< endl << " " << depthcv8 << endl << endl;
cv::imshow("lab",depthcv8_th);
cv::waitKey(1);
Otsu is probably good enough for what you are trying to do, but you do need to mask out the zero values before computing the optimal threshold with the Otsu algorithm, otherwise the distribution of intensity values will be skewed lower than what you want.
OpenCV does not provide a mask argument for the cv::threshold function, so you will have to remove those values yourself. I would recommend putting all the non-zero values into a 1 x N matrix, calling cv::threshold on it with CV_THRESH_OTSU and saving the return value (which is the estimated optimal threshold), and then running cv::threshold again on the original image with just the CV_THRESH_BINARY flag and the computed threshold.
Here is one possible implementation:
// move zeros to the back of a temp buffer (work on a deep copy so the original is untouched)
cv::Mat copyImg = origImg.clone();
uchar* ptr = copyImg.data;
uchar* ptr_end = copyImg.data + copyImg.total() - 1; // last element
while (ptr <= ptr_end) {
    if (*ptr == 0) { // swap zero to the back and shrink the valid range
        uchar tmp = *ptr_end;
        *ptr_end = *ptr;
        *ptr = tmp;
        ptr_end--;
    } else {
        ptr++;
    }
}
// make a new matrix with only the non-zero values
// (assumes the image has at least one non-zero pixel)
cv::Mat nz = cv::Mat(std::vector<uchar>(copyImg.data, ptr), true);
// compute the optimal Otsu threshold on the non-zero values only
double thresh = cv::threshold(nz, nz, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
// apply the computed threshold to the original image
cv::threshold(origImg, origImg, thresh, 255, CV_THRESH_BINARY_INV);
I'm currently working on Image stitching using OpenCV 2.3.1 on Visual Studio 2010, but I'm having some trouble.
Problem Description
I'm trying to write code for stitching multiple images coming from a few cameras (about 3-4), i.e. the code should keep executing the image stitching until I ask it to stop.
The following is what I've done so far:
(For simplicity, I'll replace some parts of the code with just a few words.)
1. Reading frames (images) from 2 cameras (currently I'm just working on 2 cameras).
2. Feature detection and descriptor calculation (SURF).
3. Feature matching using FlannBasedMatcher.
4. Removing outliers and calculating the homography from the inliers using RANSAC.
5. Warping one of the two images.
For step 5, I followed the answer in the following thread and just changed some parameters:
Stitching 2 images in opencv
However, the result is terrible.
I just uploaded the result to YouTube; of course, only those who have the link will be able to see it.
http://youtu.be/Oy5z_7LeaMk
My code is shown below:
(Only crucial parts are shown)
VideoCapture cam1, cam2;
cam1.open(0);
cam2.open(1);
while(1)
{
Mat frm1, frm2;
cam1 >> frm1;
cam2 >> frm2;
//(SURF detection, descriptor calculation
//and matching using FlannBasedMatcher)
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_1.rows; i++ )
{
double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
(Draw only "good" matches
(i.e. whose distance is less than 3*min_dist ))
vector<Point2f> frame1;
vector<Point2f> frame2;
for( int i = 0; i < good_matches.size(); i++ )
{
//-- Get the keypoints from the good matches
frame1.push_back( keypoints_1[ good_matches[i].queryIdx ].pt );
frame2.push_back( keypoints_2[ good_matches[i].trainIdx ].pt );
}
Mat H = findHomography( Mat(frame1), Mat(frame2), CV_RANSAC );
cout << "Homography: " << H << endl;
/* warp the image */
Mat warpImage2;
warpPerspective(frm2, warpImage2,
H, Size(frm2.cols, frm2.rows), INTER_CUBIC);
Mat final(Size(frm2.cols*3 + frm1.cols, frm2.rows),CV_8UC3);
Mat roi1(final, Rect(frm1.cols, 0, frm1.cols, frm1.rows));
Mat roi2(final, Rect(2*frm1.cols, 0, frm2.cols, frm2.rows));
warpImage2.copyTo(roi2);
frm1.copyTo(roi1);
imshow("final", final);
What else should I do to make the stitching better?
Besides, is it reasonable to keep the homography matrix fixed instead of computing it over and over?
What I mean is to specify the angle and the displacement between the 2 cameras myself, so as to derive a homography matrix that satisfies what I want.
Thanks. :)
It sounds like you are going about this sensibly, but if you have access to both of the cameras, and they will remain stationary with respect to each other, then calibrating offline, and simply applying the transformation online will make your application more efficient.
One point to note: you say you are using the findHomography function from OpenCV. From the documentation, this function:
Finds a perspective transformation between two planes.
However, your points are not restricted to a specific plane as they are imaging a 3D scene. If you wanted to calibrate offline, you could image a chessboard with both cameras, and the detected corners could be used in this function.
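A minimal sketch of that offline step, reusing frm1/frm2 as one frame grabbed from each camera (the board size is an assumption; the detected corners come back in the same order for both views, so they can be fed straight into findHomography):
Mat gray1, gray2;
cvtColor(frm1, gray1, CV_BGR2GRAY);
cvtColor(frm2, gray2, CV_BGR2GRAY);
Size boardSize(9, 6); // inner corners of the printed chessboard (assumed)
vector<Point2f> boardCorners1, boardCorners2;
if (findChessboardCorners(gray1, boardSize, boardCorners1) &&
    findChessboardCorners(gray2, boardSize, boardCorners2))
{
    // fixed homography in the same direction as the H computed in your loop
    Mat H_fixed = findHomography(boardCorners1, boardCorners2, CV_RANSAC);
}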
Alternatively, you may like to investigate the fundamental matrix, which can be calculated with a similar function. This matrix describes the relative position of the cameras, but some work (and a good textbook) will be required to extract the relative pose from it.
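For reference, the call looks very similar to the one you already use; with the frame1/frame2 point vectors from your code it would be something like:
// estimates the epipolar geometry instead of a plane-to-plane mapping
Mat F = findFundamentalMat(frame1, frame2, CV_FM_RANSAC, 3.0, 0.99);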
If you can find it, I would strongly recommend having a look at Part II: "Two-View Geometry" in the book "Multiple View Geometry in computer vision", by Richard Hartley and Andrew Zisserman, which goes through the process in detail.
I have been working lately on image registration. My algorithm takes two images, calculates the SURF features, finds correspondences, finds the homography matrix and then stitches both images together. I did it with the following code:
void stich(Mat base, Mat target,Mat homography, Mat& panorama){
Mat corners1(1, 4,CV_32F);
Mat corners2(1,4,CV_32F);
Mat corners(1,4,CV_32F);
vector<Mat> planes;
/* compute corners
of warped image
*/
corners1.at<float>(0,0)=0;
corners2.at<float>(0,0)=0;
corners1.at<float>(0,1)=0;
corners2.at<float>(0,1)=target.rows;
corners1.at<float>(0,2)=target.cols;
corners2.at<float>(0,2)=0;
corners1.at<float>(0,3)=target.cols;
corners2.at<float>(0,3)=target.rows;
planes.push_back(corners1);
planes.push_back(corners2);
merge(planes,corners);
perspectiveTransform(corners, corners, homography);
/* compute size of resulting
image and allocate memory
*/
double x_start = min( min( (double)corners.at<Vec2f>(0,0)[0], (double)corners.at<Vec2f> (0,1)[0]),0.0);
double x_end = max( max( (double)corners.at<Vec2f>(0,2)[0], (double)corners.at<Vec2f>(0,3)[0]), (double)base.cols);
double y_start = min( min( (double)corners.at<Vec2f>(0,0)[1], (double)corners.at<Vec2f>(0,2)[1]), 0.0);
double y_end = max( max( (double)corners.at<Vec2f>(0,1)[1], (double)corners.at<Vec2f>(0,3)[1]), (double)base.rows);
/*Creating image
with same channels, depth
as target
and proper size
*/
panorama.create(Size(x_end - x_start + 1, y_end - y_start + 1), target.depth());
planes.clear();
/*Planes should
have same n.channels
as target
*/
for (int i=0;i<target.channels();i++){
planes.push_back(panorama);
}
merge(planes,panorama);
// create translation matrix in order to copy both images to correct places
Mat T;
T=Mat::zeros(3,3,CV_64F);
T.at<double>(0,0)=1;
T.at<double>(1,1)=1;
T.at<double>(2,2)=1;
T.at<double>(0,2)=-x_start;
T.at<double>(1,2)=-y_start;
// copy base image to correct position within output image
warpPerspective(base, panorama, T,panorama.size(),INTER_LINEAR| CV_WARP_FILL_OUTLIERS);
// change homography to take necessary translation into account
gemm(T, homography,1,T,0,T);
// warp second image and copy it to output image
warpPerspective(target,panorama, T, panorama.size(),INTER_LINEAR);
//tidy
corners.release();
T.release();
}
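A minimal usage sketch (the file names and the placeholder homography are just for illustration; in my pipeline the homography comes from the SURF matches and findHomography, as described above):
Mat base = imread("base.jpg");
Mat target = imread("target.jpg");
// the homography must map points of `target` into the coordinate frame of `base`
Mat H = (Mat_<double>(3, 3) << 1, 0, 200,
                               0, 1, 0,
                               0, 0, 1); // placeholder: a pure horizontal translation
Mat panorama;
stich(base, target, H, panorama);
imwrite("panorama.jpg", panorama);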
Any questions, I will try to answer.