WINDOW_AUTOSIZE in namedWindow() method - opencv

I want to enable the user to draw rectangles and define regions in the image window. The trouble is that the image strictly has to have A4-size dimensions, which is why I have used the WINDOW_AUTOSIZE mode when calling namedWindow. But this seems to mess up the points P1 and P2 that are used to make the rectangle. This somehow doesn't happen in WINDOW_NORMAL mode.
Here is the code:
#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

cv::Point P1;
cv::Point P2;
Mat temp;

void CallBack(int event, int x, int y, int flags, void* userdata) {
    if (event == CV_EVENT_LBUTTONDOWN) {
        P1.x = x;
        P1.y = y;
    }
    else if (event == CV_EVENT_LBUTTONUP) {
        P2.x = x;
        P2.y = y;
        rectangle(temp, P1, P2, Scalar(0, 0, 0), 1);
    }
}

int main() {
    temp = imread("template.png");
    namedWindow("Templating", CV_WINDOW_AUTOSIZE);
    imshow("Templating", temp);
    cv::setMouseCallback("Templating", CallBack, NULL);
    waitKey(0);
    imshow("Templating", temp);
    waitKey(0);
    return 0;
}
Thanks in advance!!
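Independent of the window mode, a minimal sketch (not the original code) of a callback that refreshes the window as soon as the rectangle is drawn, which makes it easier to see where P1 and P2 actually land; it draws on a clone so the source image stays clean:
void CallBack(int event, int x, int y, int flags, void* userdata) {
    if (event == CV_EVENT_LBUTTONDOWN) {
        P1 = cv::Point(x, y);
    }
    else if (event == CV_EVENT_LBUTTONUP) {
        P2 = cv::Point(x, y);
        Mat display = temp.clone();    // draw on a copy so temp stays unmodified
        rectangle(display, P1, P2, Scalar(0, 0, 0), 1);
        imshow("Templating", display); // refresh the window immediately
    }
}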

Related

Mapping a depth image to a color image with realsense2 in OpenCV

I use the librealsense2 library.
I refer to this example: https://github.com/IntelRealSense/librealsense/blob/master/examples/align/rs-align.cpp
After mapping the depth image to the color image with the realsense2 library, I want to display the images with the OpenCV Mat (imshow) function,
so I coded this:
#include "librealsense2/rs.hpp"
#include <opencv2/opencv.hpp>
#include <sstream>
#include <iostream>
#include <fstream>
#include <algorithm>
#include <cstring>
using namespace std;
using namespace cv;
void remove_background(rs2::video_frame& other, const rs2::depth_frame& depth_frame, float depth_scale, float clipping_dist);
float get_depth_scale(rs2::device dev);
rs2_stream find_stream_to_align(const std::vector<rs2::stream_profile>& streams);
bool profile_changed(const std::vector<rs2::stream_profile>& current, const std::vector<rs2::stream_profile>& prev);
int main(int args, char * argv[]) try
{
// Create and initialize GUI related objects
rs2::colorizer c;
rs2::config cfg;
rs2::pipeline pipe;
const int width = 1280;
const int height = 720;
c.set_option(RS2_OPTION_HISTOGRAM_EQUALIZATION_ENABLED, 1.f);
c.set_option(RS2_OPTION_COLOR_SCHEME, 2.f); // White to Black
cfg.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_BGR8, 30);
cfg.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, 30);
rs2::pipeline_profile profile = pipe.start(cfg);
float depth_scale = get_depth_scale(profile.get_device());
rs2_stream align_to = find_stream_to_align(profile.get_streams());
rs2::align align(align_to);
float depth_clipping_distance = 3.f;
while (true)
{
rs2::frameset frameset = pipe.wait_for_frames();
if (profile_changed(pipe.get_active_profile().get_streams(), profile.get_streams()))
{
profile = pipe.get_active_profile();
align_to = find_stream_to_align(profile.get_streams());
align = rs2::align(align_to);
depth_scale = get_depth_scale(profile.get_device());
}
auto processed = align.process(frameset);
rs2::video_frame other_frame = processed.first(align_to);
rs2::depth_frame aligned_depth_frame = c(processed.get_depth_frame());
if (!aligned_depth_frame || !other_frame)
{
continue;
}
remove_background(other_frame, aligned_depth_frame, depth_scale, depth_clipping_distance);
Mat other_frameaM(Size(width, height), CV_8UC3, (void*)other_frame.get_data(), Mat::AUTO_STEP);
Mat aligned_depthM(Size(width, height), CV_8UC3, (void*)aligned_depth_frame.get_data(), Mat::AUTO_STEP);
namedWindow("other window", WINDOW_AUTOSIZE);
namedWindow("depth window", WINDOW_AUTOSIZE);
imshow("other window", other_frameaM);
imshow("depth window", aligned_depthM);
}
return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl;
return EXIT_FAILURE;
}
catch (const std::exception & e)
{
std::cerr << e.what() << std::endl;
return EXIT_FAILURE;
}
float get_depth_scale(rs2::device dev)
{
// Go over the device's sensors
for (rs2::sensor& sensor : dev.query_sensors())
{
// Check if the sensor if a depth sensor
if (rs2::depth_sensor dpt = sensor.as<rs2::depth_sensor>())
{
return dpt.get_depth_scale();
}
}
throw std::runtime_error("Device does not have a depth sensor");
}
void remove_background(rs2::video_frame& other_frame, const rs2::depth_frame& depth_frame, float depth_scale, float clipping_dist)
{
const uint16_t* p_depth_frame = reinterpret_cast<const uint16_t*>(depth_frame.get_data());
uint8_t* p_other_frame = reinterpret_cast<uint8_t*>(const_cast<void*>(other_frame.get_data()));
int width = other_frame.get_width();
int height = other_frame.get_height();
int other_bpp = other_frame.get_bytes_per_pixel();
#pragma omp parallel for schedule(dynamic) //Using OpenMP to try to parallelise the loop
for (int y = 0; y < height; y++)
{
auto depth_pixel_index = y * width;
for (int x = 0; x < width; x++, ++depth_pixel_index)
{
// Get the depth value of the current pixel
auto pixels_distance = depth_scale * p_depth_frame[depth_pixel_index];
// Check if the depth value is invalid (<=0) or greater than the threashold
if (pixels_distance <= 0.f || pixels_distance > clipping_dist)
{
// Calculate the offset in other frame's buffer to current pixel
auto offset = depth_pixel_index * other_bpp;
// Set pixel to "background" color (0x999999)
std::memset(&p_other_frame[offset], 0x99, other_bpp);
}
}
}
}
rs2_stream find_stream_to_align(const std::vector<rs2::stream_profile>& streams)
{
//Given a vector of streams, we try to find a depth stream and another stream to align depth with.
//We prioritize color streams to make the view look better.
//If color is not available, we take another stream that (other than depth)
rs2_stream align_to = RS2_STREAM_ANY;
bool depth_stream_found = false;
bool color_stream_found = false;
for (rs2::stream_profile sp : streams)
{
rs2_stream profile_stream = sp.stream_type();
if (profile_stream != RS2_STREAM_DEPTH)
{
if (!color_stream_found) //Prefer color
align_to = profile_stream;
if (profile_stream == RS2_STREAM_COLOR)
{
color_stream_found = true;
}
}
else
{
depth_stream_found = true;
}
}
if (!depth_stream_found)
throw std::runtime_error("No Depth stream available");
if (align_to == RS2_STREAM_ANY)
throw std::runtime_error("No stream found to align with Depth");
return align_to;
}
bool profile_changed(const std::vector<rs2::stream_profile>& current, const std::vector<rs2::stream_profile>& prev)
{
for (auto&& sp : prev)
{
//If previous profile is in current (maybe just added another)
auto itr = std::find_if(std::begin(current), std::end(current), [&sp](const rs2::stream_profile& current_sp) { return sp.unique_id() == current_sp.unique_id(); });
if (itr == std::end(current)) //If it previous stream wasn't found in current
{
return true;
}
}
return false;
}
All I get are gray screens, and nothing happens.
Mat other_frameaM(Size(width, height), CV_8UC3, (void*)other_frame.get_data(), Mat::AUTO_STEP);
Mat aligned_depthM(Size(width, height), CV_8UC3, (void*)aligned_depth_frame.get_data(), Mat::AUTO_STEP);
I guess there is no problem here, because the depth image and the RGB image opened fine in CV_8UC3 format.
However, when I calibrate and then fetch the result in OpenCV, the image appears only as a gray screen.
auto frames = pipe.wait_for_frames(); // Wait for next set of frames from the camera
rs2::video_frame color = frames.get_color_frame();
rs2::depth_frame depth = color_map(frames.get_depth_frame());
if (!color)
    color = frames.get_infrared_frame();
Mat colorM(Size(width, height), CV_8UC3, (void*)color.get_data(), Mat::AUTO_STEP);
Mat depthM(Size(width, height), CV_8UC3, (void*)depth.get_data(), Mat::AUTO_STEP);
This is the part of the code that outputs the color image and the depth image, and it works well.
So I guess the problem is here:
rs2::video_frame other_frame = processed.first(align_to);
rs2::depth_frame aligned_depth_frame = c(processed.get_depth_frame());
Whatever the processing does, I thought it would run because it fetches the data in frame format. I think I have made a very big mistake somewhere in this code.
Which part is wrong?
There are several ways to store an image in memory, and there is no guarantee that you can just pass the buffer and it will all work; try copying pixel by pixel.
You should know that OpenCV uses a BGR interleaved image format, while RealSense might use another.
1) Get aligned frames
rs2::frameset data = pipe.wait_for_frames();
rs2::frameset aligned_set = align_to.process(data);
auto color_mat = frame_to_mat(aligned_set.get_color_frame());
auto depth_mat = frame_to_mat(aligned_set.get_depth_frame());
2) frame_to_mat helper function
cv::Mat frame_to_mat(const rs2::frame& f)
{
using namespace cv;
using namespace rs2;
auto vf = f.as<video_frame>();
const int w = vf.get_width();
const int h = vf.get_height();
if (f.get_profile().format() == RS2_FORMAT_BGR8)
{
return Mat(Size(w, h), CV_8UC3, (void*)f.get_data(), Mat::AUTO_STEP);
}
else if (f.get_profile().format() == RS2_FORMAT_RGB8)
{
auto r = Mat(Size(w, h), CV_8UC3, (void*)f.get_data(), Mat::AUTO_STEP);
cvtColor(r, r, CV_RGB2BGR);
return r;
}
else if (f.get_profile().format() == RS2_FORMAT_Z16)
{
return Mat(Size(w, h), CV_16UC1, (void*)f.get_data(), Mat::AUTO_STEP);
}
else if (f.get_profile().format() == RS2_FORMAT_Y8)
{
return Mat(Size(w, h), CV_8UC1, (void*)f.get_data(), Mat::AUTO_STEP);
}
throw std::runtime_error("Frame format is not supported yet!");
}
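Note that a Z16 depth frame maps to a CV_16UC1 Mat, which imshow renders as a mostly dark image unless it is scaled first. A minimal sketch of one way to display it (the 0.03 scale factor is an illustrative value, not taken from the question):
// Sketch: scale the 16-bit depth Mat into 8 bits before displaying it.
void show_depth(const cv::Mat& depth16)
{
    cv::Mat depth8, depth_color;
    cv::convertScaleAbs(depth16, depth8, 0.03);               // 16-bit -> 8-bit
    cv::applyColorMap(depth8, depth_color, cv::COLORMAP_JET); // optional false color
    cv::imshow("depth window", depth_color);
    cv::waitKey(1); // imshow only renders during a waitKey call
}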

OpenCV: How to use AffineTransformer

Hello and thanks for your help.
I would like to test the use of shapes for matching in OpenCV and have managed to do the matching part.
To locate the rotated shape, I thought the AffineTransformer class would be the right choice. As I don't know how the matching works internally, it would be nice if someone had a link where the procedure is described.
As shawshank mentioned, my following code throws an assertion-failed error because the variable matches is empty when it is passed to the estimateTransformation function. Does anybody know how to use this function correctly, and what it really does?
#include<opencv2/opencv.hpp>
#include<algorithm>
#include<iostream>
#include<string>
#include<opencv2/highgui/highgui.hpp>
using namespace std;
using namespace cv;
bool rotateImage(Mat src, Mat &dst, double angle)
{
// get rotation matrix for rotating the image around its center
cv::Point2f center(src.cols/2.0, src.rows/2.0);
cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
// determine bounding rectangle
cv::Rect bbox = cv::RotatedRect(center,src.size(), angle).boundingRect();
// adjust transformation matrix
rot.at<double>(0,2) += bbox.width/2.0 - center.x;
rot.at<double>(1,2) += bbox.height/2.0 - center.y;
cv::warpAffine(src, dst, rot, bbox.size());
return 1;
}
static vector<Point> sampleContour( const Mat& image, int n=300 )
{
vector<vector<Point>> contours;
vector<Point> all_points;
findContours(image, contours, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);
for (size_t i=0; i <contours.size(); i++)
{
for (size_t j=0; j<contours[i].size(); j++)
{
all_points.push_back(contours[i][j]);
}
}
int dummy=0;
for (int add=(int)all_points.size(); add<n; add++)
{
all_points.push_back(all_points[dummy++]);
}
// shuffel
random_shuffle(all_points.begin(), all_points.end());
vector<Point> sampled;
for (int i=0; i<n; i++)
{
sampled.push_back(all_points[i]);
}
return sampled;
}
int main(void)
{
Mat img1, img2;
vector<Point> img1Points, img2Points;
float distSC, distHD;
// read images
string img1Path = "testimage.jpg";
img1 = imread(img1Path, IMREAD_GRAYSCALE);
rotateImage(img1, img2, 45);
imshow("original", img1);
imshow("transformed", img2);
waitKey();
// Contours
img1Points = sampleContour(img1);
img2Points = sampleContour(img2);
//Calculate Distances
Ptr<ShapeContextDistanceExtractor> mysc = createShapeContextDistanceExtractor();
Ptr<HausdorffDistanceExtractor> myhd = createHausdorffDistanceExtractor();
distSC = mysc->computeDistance( img1Points, img2Points );
distHD = myhd -> computeDistance( img1Points, img2Points );
cout << distSC << endl << distHD << endl;
vector<DMatch> matches;
Ptr<AffineTransformer> transformerHD = createAffineTransformer(0);
transformerHD -> estimateTransformation(img1Points, img2Points, matches);
return 0;
}
I have used the AffineTransformer class on a 2D image. Below is basic code that will give you an idea of what it does.
// My OpenCV AffineTransformer demo code
// I have tested this on a 500 x 500 resolution image
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/shape.hpp" // needed for createAffineTransformer
#include <vector>

using namespace cv;
using namespace std;

int arrSize = 10;
int sourcePx[] = {154,155,159,167,182,209,238,265,295,316};
int sourcePy[] = {190,222,252,285,314,338,344,340,321,290};
int tgtPx[] = {120,127,137,150,188,230,258,285,305,313};
int tgtPy[] = {207,245,275,305,336,345,342,332,305,274};

int main()
{
    // Prepare 'vector of points' from the hardcoded points above
    int sInd = 0, eInd = arrSize;
    vector<Point2f> sourceP; for (int i = sInd; i < eInd; i++) sourceP.push_back(Point2f(sourcePx[i], sourcePy[i]));
    vector<Point2f> tgtP;    for (int i = sInd; i < eInd; i++) tgtP.push_back(Point2f(tgtPx[i], tgtPy[i]));
    // Create an AffineTransformer object
    bool fullAffine = true; // change its value and see the difference in the result
    auto aft = cv::createAffineTransformer(fullAffine);
    // Prepare vector<cv::DMatch> - this is just a mapping of corresponding point indices
    std::vector<cv::DMatch> matches;
    for (int i = 0; i < (int)sourceP.size(); ++i) matches.push_back(cv::DMatch(i, i, 0));
    // Read image
    Mat srcImg = imread("image1.jpg");
    Mat tgtImg;
    // estimate the point transformation
    aft->estimateTransformation(sourceP, tgtP, matches);
    // apply the transformation
    aft->applyTransformation(sourceP, tgtP);
    // warp the image
    aft->warpImage(srcImg, tgtImg);
    // show the generated output
    imshow("warped output", tgtImg);
    waitKey(0);
    return 0;
}
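For the question's code, the assertion comes from passing an empty matches vector to estimateTransformation. A minimal sketch of one way to fill it, assuming a one-to-one correspondence between the sampled points (which the random shuffling in sampleContour does not actually guarantee); converting to Point2f first is a safe bet with the shape module:
// Sketch: build index-to-index matches and use float points.
std::vector<cv::Point2f> pts1(img1Points.begin(), img1Points.end());
std::vector<cv::Point2f> pts2(img2Points.begin(), img2Points.end());
std::vector<cv::DMatch> matches;
for (int i = 0; i < (int)pts1.size(); ++i)
    matches.push_back(cv::DMatch(i, i, 0.f)); // point i matches point i
transformerHD->estimateTransformation(pts1, pts2, matches);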

OpenCV R6010 abort() has been called error in Visual Studio 2013

I have some code to draw a line between two points on an image, which are selected by mouse, and then to display a histogram.
However, when I press q as required by the code, I get an error saying "R6010 abort() has been called" along with a VC++ runtime error.
Please advise me on how I can fix this error.
#include <vector>
#include "opencv2/highgui/highgui.hpp"
#include <opencv\cv.h>
#include <iostream>
#include<conio.h>
using namespace cv;
using namespace std;
struct Data_point
{
int x;
unsigned short int y;
};
int PlotMeNow(unsigned short int *values, unsigned int nSamples)
{
std::vector<Data_point> graph(nSamples);
for (unsigned int i = 0; i < nSamples; i++)
{
graph[i].x = i;
graph[i].y = values[i];
}
cv::Size imageSize(5000, 500); // your window size
cv::Mat image(imageSize, CV_8UC1);
if (image.empty()) //check whether the image is valid or not
{
std::cout << "Error : Image cannot be created..!!" << std::endl;
system("pause"); //wait for a key press
return 0;
}
else
{
std::cout << "Good job : Image created successfully..!!" << std::endl;
}
// tru to do some ofesseting so the graph do not hide on x or y axis
Data_point dataOffset;
dataOffset.x = 20;
// we have to mirror the y axis!
dataOffset.y = 5000;
for (unsigned int i = 0; i<nSamples; ++i)
{
graph[i].x = (graph[i].x + dataOffset.x) * 3;
graph[i].y = (graph[i].y + dataOffset.y) / 200;
}
// draw the samples
for (unsigned int i = 0; i<nSamples - 1; ++i)
{
cv::Point2f p1;
p1.x = graph[i].x;
p1.y = graph[i].y;
cv::Point2f p2;
p2.x = graph[i + 1].x;
p2.y = graph[i + 1].y;
cv::line(image, p1, p2, 'r', 1, 4, 0);
}
cv::namedWindow("MyWindow1", CV_WINDOW_AUTOSIZE); //create a window with the name "MyWindow"
cv::imshow("MyWindow1", image); //display the image which is stored in the 'img' in the "MyWindow" window
while (true)
{
char c = cv::waitKey(10);
if (c == 'q')
break;
}
destroyWindow("MyWindow1");
destroyWindow("MyWindow"); //destroy the window with the name, "MyWindow"
return 0;
}
void IterateLine(const Mat& image, vector<ushort>& linePixels, Point p2, Point p1, int* count1)
{
LineIterator it(image, p2, p1, 8);
for (int i = 0; i < it.count; i++, it++)
{
linePixels.push_back(image.at<ushort>(it.pos())); //doubt
}
*count1 = it.count;
}
//working line with mouse
void onMouse(int evt, int x, int y, int flags, void* param)
{
if (evt == CV_EVENT_LBUTTONDOWN)
{
std::vector<cv::Point>* ptPtr = (std::vector<cv::Point>*)param;
ptPtr->push_back(cv::Point(x, y));
}
}
void drawline(Mat image, std::vector<Point>& points)
{
cv::namedWindow("Output Window");
cv::setMouseCallback("Output Window", onMouse, (void*)&points);
int X1 = 0, Y1 = 0, X2 = 0, Y2 = 0;
while (1)
{
cv::imshow("Output Window", image);
if (points.size() > 1) //we have 2 points
{
for (auto it = points.begin(); it != points.end(); ++it)
{
}
break;
}
waitKey(10);
}
//just for testing that we are getting pixel values
X1 = points[0].x;
X2 = points[1].x;
Y1 = points[0].y;
Y2 = points[1].y;
// Draw a line
line(image, Point(X1, Y1), Point(X2, Y2), 'r', 2, 8);
cv::imshow("Output Window", image);
//exit image window
while (true)
{
char c = cv::waitKey(10);
if (c == 'q')
break;
}
destroyWindow("Output Window");
}
void show_histogram_image(Mat img1)
{
int sbins = 65536;
int histSize[] = { sbins };
float sranges[] = { 0, 65536 };
const float* ranges[] = { sranges };
cv::MatND hist;
int channels[] = { 0 };
cv::calcHist(&img1, 1, channels, cv::Mat(), // do not use mask
hist, 1, histSize, ranges,
true, // the histogram is uniform
false);
double maxVal = 0;
minMaxLoc(hist, 0, &maxVal, 0, 0);
int xscale = 10;
int yscale = 10;
cv::Mat hist_image;
hist_image = cv::Mat::zeros(65536, sbins*xscale, CV_16UC1);
for int s = 0; s < sbins; s++)
{
float binVal = hist.at<float>(s, 0);
int intensity = cvRound(binVal * 65535 / maxVal);
rectangle(hist_image, cv::Point(s*xscale, hist_image.rows),
cv::Point((s + 1)*xscale - 1, hist_image.rows - intensity),
cv::Scalar::all(65535), 1);
}
imshow("Histogram", hist_image);
waitKey(0);
}
int main()
{
vector<Point> points1;
vector<ushort>linePixels;
Mat img = cvLoadImage("desert.jpg");
if (img.empty()) //check whether the image is valid or not
{
cout << "Error : Image cannot be read..!!" << endl;
system("pause"); //wait for a key press
return -1;
}
//Draw the line
drawline(img, points1);
//now check the collected points
Mat img1 = cvLoadImage("desert.jpg");
if (img1.empty()) //check whether the image is valid or not
{
cout << "Error : Image cannot be read..!!" << endl;
system("pause"); //wait for a key press
return -1;
}
int *t = new int;
IterateLine( img1, linePixels, points1[1], points1[0], t );
PlotMeNow(&linePixels[0], t[0]);
show_histogram_image(img);
delete t;
_getch();
return 0;
}
This is one of the bad smells in your code:
void IterateLine(const Mat& image, vector<ushort>& linePixels, Point p2, Point p1, int* count1)
{
...
linePixels.push_back(image.at<ushort>(it.pos())); //doubt
Now image is a CV_8UC3 image (from Mat img1 = cvLoadImage("desert.jpg");), but you are accessing it here as if it were CV_16UC1, so what gets put in linePixels is garbage. This will almost certainly cause PlotMeNow() to draw outside its image and corrupt something, which is probably why your code is crashing.
Since it is very unclear what your code is trying to do, I can't suggest what you should have here instead.
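That said, if the intent was to sample intensities along the line, a minimal sketch of one possibility (an assumption about the goal, not the original author's method) is to convert to a single-channel image and read 8-bit values:
// Sketch: sample 8-bit grayscale values along the line instead of
// reinterpreting BGR bytes as 16-bit values.
void IterateLine(const cv::Mat& image, std::vector<ushort>& linePixels,
                 cv::Point p2, cv::Point p1, int* count1)
{
    cv::Mat gray;
    cv::cvtColor(image, gray, CV_BGR2GRAY);  // CV_8UC3 -> CV_8UC1
    cv::LineIterator it(gray, p2, p1, 8);
    for (int i = 0; i < it.count; i++, ++it)
        linePixels.push_back(gray.at<uchar>(it.pos())); // safe 8-bit access
    *count1 = it.count;
}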
I have just managed to get this working; you only have to put "-1" in your loop limit:
for (unsigned int i = 0; i < nSamples - 1; i++)
{
    graph[i].x = i;
    graph[i].y = values[i];
}

How to extract the image (basically a Mat) from a poly drawn in OpenCV?

I am writing a simple OpenCV program to extract an image and get the Mat of the image from a poly drawn by myself. The code below gives an example of drawing a poly through a few given points; when I finish drawing the poly in red, I want to use findContours to extract just the poly out of the picture and get a Mat from that contour.
#include "stdafx.h"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
using namespace std;
Mat grey_img;
Mat img_resized;
bool start = 0;
cv::Point lastPoint = Point(-1,-1);
vector<Point> parkspoint;
void mouseMode2(int event, int x, int y, void* param);
void mouseHandler(int event, int x, int y, int flags, void* param);
void mouseHandler(int event, int x, int y, int flags, void* param){
mouseMode2(event,x,y,param);
}
void mouseMode2(int event, int x, int y, void* param){
//Boolean select_flag = CV_EVENT_FLAG_SHIFTKEY;
if (event == CV_EVENT_LBUTTONDOWN && start)
{
cout<<"select one point "<<x<<" "<< y<<endl;
cv::Point point = cv::Point(x, y);
cout<<"last point x:"<<lastPoint.x <<" y:"<<lastPoint.y<<endl;
// add point
if(lastPoint.x!=-1 && lastPoint.y!=-1){
cv::line(img_resized, lastPoint, point, CV_RGB(255, 0, 0), 1, 1, 0);
cv::imshow("parking", img_resized);
}
cout<<"add point to array"<<endl;
lastPoint = Point(x,y);
parkspoint.push_back(point);
}
if (event == CV_EVENT_MOUSEMOVE && start){
cv::Point point = cv::Point(x, y);
if(lastPoint.x!=-1 && lastPoint.y!=-1)
{
cv::Mat img1 = img_resized.clone();
cv::line(img1, lastPoint, point, CV_RGB(255, 0, 0), 1, 1, 0);
cv::imshow("parking", img1);
}
}
if (event == CV_EVENT_RBUTTONUP )
{
cout<<"end selecting"<<endl;
start = 0;
if(lastPoint.x!=-1 && lastPoint.y!=-1){
cv::line(img_resized, lastPoint, parkspoint[0], CV_RGB(255, 0, 0), 1, 1, 0);
cv::imshow("parking", img_resized);
}
for(int i=0;i<parkspoint.size();i++){
cout<<"show points "<<i<<" "<< parkspoint[i].x<<":"<< parkspoint[i].y <<endl;
}
std::vector<std::vector<cv::Point> > contours;
cv::imshow("parking", img_resized);
cv::findContours(img_resized,contours,CV_RETR_LIST,CV_CHAIN_APPROX_SIMPLE);
cout<<"find how many coutours : "<< contours.size()<< endl;
for(int i = 0;i <contours.size();i++){
cv::drawContours(grey_img,contours,i,cv::Scalar(255,0,0),1);
}
cv::imshow("parking2", grey_img);
}
if (event == CV_EVENT_RBUTTONDOWN )
{
cout<<"start selecting"<<endl;
start = 1;
}
}
int main(int argc, char* argv[])
{
Mat img_raw = imread("D:/car parking/bb.jpg", 1); // load as color image
resize(img_raw, img_resized, Size(64,128) );
grey_img = img_resized.clone();
cout << "raw img dimensions: " << img_raw.cols << " width x " << img_raw.rows << "height" << endl;
cout << "img dimensions: " << img_resized.cols << " width x " << img_resized.rows << "height" << endl;
cv::cvtColor(img_resized,img_resized,CV_BGR2GRAY);
namedWindow("parking",CV_WINDOW_NORMAL);
imshow("parking",img_resized);
namedWindow("parking2",CV_WINDOW_NORMAL);
imshow("parking2",grey_img);
cv::setMouseCallback("parking",mouseHandler,0);
waitKey(0);
return 0;
}
However, I ran into problems: the left image is the poly I drew myself, and when findContours is applied, it gives me three polys.
First, the largest contour is the rectangle of the whole picture, which I don't want. I could compare contour sizes to get rid of it, but is there a better way to exclude the big rectangle up front?
Second, two of the contours are very similar, more like the outer and inner borders of the shape I drew. The Mat of either contour is what I want for the final result, so picking either one is fine. But if I draw many shapes, each shape gives birth to such a twin, and it gets complicated to sort them out. A sketch of one possible approach follows below.
This is the end result I expect: the cropped image.
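A minimal sketch of one common way around both issues, assuming the clicked vertices in parkspoint are used directly: draw the polygon filled into a separate single-channel mask, so neither the image border nor inner/outer edge twins ever appear, then crop with the bounding rectangle:
// Sketch: build a mask from the clicked polygon instead of running
// findContours on the displayed image.
cv::Mat mask = cv::Mat::zeros(img_resized.size(), CV_8UC1);
std::vector<std::vector<cv::Point> > polys(1, parkspoint);
cv::fillPoly(mask, polys, cv::Scalar(255));      // filled poly, no inner/outer twin

cv::Mat cropped;
img_resized.copyTo(cropped, mask);               // keep only pixels inside the poly
cropped = cropped(cv::boundingRect(parkspoint)); // crop to the poly's bounding box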

opencv set mouse callback

I'm a beginner in OpenCV and would like to print to my console the specific value of a pixel (in RGB format) that I select by clicking on it.
After some searching I managed to print the coordinates of the click I make on the image.
If anyone knows how to do that, please modify this code that I am using:
void mouseEvent(int evt, int x, int y, int flags, void* param)
{
    if (evt == CV_EVENT_LBUTTONDOWN)
    {
        printf("%d %d\n", x, y);
    }
}
and this is what I use to call the function:
cvSetMouseCallback("blah blah", mouseEvent, 0);
Place your image in a Mat called frame, then:
namedWindow("test");
cvSetMouseCallback("test", mouseEvent, &frame);
char key = 0;
while ((int)key != 27) {
imshow("test", frame);
key = waitKey(1);
}
where mouseEvent is defined as follows (note that OpenCV stores pixels in BGR order, so channels 0, 1, 2 are blue, green, and red):
void mouseEvent(int evt, int x, int y, int flags, void* param) {
    Mat* rgb = (Mat*) param;
    if (evt == CV_EVENT_LBUTTONDOWN) {
        printf("%d %d: %d, %d, %d\n",
            x, y,
            (int)(*rgb).at<Vec3b>(y, x)[0],
            (int)(*rgb).at<Vec3b>(y, x)[1],
            (int)(*rgb).at<Vec3b>(y, x)[2]);
    }
}
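As a side note, the same callback can be registered through the C++ API, which avoids mixing the legacy C interface with cv::Mat; a sketch using the same names as above:
// Sketch: the C++ equivalent of cvSetMouseCallback.
cv::namedWindow("test");
cv::setMouseCallback("test", mouseEvent, &frame);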
