I know this topic has come up before, but I haven't found a solution that works for me.
I am trying to detect characters in a picture with the code below:
#include <tesseract/baseapi.h>
#include <leptonica/allheaders.h>
#include <opencv2/opencv.hpp>
#include <sstream>
#include <memory>
#include <iostream>
#define path "/home/jovan/Pictures/"
void resize(cv::Mat &img);
PIX *mat8ToPix(const cv::Mat *mat8);
cv::Mat pix8ToMat(PIX *pix8);
int main(int argc, char **argv)
{
    // Load image
    std::stringstream ss;
    ss << path;
    ss << argv[1];
    cv::Mat im = cv::imread(ss.str());
    if (im.empty())
    {
        std::cout << "Cannot open source image!" << std::endl;
        return EXIT_FAILURE;
    }
    resize(im);
    cv::Mat gray;
    cv::cvtColor(im, gray, CV_BGR2GRAY);
    // Pass it to the Tesseract API
    tesseract::TessBaseAPI tess;
    tess.Init(NULL, "eng", tesseract::OEM_DEFAULT);
    tess.SetPageSegMode(tesseract::PSM_SINGLE_BLOCK);
    tess.SetVariable("tessedit_char_whitelist", "QWERTYUIOPASDFGHJKLZXCVBNM");
    PIX *image = mat8ToPix(&gray); // mat8ToPix expects an 8-bit single-channel image, so pass the grayscale Mat
    //tess.SetImage((uchar*)gray.data, gray.cols, gray.rows, 1, gray.cols);
    tess.SetImage(image);
    // Get the text
    char *out = tess.GetUTF8Text();
    if (out != nullptr)
        std::cout << "here it is: " << out << std::endl;
    cv::imshow("image", im);
    cv::imshow("gray", gray);
    cv::waitKey();
    return 0;
}

// Halve the image while both dimensions are still at least 500 px.
void resize(cv::Mat &img)
{
    while (img.size().width >= 500 && img.size().height >= 500)
        cv::resize(img, img, cv::Size(img.size().width / 2, img.size().height / 2));
}

// Copy an 8-bit single-channel cv::Mat into a Leptonica PIX, pixel by pixel.
PIX *mat8ToPix(const cv::Mat *mat8)
{
    PIX *pixd = pixCreate(mat8->size().width, mat8->size().height, 8);
    for (int y = 0; y < mat8->rows; y++)
        for (int x = 0; x < mat8->cols; x++)
            pixSetPixel(pixd, x, y, (l_uint32) mat8->at<uchar>(y, x));
    return pixd;
}

// Copy an 8-bit Leptonica PIX back into a single-channel cv::Mat.
cv::Mat pix8ToMat(PIX *pix8)
{
    cv::Mat mat(cv::Size(pix8->w, pix8->h), CV_8UC1);
    uint32_t *line = pix8->data;
    for (uint32_t y = 0; y < pix8->h; ++y)
    {
        for (uint32_t x = 0; x < pix8->w; ++x)
            mat.at<uchar>(y, x) = GET_DATA_BYTE(line, x);
        line += pix8->wpl;
    }
    return mat;
}
Whatever picture I give it to process, I get this in the terminal:
$: Warning: Invalid resolution 0 dpi. Using 70 instead.
Does anyone have a solution?
Thanks in advance.
If you know the input image's resolution, you can call pixSetResolution on the Leptonica Pix object.
Or use the Tesseract API to pass in the value. See
Tess4j - Pdf to Tiff to tesseract - "Warning: Invalid resolution 0 dpi. Using 70 instead."
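For example, a minimal sketch against the code in the question (the 300 dpi value is only an assumption; use your image's real resolution if you know it):
PIX *image = mat8ToPix(&gray);
pixSetResolution(image, 300, 300);   // stamp an explicit dpi on the PIX so Tesseract stops guessing
tess.SetImage(image);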
Maybe this helps: I used EMGU and C#, but I think it should be the same in C++:
ocr.SetVariable("user_defined_dpi", "70");
... and the message should disappear ;)
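With the C++ API used in the question (assuming the tesseract::TessBaseAPI instance is called tess, as above), that line would be:
tess.SetVariable("user_defined_dpi", "70");   // tell Tesseract to assume 70 dpi instead of reading it from the image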
I had a similar issue. I found out from here that a dark background in the image was the problem; inverting the image colors fixed it.
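If you want to try that with the code from the question, a minimal sketch (inserted before the image is passed to Tesseract) could look like this:
// Invert the grayscale image so dark background / light text becomes
// a light background with dark text, which Tesseract usually handles better.
cv::Mat inverted;
cv::bitwise_not(gray, inverted);
PIX *pixInverted = mat8ToPix(&inverted);
tess.SetImage(pixInverted);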
Hi. I have the above image and use the "findContours" function.
Then I use the "convexityDefects" function to find the corner points.
The result is as follows.
The problem with this code is that it cannot find the rounded corners. It cannot find a point like the one below.
This is my code:
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>
#include <iostream>
#include <sstream>
#include <fstream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
    cv::Mat image = cv::imread("find_Contours.png");
    // Prepare the image for findContours
    cv::cvtColor(image, image, CV_BGR2GRAY);
    cv::threshold(image, image, 128, 255, CV_THRESH_BINARY);
    // Find the contours. Use the contourOutput Mat so the original image doesn't get overwritten
    std::vector<std::vector<cv::Point> > contours;
    cv::Mat contourOutput = image.clone();
    cv::findContours(contourOutput, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);

    //// convexityDefects
    vector<vector<Point> > hull(contours.size());
    vector<vector<int> > hullsI(contours.size()); // Indices to contour points
    vector<vector<Vec4i> > defects(contours.size());
    for (int i = 0; i < contours.size(); i++)
    {
        convexHull(contours[i], hull[i], false);
        convexHull(contours[i], hullsI[i], false);
        if (hullsI[i].size() > 3) // You need more than 3 indices
        {
            convexityDefects(contours[i], hullsI[i], defects[i]);
        }
    }

    ///// Draw convexityDefects
    for (int i = 0; i < contours.size(); ++i)
    {
        for (const Vec4i& v : defects[i])
        {
            float depth = v[3] / 256.0f; // defect depth is stored as a fixed-point value (256ths of a pixel)
            if (depth >= 0) // filter defects by depth, e.g. more than 10
            {
                int startidx = v[0]; Point ptStart(contours[i][startidx]);
                int endidx = v[1]; Point ptEnd(contours[i][endidx]);
                int faridx = v[2]; Point ptFar(contours[i][faridx]);
                circle(image, ptFar, 4, Scalar(255, 255, 255), 2);
                cout << ptFar << endl;
            }
        }
    }

    cv::imshow("Input Image", image);
    cvMoveWindow("Input Image", 0, 0);
    waitKey(0);
    return 0;
}
Can someone fix the code so that it finds the red dot? Please help.
Now I want to find the "convexity defects" from the inside, not the outside, like in this image:
Can someone help me?
It is very important to use
convexHull(contours[i], hullsI[i], true);
That is, with the last argument set to "true" for indices. I'm almost certain this is the reason it cannot find all the defects. Before fixing this, there is not much sense in trying to find other bugs (if any).
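A minimal sketch of that change applied to the loops from the question (the depth threshold of 10 just echoes the comment in the original code):
for (size_t i = 0; i < contours.size(); i++)
{
    convexHull(contours[i], hullsI[i], true);      // hull as indices, per the note above
    if (hullsI[i].size() > 3)
        convexityDefects(contours[i], hullsI[i], defects[i]);

    for (const Vec4i& v : defects[i])
    {
        float depth = v[3] / 256.0f;               // defect depth is stored as value * 256
        if (depth > 10)                            // keep only pronounced defects
            circle(image, contours[i][v[2]], 4, Scalar(255, 255, 255), 2);
    }
}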
I have recently used this piece of code to save frame data from a webcam
#include <opencv\cv.h>
#include <opencv\highgui.h>
#include <opencv2/opencv.hpp>
using namespace cv;
#include <fstream>
using namespace std;
int main(int argc, char** argv)
{
    VideoCapture cap(0); // open the default camera
    if (!cap.isOpened()) // check if we succeeded
        return -1;
    cap.set(CV_CAP_PROP_FPS, 15);
    Mat edges;
    namedWindow("image", 1);

    std::vector<cv::Mat> images(100);
    for (int i = 0; i < 100; ++i) {
        // this is optional, preallocation so there's no allocation
        // during capture
        images[i].create(480, 640, CV_8UC3);
    }

    for (int i = 0; i < 100; ++i)
    {
        Mat frame;
        cap >> frame; // get a new frame from camera
        frame.copyTo(images[i]);
    }
    cap.release();

    for (int i = 0; i < 100; ++i)
    {
        imshow("image", images[i]);
        if (waitKey(30) >= 0) break;
    }
After this, I want to use imread to analyse the newly captured frames. However, I cannot think of a way to accomplish this.
I tried: Mat colorImage = imread(images[i]);
However, it leads to:
error C2664: 'cv::Mat cv::imread(const cv::String &,int)': cannot convert argument 1 from 'std::vector<cv::Mat,std::allocator<_Ty>>' to 'const cv::String &'
with
[
_Ty=cv::Mat
]
Thanks a lot in advance :)
The imread function is used to load an image from disk.
You already have a vector of images, so you can just do:
Mat colorImage = images[i];
And by the way, there is no need for this:
for (int i = 0; i < 100; ++i) {
// this is optional, preallocation so there's no allocation
// during capture
images[i].create(480, 640, CV_8UC3);
}
because new space is allocated anyway, unless you capture the frames directly like this:
cap >> images[i];
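If you really do want to go through imread, the frames would first have to be written to disk with imwrite; a minimal sketch (the file names are just an example):
// Write each captured frame to disk, then read it back with imread for analysis.
for (int i = 0; i < 100; ++i)
{
    std::string name = cv::format("frame_%03d.png", i); // example file naming scheme
    imwrite(name, images[i]);              // encode the Mat and save it as a PNG
    Mat colorImage = imread(name);         // load it again from disk
    // ... analyse colorImage here ...
}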
Why is webcam image processing very slow when using Xcode for this OpenCV project? Only one of the three windows works (the "similar spaces" and HSV windows do not show up), and it is very slow. How can I increase the speed of execution of the program?
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace cv;
using namespace std;
Mat img, hsv, res;
const char *win1 = "RGB";
const char *win2 = "HSV";
const char *win3 = "similar spaces";
uchar thresh = 5;

void setColor(uchar hval){
    int i, j;
    for (i = 0; i < res.rows; ++i){
        for (j = 0; j < res.cols; ++j){
            if( hsv.at<Vec3b>(i,j)[0] <= hval+thresh
                && hsv.at<Vec3b>(i,j)[0] >= hval-thresh)
                res.at<uchar>(i,j) = 255;
            else res.at<uchar>(i,j) = 0;
        }
    }
    imshow(win3, res);
}

void MouseCallBackFunc(int event, int x, int y, int flags, void* userdata){
    if(event==EVENT_LBUTTONDOWN){
        cout<<"\t x,y : "<<x<<','<<y<<endl;
        cout<<'\t'<<img.at<Vec3b>(y,x)<<endl;
        setColor(hsv.at<Vec3b>(y,x)[0]);
    }
}

int main()
{
    img = imread("/usr/share/opencv/samples/cpp/stuff.jpg", CV_LOAD_IMAGE_COLOR);
    hsv = Mat::zeros(img.size(), CV_8UC3);
    res = Mat::zeros(img.size(), CV_8UC1);
    char c;
    int i, j;
    namedWindow(win2, CV_WINDOW_NORMAL);
    namedWindow(win3, CV_WINDOW_NORMAL);
    cvtColor(img, hsv, CV_RGB2HSV); // note: imread returns BGR data, so CV_BGR2HSV is usually what is wanted here
    imshow(win1, img);
    imshow(win2, hsv);
    imshow(win3, res);
    setMouseCallback(win1, MouseCallBackFunc, NULL);

    // VideoCapture stream(0); // 0 is the id of the video device, 0 if you have only one camera.
    // if (!stream.isOpened()) { // check if the video device has been initialised
    //     cout << "cannot open camera";
    // }
    // while (true) {
    //     Mat cameraFrame;
    //     stream.read(cameraFrame);
    //     imshow("test", cameraFrame);
    //     c = waitKey(30);
    //     if (c == 27)
    //         break;
    // }

    while((c = waitKey(300)) != 27){}
    return 0;
}
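As a note on the speed part of the question: the per-pixel loop in setColor does a hue-window test that OpenCV's inRange can do in one vectorized call. A minimal sketch of that replacement, assuming the globals hsv, res, thresh and win3 from the code above:
void setColor(uchar hval)
{
    // Keep pixels whose hue lies within +/- thresh of hval; everything else becomes 0.
    // inRange runs as a single optimized call instead of a per-pixel C++ loop.
    inRange(hsv,
            Scalar(hval - thresh, 0, 0),
            Scalar(hval + thresh, 255, 255),
            res);
    imshow(win3, res);
}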
I want to segment a solid blob for each object in the extracted foreground and bound each object with a box. But my code shows many boxes bounding random blobs on a single object, because the blob for one object is not solid and there are many small blobs too.
Here is my code:
#include"stdafx.h"
#include<vector>
#include<iostream>
#include<opencv2/opencv.hpp>
#include<opencv2/core/core.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<opencv2/highgui/highgui.hpp>
int main(int argc, char *argv[])
{
    cv::Mat frame;
    cv::Mat fg;
    cv::Mat thresholded;
    cv::Mat thresholded2;
    cv::Mat result;
    cv::Mat bgmodel;
    cv::namedWindow("Frame");
    cv::namedWindow("Background Model");
    cv::VideoCapture cap(0);

    cv::BackgroundSubtractorMOG2 bgs;
    bgs.nmixtures = 2;
    bgs.history = 60;
    bgs.varThreshold = 15;
    bgs.bShadowDetection = true;
    bgs.nShadowDetection = 0;
    bgs.fTau = 0.5;

    std::vector<std::vector<cv::Point>> contours;

    for(;;)
    {
        cap >> frame;
        cv::blur(frame, frame, cv::Size(10,10));
        bgs.operator()(frame, fg);
        bgs.getBackgroundImage(bgmodel);
        cv::erode(fg, fg, cv::Mat());
        cv::dilate(fg, fg, cv::Mat());
        cv::threshold(fg, thresholded, 70.0f, 255, CV_THRESH_BINARY);
        cv::threshold(fg, thresholded2, 70.0f, 255, CV_THRESH_BINARY);
        cv::findContours(thresholded, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
        cv::cvtColor(thresholded2, result, CV_GRAY2RGB);

        int cmin = 50;
        int cmax = 10000;
        std::vector<std::vector<cv::Point>>::iterator itc = contours.begin();
        while (itc != contours.end()) {
            if (itc->size() < cmin || itc->size() > cmax){
                itc = contours.erase(itc);
            } else {
                std::vector<cv::Point> pts = *itc;
                cv::Mat pointsMatrix = cv::Mat(pts);
                cv::Scalar color(0, 255, 0);
                cv::Rect r0 = cv::boundingRect(pointsMatrix);
                cv::rectangle(result, r0, color, 2);
                ++itc;
            }
        }

        cv::imshow("Frame", result);
        cv::imshow("Background Model", bgmodel);
        if(cv::waitKey(30) >= 0) break;
    }
    return 0;
}
And here is the result:
Frame
So how can I segment a solid blob for each object found in the extracted foreground, and bound the objects one by one with a box?
A solid blob means a solid white blob like here: xxx
I'll appreciate any help here.
NB: Sorry for my bad English. :)
=================
This is my edited code!
#include"stdafx.h"
#include<vector>
#include<iostream>
#include<opencv2/opencv.hpp>
#include<opencv2/core/core.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<opencv2/highgui/highgui.hpp>
int main(int argc, char *argv[])
{
    cv::Mat frame;
    cv::Mat fg;
    cv::Mat blurred;
    cv::Mat thresholded;
    cv::Mat thresholded2;
    cv::Mat result;
    cv::Mat bgmodel;
    cv::namedWindow("Frame");
    cv::namedWindow("Background Model");
    cv::VideoCapture cap(0);

    cv::BackgroundSubtractorMOG2 bgs;
    bgs.nmixtures = 2;
    bgs.history = 60;
    bgs.varThreshold = 15;
    bgs.bShadowDetection = true;
    bgs.nShadowDetection = 0;
    bgs.fTau = 0.5;

    std::vector<std::vector<cv::Point>> contours;

    for(;;)
    {
        cap >> frame;
        cv::blur(frame, blurred, cv::Size(10,10));
        bgs.operator()(blurred, fg);
        bgs.getBackgroundImage(bgmodel);
        cv::threshold(fg, thresholded, 70.0f, 255, CV_THRESH_BINARY);
        cv::threshold(fg, thresholded2, 70.0f, 255, CV_THRESH_BINARY);
        cv::Mat element50(50, 50, CV_8U, cv::Scalar(1));
        cv::morphologyEx(thresholded, thresholded, cv::MORPH_CLOSE, element50);
        cv::morphologyEx(thresholded2, thresholded2, cv::MORPH_CLOSE, element50);
        cv::findContours(thresholded, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
        cv::cvtColor(thresholded2, result, CV_GRAY2RGB);

        int cmin = 50;
        int cmax = 10000;
        std::vector<std::vector<cv::Point>>::iterator itc = contours.begin();
        while (itc != contours.end()) {
            if (itc->size() < cmin || itc->size() > cmax){
                itc = contours.erase(itc);
            } else {
                std::vector<cv::Point> pts = *itc;
                cv::Mat pointsMatrix = cv::Mat(pts);
                cv::Scalar color(0, 255, 0);
                cv::Rect r0 = cv::boundingRect(pointsMatrix);
                cv::rectangle(result, r0, color, 2);
                ++itc;
            }
        }

        cv::imshow("Frame", result);
        cv::imshow("Background Model", bgmodel);
        if(cv::waitKey(30) >= 0) break;
    }
    return 0;
}
And here is the result: FRAME
Thanks to elactic. :)
You can try to merge blobs with morphological closing (which is the erosion of the dilation of a binary image). You can use the OpenCV functions erode and dilate for that.
This tutorial should help you.
I assume you will still have to filter blobs by size after that.
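A minimal sketch of that idea applied to the thresholded foreground mask from the question (the kernel size and area limits are example values, not tuned):
// Close small gaps so each object becomes one solid blob, then keep only
// contours whose area is within a plausible range for a real object.
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(15, 15));
cv::morphologyEx(thresholded, thresholded, cv::MORPH_CLOSE, kernel);

std::vector<std::vector<cv::Point>> contours;
cv::findContours(thresholded, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

for (size_t i = 0; i < contours.size(); i++)
{
    double area = cv::contourArea(contours[i]);   // filter on blob area, not point count
    if (area > 500 && area < 100000)              // example size limits
        cv::rectangle(result, cv::boundingRect(contours[i]), cv::Scalar(0, 255, 0), 2);
}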
Hi, I am doing a project on 3D image reconstruction. I am at the phase of calibrating the camera, which is taking a long time. But when I compile the code and show the checkerboard in front of the camera, it goes straight to an unhandled exception error.
When the board is not in the frame there is no error; as soon as it gets into the frame, the unhandled error occurs and I don't know why.
I have asked a lot of people and nobody seems able to help.
Here is my code:
#include <cv.h>
#include <highgui.h>
#include <vector>
#include <stdlib.h>
#include <stdio.h>
using namespace cv;
using namespace std;
int main()
{
    int numBoards = 0;
    int numCornersHor;
    int numCornersVer;

    printf("Enter number of corners along width: ");
    scanf("%d", &numCornersHor);
    printf("Enter number of corners along height: ");
    scanf("%d", &numCornersVer);
    printf("Enter number of boards: ");
    scanf("%d", &numBoards);

    int numSquares = numCornersHor * numCornersVer;
    Size board_sz = Size(numCornersHor, numCornersVer);
    VideoCapture capture = VideoCapture(0);

    vector<vector<Point3d>> object_points;
    vector<vector<Point2d>> image_points;
    vector<Point2d> corners;
    int successes = 0;

    Mat image;
    Mat gray_image;
    capture >> image;

    vector<Point3d> obj;
    for(int j = 0; j < numSquares; j++)
        obj.push_back(Point3d(j / numCornersHor, j % numCornersHor, 0.0f));

    while(successes < numBoards)
    {
        cvtColor(image, gray_image, CV_BGR2GRAY);
        bool found = findChessboardCorners(image, board_sz, corners, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
        if(found)
        {
            cornerSubPix(gray_image, corners, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1));
            drawChessboardCorners(gray_image, board_sz, corners, found);
        }
        imshow("win1", image);
        imshow("win2", gray_image);
        capture >> image;

        int key = waitKey(1);
        if(key == 27)
            return 0;
        if(key == ' ' && found != 0)
        {
            image_points.push_back(corners);
            object_points.push_back(obj);
            printf("Snap stored!\n");
            successes++;
            if(successes >= numBoards)
                break;
        }
    }

    Mat intrinsic = Mat(3, 3, CV_32FC1);
    Mat distCoeffs;
    vector<Mat> rvecs;
    vector<Mat> tvecs;
    intrinsic.ptr<float>(0)[0] = 1;
    intrinsic.ptr<float>(1)[1] = 1;

    calibrateCamera(object_points, image_points, image.size(), intrinsic, distCoeffs, rvecs, tvecs);

    Mat imageUndistorted;
    while(1)
    {
        capture >> image;
        undistort(image, imageUndistorted, intrinsic, distCoeffs);
        imshow("win1", image);
        imshow("win2", imageUndistorted);
        waitKey(1);
    }
    capture.release();
    return 0;
}
The error I get on the console is:
OpenCV ERROR: Assertion failed (ncorners >=0 && corners.depth() == CV_32F) in unknown function file , file .....\src\opencv\modules\imgproc\src\cornersubpix.cpp, line 257.
and the error dialog says:
Unhandled exception at 0x769afc16 in basiccalibration.exe: Microsoft C++ exception: cv::Exception at memory location 0x0021f51c..
Help would be appreciated.
Thanks
Use Point2f and Point3f instead of Point2d and Point3d. Please read the assertion text: it demands a CV_32F depth structure.
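A minimal sketch of that change applied to the declarations from the question:
// cornerSubPix (and the rest of this pipeline) expects 32-bit float points,
// which is exactly what the CV_32F assertion is checking for.
vector<vector<Point3f>> object_points;
vector<vector<Point2f>> image_points;
vector<Point2f> corners;

vector<Point3f> obj;
for (int j = 0; j < numSquares; j++)
    obj.push_back(Point3f(j / numCornersHor, j % numCornersHor, 0.0f));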