I am using an iPhone 5s to do black object tracking, but I often get
Thread 6: EXC_BAD_ACCESS (code=1, address=0x8)
and then my app quits suddenly. Could anyone tell me why this happens?
The error occurs at:
template<typename _Tp> inline
_Tp Rect_<_Tp>::area() const
{
    return width * height; // Thread 6: EXC_BAD_ACCESS (code=1, address=0x8)
}
// this method is in types.hpp in the latest OpenCV framework
My colored object recognition code is below:
#pragma mark - Protocol CvVideoCameraDelegate
#ifdef __cplusplus
- (void)processImage:(cv::Mat &)image{
    Mat imageCopy, imageCopy2;
    cvtColor(image, imageCopy, COLOR_BGRA2BGR);
    cvtColor(imageCopy, imageCopy2, COLOR_BGR2HSV);
    // smooth the image
    GaussianBlur(imageCopy2, imageCopy, cv::Size(5,5), 0, 0);
    cv::inRange(imageCopy, cv::Scalar(0,0,0,0), cv::Scalar(180,255,30,0), imageCopy2);
    /************* find the contours of the detected area and draw them *************/
    // 2-D point vector to store the contours
    std::vector<std::vector<cv::Point>> contour1;
    // do opening on the binary thresholded image
    int erosionSize = 3;
    Mat erodeElement = getStructuringElement(cv::MORPH_ELLIPSE,
                                             cv::Size(2*erosionSize+1, 2*erosionSize+1),
                                             cv::Point(erosionSize, erosionSize));
    erode(imageCopy2, imageCopy2, erodeElement);
    dilate(imageCopy2, imageCopy2, erodeElement);
    // actual call that finds the contours
    cv::findContours(imageCopy2, contour1, RETR_EXTERNAL, CHAIN_APPROX_NONE);
    // set the color used to draw the contours
    Scalar color1 = Scalar(50,50,50);
    // loop over the contours and draw them
    for(int i = 0; i < contour1.size(); i++){
        drawContours(image, contour1, i, color1);
    }
    /***** approximate the contours to polygons && get bounding rectangles and circles *****/
    std::vector<std::vector<cv::Point>> contours_poly(contour1.size());
    std::vector<cv::Rect> boundedRect(contour1.size());
    std::vector<cv::Point2f> circleCenter(contour1.size());
    std::vector<float> circleRadius(contour1.size());
    for (int i = 0; i < contour1.size(); i++){
        approxPolyDP(Mat(contour1[i]), contours_poly[i], 3, true);
        boundedRect[i] = boundingRect(Mat(contours_poly[i]));
        minEnclosingCircle((Mat)contours_poly[i], circleCenter[i], circleRadius[i]);
    }
    /***** draw the rectangle for the detected area *****/
    Scalar recColor = Scalar(121,200,60);
    Scalar fontColor = Scalar(0,0,225);
    // find the largest contour
    int largestContourIndex = 0;
    for (int i = 0; i < contour1.size(); i++){
        if(boundedRect[i].area() > boundedRect[largestContourIndex].area())
            largestContourIndex = i;
    }
    int j = largestContourIndex;
    if(boundedRect[j].area() > 40){
        rectangle(image, boundedRect[j].tl(), boundedRect[j].br(), recColor);
        // show text at the top-left corner
        cv::Point fontPoint = boundedRect[j].tl();
        putText(image, "Black", fontPoint, FONT_HERSHEY_COMPLEX, 3, fontColor);
    }
    // cvtColor(imageCopy, image, COLOR_HLS2BGR);
}
#endif
Finally figured out why:
Just as @SolaWing said, there is an invalid access here. For future viewers, to make it more clear:
The problem is in the following code:
if(boundedRect[j].area()>40){
    rectangle(image, boundedRect[j].tl(), boundedRect[j].br(), recColor);
    // show text at the top-left corner
    cv::Point fontPoint = boundedRect[j].tl();
    putText(image, "Black", fontPoint, FONT_HERSHEY_COMPLEX, 3, fontColor);
}
This block assumes there is always a detected area. But when there is no target in front of the phone camera, contour1.size() is zero, so std::vector<cv::Rect> boundedRect(contour1.size()); creates an empty vector. boundedRect[j] then reads past the end of that empty vector, which is undefined behavior and shows up here as the EXC_BAD_ACCESS.
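For future reference, a minimal guard (just a sketch; the variable names follow the processImage: code above) is to skip the measuring/drawing step entirely when no contour was found, so the empty vector is never indexed:

// only index boundedRect when contour1 actually produced entries
if (!contour1.empty()) {
    int largestContourIndex = 0;
    for (size_t i = 1; i < contour1.size(); i++) {
        if (boundedRect[i].area() > boundedRect[largestContourIndex].area())
            largestContourIndex = (int)i;
    }
    const cv::Rect& largest = boundedRect[largestContourIndex];
    if (largest.area() > 40) {
        rectangle(image, largest.tl(), largest.br(), recColor);
        putText(image, "Black", largest.tl(), FONT_HERSHEY_COMPLEX, 3, fontColor);
    }
}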
I am new to OpenCV and trying to find contours and draw rectangles around them. Here's my code, but it throws a cv::Exception when it reaches accumulateWeighted().
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <ctype.h>
using namespace cv;
using namespace std;
static void help()
{
    cout << "\nThis is an example implementing CAMSHIFT to detect multiple moving objects.\n";
}
Rect rect;
VideoCapture capture;
Mat currentFrame, currentFrame_grey, differenceImg, oldFrame_grey,background;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
bool first = true;
int main(int argc, char* argv[])
{
    //Create a new movie capture object.
    capture.open(0);
    if(!capture.isOpened())
    {
        //error in opening the video input
        cerr << "Unable to open video file: " /*<< videoFilename*/ << endl;
        exit(EXIT_FAILURE);
    }
    //capture current frame from webcam
    capture >> currentFrame;
    //Size of the image.
    CvSize imgSize;
    imgSize.width = currentFrame.size().width;
    imgSize.height = currentFrame.size().height;
    //Images to use in the program.
    currentFrame_grey.create(imgSize, IPL_DEPTH_8U);
    while(1)
    {
        capture >> currentFrame;
        //Convert the image to grayscale.
        cvtColor(currentFrame, currentFrame_grey, CV_RGB2GRAY);
        //Convert the original image so background and current frame have the same type
        currentFrame.convertTo(currentFrame, CV_32FC3);
        background = Mat::zeros(currentFrame.size(), CV_32FC3);
        //Here it throws the exception
        accumulateWeighted(currentFrame, background, 1.0, NULL);
        imshow("Background", background);
        if(first) //Capturing background for the first time
        {
            differenceImg = currentFrame_grey.clone();
            oldFrame_grey = currentFrame_grey.clone();
            convertScaleAbs(currentFrame_grey, oldFrame_grey, 1.0, 0.0);
            first = false;
            continue;
        }
        //Subtract the current frame from the moving average.
        absdiff(oldFrame_grey, currentFrame_grey, differenceImg);
        //blurring the difference image
        blur(differenceImg, differenceImg, imgSize);
        //apply threshold to discard small unwanted movements
        threshold(differenceImg, differenceImg, 25, 255, CV_THRESH_BINARY);
        //find contours
        findContours(differenceImg, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
        //draw a bounding box around each contour
        for(int i = 0; i < contours.size(); i++)
        {
            //extract the bounding box of the current contour
            rect = boundingRect(contours[i]);
            //draw the rectangle
            rectangle(currentFrame, cvPoint(rect.x, rect.y), cvPoint(rect.x+rect.width, rect.y+rect.height), cvScalar(0, 0, 255, 0), 2, 8, 0);
        }
        //New background
        convertScaleAbs(currentFrame_grey, oldFrame_grey, 1.0, 0.0);
        //display the colour image with bounding boxes
        imshow("Output Image", currentFrame);
        //display the threshold image
        imshow("Difference image", differenceImg);
        //clear the contours for the next frame
        contours.clear();
        //press Esc to exit
        char c = cvWaitKey(33);
        if( c == 27 ) break;
    }
    // Destroy all windows.
    destroyAllWindows();
    return 0;
}
Please help me solve this.
First of all, I don't really get the idea of calling accumulateWeighted with alpha = 1.0. If you look at the definition of accumulateWeighted in the docs, you will see that with alpha = 1.0 it is basically equivalent to copying currentFrame into background at each iteration.
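For reference, the operation that accumulateWeighted performs is:

background(x,y) = (1 - alpha) * background(x,y) + alpha * currentFrame(x,y)

so with alpha = 1.0 the previous content of background is discarded completely on every call.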
Moreover, it is an accumulation function, meant to accumulate image changes over time into a new image. What is the point of it if you reset background at every loop iteration with background = Mat::zeros(currentFrame.size(), CV_32FC3); ?
That being said, there is a little flaw in your code with the 4th argument of the function. You wrote accumulateWeighted(currentFrame,background,1.0,NULL);. If you look into the documentation you will find that the 4th argument is a mask, and is optional. Passing a NULL pointer here might be the source of your exception. Why don't you call the function like this: accumulateWeighted(currentFrame,background,1.0); ?
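Putting both points together, here is a sketch of the fixed flow (the value alpha = 0.05 is an arbitrary choice; tune it to how fast the background should adapt):

// initialize the accumulator once, before the loop
capture >> currentFrame;
currentFrame.convertTo(currentFrame, CV_32FC3);
background = Mat::zeros(currentFrame.size(), CV_32FC3);

while (true)
{
    capture >> currentFrame;
    currentFrame.convertTo(currentFrame, CV_32FC3);
    // small alpha: the background adapts slowly; the optional mask is simply omitted
    accumulateWeighted(currentFrame, background, 0.05);
    // ... rest of the processing ...
}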
Hope this helps,
Ben
I have an x-ray image of a hand. I need to extract the bones automatically. I can easily segment the hand using different techniques, but those techniques don't help me get the bones. Some of the bones are brighter than others, so if I use thresholding, some of them disappear while others become clearer as I raise the threshold. Maybe I should threshold only a region of the hand? Is it possible to threshold a ROI that is not rectangular? Or maybe you have other solutions or advice? Are there libraries like OpenCV or something for that? Any help would be very great!
Extended:
[Images: raw image and expected output]
One approach could be to segment the hand and fingers from the image:
And then creating another image with just the hand silhouette:
Once you have the silhouette you can erode the image to make it a little smaller. This is used to subtract the hand from the hand & fingers image, resulting in the fingers:
The code below shows how to execute this approach:
#include <opencv2/opencv.hpp>
#include <iostream>

void detect_hand_and_fingers(cv::Mat& src);
void detect_hand_silhoutte(cv::Mat& src);

int main(int argc, char* argv[])
{
    cv::Mat img = cv::imread(argv[1]);
    if (img.empty())
    {
        std::cout << "!!! imread() failed to open target image" << std::endl;
        return -1;
    }

    // Convert RGB Mat to GRAY
    cv::Mat gray;
    cv::cvtColor(img, gray, CV_BGR2GRAY);
    cv::Mat gray_silhouette = gray.clone();

    /* Isolate Hand + Fingers */
    detect_hand_and_fingers(gray);
    cv::imshow("Hand+Fingers", gray);
    cv::imwrite("hand_fingers.png", gray);

    /* Isolate Hand Silhouette and subtract it from the other image (Hand+Fingers) */
    detect_hand_silhoutte(gray_silhouette);
    cv::imshow("Hand", gray_silhouette);
    cv::imwrite("hand_silhoutte.png", gray_silhouette);

    /* Subtract Hand Silhouette from Hand+Fingers so we get only the fingers */
    cv::Mat fingers = gray - gray_silhouette;
    cv::imshow("Fingers", fingers);
    cv::imwrite("fingers_only.png", fingers);

    cv::waitKey(0);
    return 0;
}

void detect_hand_and_fingers(cv::Mat& src)
{
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(3,3), cv::Point(1,1));
    // the operation argument must be a MORPH_* operation (opening here), not a shape
    cv::morphologyEx(src, src, cv::MORPH_OPEN, kernel);

    int adaptiveMethod = CV_ADAPTIVE_THRESH_GAUSSIAN_C; // CV_ADAPTIVE_THRESH_MEAN_C, CV_ADAPTIVE_THRESH_GAUSSIAN_C
    cv::adaptiveThreshold(src, src, 255,
                          adaptiveMethod, CV_THRESH_BINARY,
                          9, -5);

    int dilate_sz = 1;
    cv::Mat element = cv::getStructuringElement(cv::MORPH_ELLIPSE,
                                                cv::Size(2*dilate_sz, 2*dilate_sz),
                                                cv::Point(dilate_sz, dilate_sz));
    cv::dilate(src, src, element);
}

void detect_hand_silhoutte(cv::Mat& src)
{
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(7, 7), cv::Point(3, 3));
    // opening removes small bright details before thresholding
    cv::morphologyEx(src, src, cv::MORPH_OPEN, kernel);

    int adaptiveMethod = CV_ADAPTIVE_THRESH_MEAN_C; // CV_ADAPTIVE_THRESH_MEAN_C, CV_ADAPTIVE_THRESH_GAUSSIAN_C
    cv::adaptiveThreshold(src, src, 255,
                          adaptiveMethod, CV_THRESH_BINARY,
                          251, 5); // 251, 5

    int erode_sz = 5;
    cv::Mat element = cv::getStructuringElement(cv::MORPH_ELLIPSE,
                                                cv::Size(2*erode_sz + 1, 2*erode_sz + 1),
                                                cv::Point(erode_sz, erode_sz));
    cv::erode(src, src, element);

    int dilate_sz = 1;
    element = cv::getStructuringElement(cv::MORPH_ELLIPSE,
                                        cv::Size(2*dilate_sz + 1, 2*dilate_sz + 1),
                                        cv::Point(dilate_sz, dilate_sz));
    cv::dilate(src, src, element);

    cv::bitwise_not(src, src);
}
I am working on an implementation where I have a rectangle-shaped image inside a big background image. I am trying to programmatically retrieve the rectangle-shaped image from the big image and then extract text information from that rectangle image. I am trying to use the OpenCV third-party framework, but I haven't been able to retrieve the rectangle image from the big background image. Could someone please guide me on how I can achieve this?
UPDATED:
I found a link that finds square shapes using OpenCV. Can it be modified to find rectangle shapes? Can someone guide me on this?
UPDATED LATEST:
I finally got the code; here it is below.
- (cv::Mat)cvMatWithImage:(UIImage *)image
{
CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
CGFloat cols = image.size.width;
CGFloat rows = image.size.height;
cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to backing data
cols, // Width of bitmap
rows, // Height of bitmap
8, // Bits per component
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNoneSkipLast |
kCGBitmapByteOrderDefault); // Bitmap info flags
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
CGContextRelease(contextRef);
return cvMat;
}
-(UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize()*cvMat.total()];
CGColorSpaceRef colorSpace;
if ( cvMat.elemSize() == 1 ) {
colorSpace = CGColorSpaceCreateDeviceGray();
}
else {
colorSpace = CGColorSpaceCreateDeviceRGB();
}
//CFDataRef data;
CGDataProviderRef provider = CGDataProviderCreateWithCFData( (CFDataRef) data ); // It SHOULD BE (__bridge CFDataRef)data
CGImageRef imageRef = CGImageCreate( cvMat.cols, cvMat.rows, 8, 8 * cvMat.elemSize(), cvMat.step[0], colorSpace, kCGImageAlphaNone|kCGBitmapByteOrderDefault, provider, NULL, false, kCGRenderingIntentDefault );
UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease( imageRef );
CGDataProviderRelease( provider );
CGColorSpaceRelease( colorSpace );
return finalImage;
}
-(void)forOpenCV
{
imageView = [UIImage imageNamed:@"myimage.jpg"];
if( imageView != nil )
{
cv::Mat tempMat = [imageView CVMat];
cv::Mat greyMat = [self cvMatWithImage:imageView];
cv::vector<cv::vector<cv::Point> > squares;
cv::Mat img= [self debugSquares: squares: greyMat];
imageView = [self UIImageFromCVMat: img];
self.imageView.image = imageView;
}
}
double angle( cv::Point pt1, cv::Point pt2, cv::Point pt0 ) {
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
- (cv::Mat) debugSquares: (std::vector<std::vector<cv::Point> >) squares : (cv::Mat &)image
{
NSLog(#"%lu",squares.size());
// blur will enhance edge detection
//cv::Mat blurred(image);
cv::Mat blurred = image.clone();
medianBlur(image, blurred, 9);
cv::Mat gray0(image.size(), CV_8U), gray;
cv::vector<cv::vector<cv::Point> > contours;
// find squares in every color plane of the image
for (int c = 0; c < 3; c++)
{
int ch[] = {c, 0};
mixChannels(&image, 1, &gray0, 1, ch, 1);
// try several threshold levels
const int threshold_level = 2;
for (int l = 0; l < threshold_level; l++)
{
// Use Canny instead of zero threshold level!
// Canny helps to catch squares with gradient shading
if (l == 0)
{
Canny(gray0, gray, 10, 20, 3); //
// Dilate helps to remove potential holes between edge segments
dilate(gray, gray, cv::Mat(), cv::Point(-1,-1));
}
else
{
gray = gray0 >= (l+1) * 255 / threshold_level;
}
// Find contours and store them in a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
// Test contours
cv::vector<cv::Point> approx;
for (size_t i = 0; i < contours.size(); i++)
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(cv::Mat(contours[i]), approx, arcLength(cv::Mat(contours[i]), true)*0.02, true);
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if (approx.size() == 4 &&
fabs(contourArea(cv::Mat(approx))) > 1000 &&
isContourConvex(cv::Mat(approx)))
{
double maxCosine = 0;
for (int j = 2; j < 5; j++)
{
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
if (maxCosine < 0.3)
squares.push_back(approx);
}
}
}
}
NSLog(#"squares.size(): %lu",squares.size());
for( size_t i = 0; i < squares.size(); i++ )
{
cv::Rect rectangle = boundingRect(cv::Mat(squares[i]));
NSLog(#"rectangle.x: %d", rectangle.x);
NSLog(#"rectangle.y: %d", rectangle.y);
if(i==squares.size()-1)////Detecting Rectangle here
{
const cv::Point* p = &squares[i][0];
int n = (int)squares[i].size();
NSLog(#"%d",n);
line(image, cv::Point(507,418), cv::Point(507+1776,418+1372), cv::Scalar(255,0,0),2,8);
polylines(image, &p, &n, 1, true, cv::Scalar(255,255,0), 5, CV_AA);
int fx1=rectangle.x;
NSLog(@"X: %d", fx1);
int fy1=rectangle.y;
NSLog(@"Y: %d", fy1);
int fx2=rectangle.x+rectangle.width;
NSLog(@"Width: %d", fx2);
int fy2=rectangle.y+rectangle.height;
NSLog(@"Height: %d", fy2);
line(image, cv::Point(fx1,fy1), cv::Point(fx2,fy2), cv::Scalar(0,0,255),2,8);
}
}
return image;
}
Thank you.
Here is a full answer using a small wrapper class to separate the C++ from the Objective-C code.
I had to raise another question on Stack Overflow to deal with my poor C++ knowledge, but I have worked out everything we need to interface C++ cleanly with Objective-C code, using the squares.cpp sample code as an example. The aim is to keep the original C++ code as pristine as possible, and to keep the bulk of the OpenCV work in pure C++ files for (im)portability.
I have left my original answer in place as this seems to go beyond an edit. The complete demo project is on GitHub.
CVViewController.h / CVViewController.m
pure Objective-C
communicates with the OpenCV C++ code via a WRAPPER... it neither knows nor cares that C++ is processing these method calls behind the wrapper.
CVSquaresWrapper.h / CVSquaresWrapper.mm
Objective-C++
does as little as possible, really only two things...
calls UIImage Objective-C++ categories to convert to and from UIImage <> cv::Mat
mediates between CVViewController's Objective-C methods and the CVSquares C++ (class) function calls
CVSquares.h / CVSquares.cpp
pure C++
CVSquares.h declares public functions inside a class definition (in this case, one static function).
This replaces the work of main{} in the original file.
We try to keep CVSquares.cpp as close as possible to the C++ original for portability.
CVViewController.m
//remove 'magic numbers' from original C++ source so we can manipulate them from obj-C
#define TOLERANCE 0.01
#define THRESHOLD 50
#define LEVELS 9
UIImage* image =
[CVSquaresWrapper detectedSquaresInImage:self.image
tolerance:TOLERANCE
threshold:THRESHOLD
levels:LEVELS];
CVSquaresWrapper.h
// CVSquaresWrapper.h
#import <Foundation/Foundation.h>
@interface CVSquaresWrapper : NSObject
+ (UIImage*) detectedSquaresInImage:(UIImage*)image
                          tolerance:(CGFloat)tolerance
                          threshold:(NSInteger)threshold
                             levels:(NSInteger)levels;
@end
CVSquaresWrapper.mm
// CVSquaresWrapper.mm
// wrapper that talks to c++ and to obj-c classes
#import "CVSquaresWrapper.h"
#import "CVSquares.h"
#import "UIImage+OpenCV.h"
@implementation CVSquaresWrapper

+ (UIImage*) detectedSquaresInImage:(UIImage*) image
                          tolerance:(CGFloat)tolerance
                          threshold:(NSInteger)threshold
                             levels:(NSInteger)levels
{
    UIImage* result = nil;

    //convert from UIImage to cv::Mat openCV image format
    //this is a category on UIImage
    cv::Mat matImage = [image CVMat];

    //call the c++ class static member function
    //we want this function signature to exactly
    //mirror the form of the calling method
    matImage = CVSquares::detectedSquaresInImage (matImage, tolerance, threshold, levels);

    //convert back from cv::Mat openCV image format
    //to UIImage image format (category on UIImage)
    result = [UIImage imageFromCVMat:matImage];

    return result;
}

@end
CVSquares.h
// CVSquares.h
#ifndef __OpenCVClient__CVSquares__
#define __OpenCVClient__CVSquares__

#include <opencv2/core/core.hpp> // for cv::Mat

//class definition
//in this example we do not need a class
//as we have no instance variables and just one static function.
//We could instead just declare the function but this form seems clearer
class CVSquares
{
public:
    static cv::Mat detectedSquaresInImage (cv::Mat image, float tol, int threshold, int levels);
};

#endif /* defined(__OpenCVClient__CVSquares__) */
CVSquares.cpp
// CVSquares.cpp
#include "CVSquares.h"
using namespace std;
using namespace cv;

static int thresh = 50, N = 11;
static float tolerance = 0.01;

//declarations added so that we can move our
//public function to the top of the file
static void findSquares( const Mat& image, vector<vector<Point> >& squares );
static void drawSquares( Mat& image, vector<vector<Point> >& squares );

//this public function performs the role of
//main{} in the original file (main{} is deleted)
cv::Mat CVSquares::detectedSquaresInImage (cv::Mat image, float tol, int threshold, int levels)
{
    vector<vector<Point> > squares;
    if( image.empty() )
    {
        cout << "Couldn't load " << endl;
    }
    tolerance = tol;
    thresh = threshold;
    N = levels;
    findSquares(image, squares);
    drawSquares(image, squares);
    return image;
}
// the rest of this file is identical to the original squares.cpp except:
// main{} is removed
// this line is removed from drawSquares:
// imshow(wndname, image);
// (obj-c will do the drawing)
UIImage+OpenCV.h
The UIImage category is an objC++ file containing the code to convert between UIImage and cv::Mat image formats. This is where you move your two methods -(UIImage *)UIImageFromCVMat:(cv::Mat)cvMat and - (cv::Mat)cvMatWithImage:(UIImage *)image
//UIImage+OpenCV.h
#import <UIKit/UIKit.h>

@interface UIImage (UIImage_OpenCV)

//cv::Mat to UIImage
+ (UIImage *)imageFromCVMat:(cv::Mat&)cvMat;

//UIImage to cv::Mat
- (cv::Mat)CVMat;

@end
The method implementations here are unchanged from your code (although we don't pass a UIImage in to convert, instead we refer to self)
Here is a partial answer. It is not complete because I am attempting to do the exact same thing and experiencing huge difficulties every step of the way. My knowledge is quite strong in Objective-C but really weak in C++.
You should read this guide to wrapping C++.
And everything on Ievgen Khvedchenia's Computer Vision Talks blog, especially the openCV tutorial. Ievgen has also posted an amazingly complete project on github to go with the tutorial.
Having said that, I am still having a lot of trouble getting openCV to compile and run smoothly.
For example, Ievgen's tutorial runs fine as a finished project, but if I try to recreate it from scratch I get the same openCV compile errors that have been plaguing me all along. It's probably my poor understanding of C++ and its integration with Objective-C.
Regarding squares.cpp
What you need to do
remove int main(int /*argc*/, char** /*argv*/) from squares.cpp
remove imshow(wndname, image); from drawSquares (obj-c will do the drawing)
create a header file squares.h
make one or two public functions in the header file which you can call from obj-c (or from an obj-c/c++ wrapper)
Here is what I have so far...
class squares
{
public:
    static cv::Mat& findSquares( const cv::Mat& image, cv::vector<cv::vector<cv::Point> >& squares );
    static cv::Mat& drawSquares( cv::Mat& image, const cv::vector<cv::vector<cv::Point> >& squares );
};
You should be able to reduce this to a single method, say processSquares, with one cv::Mat& image input and one cv::Mat& image return value. That method would declare squares and call findSquares and drawSquares within the .cpp file.
The wrapper will take an input UIImage, convert it to a cv::Mat image, call processSquares with that input, and get back a resulting cv::Mat image. It will convert that result back to UIImage and pass it back to the Objective-C calling function.
So that's a neat sketch of what we need to do; I will try to expand this answer once I've actually managed to do any of it!
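In the meantime, here is a minimal sketch of that single entry point (processSquares is just a suggested name; findSquares and drawSquares are assumed to keep the signatures they have in the original squares.cpp):

// squares.cpp (sketch)
#include <opencv2/opencv.hpp>
#include <vector>

// these two come from the original squares.cpp, pasted further down in the same file
static void findSquares(const cv::Mat& image,
                        std::vector<std::vector<cv::Point> >& squares);
static void drawSquares(cv::Mat& image,
                        const std::vector<std::vector<cv::Point> >& squares);

// single public entry point: the squares vector never leaves this file
cv::Mat& processSquares(cv::Mat& image)
{
    std::vector<std::vector<cv::Point> > squares;
    findSquares(image, squares);  // detect
    drawSquares(image, squares);  // draw in place (imshow call removed)
    return image;
}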
I was trying to do sharpening on some standard images from Gonzalez's book. Below is some code I have tried, but it doesn't get close to the results of the sharpened image in the book.
cvSmooth(grayImg, grayImg, CV_GAUSSIAN, 3, 0, 0, 0);
IplImage* laplaceImg = cvCreateImage(cvGetSize(oriImg), IPL_DEPTH_16S, 1);
IplImage* abs_laplaceImg = cvCreateImage(cvGetSize(oriImg), IPL_DEPTH_8U, 1);
cvLaplace(grayImg, laplaceImg, 3);
cvConvertScaleAbs(laplaceImg, abs_laplaceImg, 1, 0);
IplImage* dstImg = cvCreateImage(cvGetSize(oriImg), IPL_DEPTH_8U, 1);
cvAdd(abs_laplaceImg, grayImg, dstImg, NULL);
Before Sharpening
My Sharpening Result
Desired Result
Absolute Laplace
I think the problem is that you are blurring the image before taking the 2nd derivative.
Here is working code with the C++ API (I'm using OpenCV 2.4.3). I tried it with MATLAB as well and the result is the same.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main(int /*argc*/, char** /*argv*/) {
    Mat img, imgLaplacian, imgResult;

    //----------------------------------------------------- test, first of all
    // now do it by hand
    img = (Mat_<uchar>(4,4) << 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15);

    // first, the good result
    Laplacian(img, imgLaplacian, CV_8UC1);
    cout << "let opencv do it" << endl;
    cout << imgLaplacian << endl;

    Mat kernel = (Mat_<float>(3,3) <<
                  0,  1, 0,
                  1, -4, 1,
                  0,  1, 0);
    int window_size = 3;

    // now, reaaallly by hand
    // note that, to avoid padding, the result image will be smaller than the original one.
    Mat frame;
    Rect roi;
    imgLaplacian = Mat::zeros(img.size(), CV_32F);
    for(int y=0; y<img.rows-window_size/2-1; y++) {
        for(int x=0; x<img.cols-window_size/2-1; x++) {
            roi = Rect(x,y, window_size, window_size);
            frame = img(roi);
            frame.convertTo(frame, CV_32F);
            frame = frame.mul(kernel);
            float v = sum(frame)[0];
            imgLaplacian.at<float>(y,x) = v;
        }
    }
    imgLaplacian.convertTo(imgLaplacian, CV_8U);
    cout << "dudee" << imgLaplacian << endl;

    // a little bit less "by hand"..
    // using cv::filter2D
    filter2D(img, imgLaplacian, -1, kernel);
    cout << imgLaplacian << endl;

    //----------------------------------------------------- real stuff now
    img = imread("moon.jpg", 0); // load grayscale image

    // ok, now try a different kernel
    kernel = (Mat_<float>(3,3) <<
              1,  1, 1,
              1, -8, 1,
              1,  1, 1); // another approximation of the second derivative, stronger

    // do the laplacian filtering as it is
    // well, we need to convert everything to something deeper than CV_8U
    // because the kernel has some negative values,
    // and we can expect in general to have a Laplacian image with negative values
    // BUT an 8-bit unsigned int (the one we are working with) can only hold values from 0 to 255
    // so the possible negative numbers will be truncated
    filter2D(img, imgLaplacian, CV_32F, kernel);
    img.convertTo(img, CV_32F);
    imgResult = img - imgLaplacian;

    // convert back to 8-bit grayscale
    imgResult.convertTo(imgResult, CV_8U);
    imgLaplacian.convertTo(imgLaplacian, CV_8U);

    namedWindow("laplacian", CV_WINDOW_AUTOSIZE);
    imshow( "laplacian", imgLaplacian );
    namedWindow("result", CV_WINDOW_AUTOSIZE);
    imshow( "result", imgResult );

    while( true ) {
        char c = (char)waitKey(10);
        if( c == 27 ) { break; }
    }

    return 0;
}
Have fun!
I think the main problem is that you do img + laplace, while img - laplace would give better results. I remember that img - 2*laplace was best, but I cannot find where I read that; probably in one of the books I read at university.
You need to do img - laplace instead of img + laplace.
laplace: f(x,y) = f(x+1,y) + f(x-1,y) + f(x,y+1) + f(x,y-1) - 4*f(x,y)
So, if you subtract the laplace from the original image, you will see that the minus sign in front of 4*f(x,y) gets negated and this term becomes positive.
You could also use a kernel with 5 in the center pixel (instead of computing the Laplacian with a -4 center and then doing img - laplace) to make the sharpening a one-step process. Why? Try deriving that yourself.
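(In case you want to check your derivation: both operations are linear filters, so img - laplace can be folded into one kernel:

identity           laplace            identity - laplace
0 0 0              0  1  0             0 -1  0
0 1 0      -       1 -4  1      =     -1  5 -1
0 0 0              0  1  0             0 -1  0

The center becomes 1 - (-4) = 5 and each of the four direct neighbors becomes 0 - 1 = -1.)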
This would be the final kernel.
Mat kernel = (Mat_<float>(3,3) <<
               0, -1,  0,
              -1,  5, -1,
               0, -1,  0);
It is indeed a well-known result in image processing that if you subtract its Laplacian from an image, the image edges are amplified, giving a sharper image.
Laplacian filter kernel algorithm: sharpened_pixel = 5 * current - left - right - up - down
So the code will look like this:
#include <opencv2/opencv.hpp>
using namespace cv;

void sharpen(const Mat& img, Mat& result)
{
    result.create(img.size(), img.type());
    // Process the inner pixels; the one-pixel outer border is handled separately below
    for (int row = 1; row < img.rows-1; row++)
    {
        // previous row
        const uchar* previous = img.ptr<const uchar>(row-1);
        // current row being processed
        const uchar* current = img.ptr<const uchar>(row);
        // next row
        const uchar* next = img.ptr<const uchar>(row+1);
        uchar* output = result.ptr<uchar>(row);
        int ch = img.channels();
        int starts = ch;
        int ends = (img.cols - 1) * ch;
        for (int col = starts; col < ends; col++)
        {
            // Work channel by channel: the horizontal neighbors are offset by ch,
            // so each channel of each pixel is sharpened independently.
            // Writing output[col] keeps the result aligned with the input column.
            output[col] = saturate_cast<uchar>(5 * current[col] - current[col-ch] - current[col+ch] - previous[col] - next[col]);
        }
    } // end loop
    // Process the boundary: set the peripheral pixels to 0
    result.row(0).setTo(Scalar::all(0));
    result.row(result.rows-1).setTo(Scalar::all(0));
    result.col(0).setTo(Scalar::all(0));
    result.col(result.cols-1).setTo(Scalar::all(0));
}
int main()
{
    Mat lena = imread("lena.jpg");
    Mat sharpenedLena;
    sharpen(lena, sharpenedLena);

    imshow("lena", lena);
    imshow("sharpened lena", sharpenedLena);
    waitKey(0);
    return 0;
}
If you are lazier, have fun with the following.
int main()
{
    Mat lena = imread("lena.jpg");
    Mat sharpenedLena;
    // one-step sharpening kernel: identity minus Laplacian, so the center must be 5
    Mat kernel = (Mat_<float>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
    cv::filter2D(lena, sharpenedLena, lena.depth(), kernel);

    imshow("lena", lena);
    imshow("sharpened lena", sharpenedLena);
    waitKey(0);
    return 0;
}
And the result looks like this: [sharpened result image]