Cleaning up a captcha image using OpenCV - iOS

How can I remove the interference line from a captcha image when the line has the same color as the text?
I am using the following picture as a demo:
[demo picture]
I process it with the following code:
- (UIImage *)cleanLine:(UIImage *)image {
    IplImage *src = [self convertToIplImage:image];
    IplImage *gray = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage *dst = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage *binary = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    cvCvtColor(src, gray, CV_RGB2GRAY);
    cvThreshold(gray, binary, 120, 255, CV_THRESH_OTSU);
    findLines(gray, dst);
    // Walk along the detected line; at every line pixel, measure the
    // vertical run of black pixels above and below it in the binary image.
    for (int row = 0; row < binary->height; row++) {
        for (int col = 0; col < binary->width; col++) {
            if (cvGet2D(dst, row, col).val[0] == 255) {
                int up = 0, down = 0;
                int white = 0;
                for (int i = row; i >= 0; i--) {
                    if (cvGet2D(binary, i, col).val[0] == 0) {
                        up++;
                        white = 0;
                    } else {
                        white++;
                    }
                    if (white > 2) break;
                }
                white = 0;
                for (int i = row; i < binary->height; i++) {
                    if (cvGet2D(binary, i, col).val[0] == 0) {
                        down++;
                        white = 0;
                    } else {
                        white++;
                    }
                    if (white > 2) break;
                }
                // A short vertical run means this pixel belongs to the thin
                // interference line rather than a character stroke: erase it.
                if (up + down < 8) {
                    for (int i = -up; i <= down; i++) {
                        cvSet2D(binary, row + i, col, cvScalar(255));
                    }
                }
            }
        }
    }
    erase(binary);
    cvErode(binary, binary, NULL, 1);
    cvDilate(binary, binary, NULL, 1);
    // Copy the data (second argument true); otherwise the Mat would point
    // into `binary`, which is released on the lines below.
    Mat Img = cvarrToMat(binary, true);
    cvReleaseImage(&src);
    cvReleaseImage(&gray);
    cvReleaseImage(&dst);
    cvReleaseImage(&binary);
    return MatToUIImage(Img);
}
The code that finds the interference line:
void findLines(IplImage *raw, IplImage *dst) {
    IplImage *src = cvCloneImage(raw);
    IplImage *canny = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    cvCanny(src, canny, 20, 200, 3);
    CvMemStorage *stor = cvCreateMemStorage(0);
    CvSeq *lines = cvHoughLines2(canny, stor, CV_HOUGH_PROBABILISTIC, 1, CV_PI / 180, 80, 200, 30);
    cvZero(dst);
    // Keep only the horizontally longest of the detected segments.
    CvPoint maxStart = cvPoint(0, 0), maxEnd = cvPoint(0, 0);
    int maxDistance = 0;
    for (int i = 0; i < lines->total; i++) {
        CvPoint *line = (CvPoint *)cvGetSeqElem(lines, i);
        if (abs(line[0].x - line[1].x) > maxDistance) {
            maxDistance = abs(line[0].x - line[1].x);
            maxStart = line[0];
            maxEnd = line[1];
        }
    }
    // Draw only if Hough actually found something; maxStart/maxEnd
    // would otherwise be left at (0, 0).
    if (maxDistance > 0)
        cvLine(dst, maxStart, maxEnd, cvScalar(255), 1);
    cvReleaseImage(&src);
    cvReleaseImage(&canny);
    cvReleaseMemStorage(&stor);
}
I get the following result:
[result picture]
Here is one of the captcha images I need to process:
[captcha image]
I ran my code on these captchas, but it did not remove the line.
I do not know how to modify my code so that it clears the interference line. Can anyone guide me? Thank you very much.
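(Not an answer from the thread, just a sketch of an approach that may help.) Since the line has the same color as the text, color cannot separate them; stroke thickness can. Below is a minimal C++ sketch using the modern API: instead of keeping only the single longest Hough line, it erases a thin band around every sufficiently long segment found by cv::HoughLinesP and then repairs the strokes the band crossed. All thresholds (Canny limits, minimum segment length, band width) are assumptions that would need tuning on the real captcha set.
#include <opencv2/opencv.hpp>

// Sketch: erase all long, thin segments, then repair the character
// strokes they crossed. Assumes dark text on a light background.
cv::Mat removeThinLines(const cv::Mat &bgr) {
    cv::Mat gray, binary;
    cv::cvtColor(bgr, gray, cv::COLOR_BGR2GRAY);
    // Otsu binarization: dark text -> 0, light background -> 255.
    cv::threshold(gray, binary, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);

    cv::Mat edges;
    cv::Canny(gray, edges, 20, 200);

    // Keep every segment at least a third of the image wide (guessed value).
    std::vector<cv::Vec4i> segments;
    cv::HoughLinesP(edges, segments, 1, CV_PI / 180, 50, gray.cols / 3, 10);

    // Paint a 3-pixel-wide background band over each detected segment.
    for (const cv::Vec4i &s : segments) {
        cv::line(binary, cv::Point(s[0], s[1]), cv::Point(s[2], s[3]),
                 cv::Scalar(255), 3);
    }

    // Opening the white background grows the black text back together,
    // reconnecting strokes that the erased band cut through.
    cv::morphologyEx(binary, binary, cv::MORPH_OPEN,
                     cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3)),
                     cv::Point(-1, -1), 2);
    return binary;
}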

Related

How to calculate perimeter of a binary image using OpenCV 4.2 in C++

I want to calculate the perimeter of a white blob in a 512x512 binary image. The image contains only one blob. I used the following code in OpenCV 3, but it no longer works in OpenCV 4.2: IplImage
is deprecated in the latest version, and I cannot pass a Mat object directly to the cvFindContours function. I am new to OpenCV and I don't know how it works internally. Other related questions regarding the perimeter are still unanswered.
To summarize, the following works in OpenCV 3 but does not work in the current version (4.2):
int getPerimeter(unsigned char* inImagePtr, int inW, int inH)
{
    int sumEven = 0; int sumOdd = 0;
    int sumCorner = 0; int prevCode = 0;
    // create a Mat input image
    cv::Mat inImage(inH, inW, CV_8UC1, inImagePtr);
    // create a four-connected structuring element
    cv::Mat element = cv::Mat::zeros(3, 3, CV_8UC1);
    element.data[1] = 1; element.data[3] = 1;
    element.data[4] = 1; element.data[5] = 1;
    element.data[7] = 1;
    // erode the input image
    cv::Mat erodeImage;
    erode(inImage, erodeImage, element);
    // invert the eroded image
    cv::threshold(erodeImage, erodeImage, 0, 255, THRESH_BINARY_INV);
    // multiply with the original binary image to get the edge image
    cv::Mat edge = erodeImage.mul(inImage);
    // get the chain code of the blob
    CvChain* chain = 0;
    CvMemStorage* storage = 0;
    storage = cvCreateMemStorage(0);
    auto temp = new IplImage(edge);
    cvFindContours(temp, storage, (CvSeq**)(&chain), sizeof(*chain), CV_RETR_EXTERNAL, CV_CHAIN_CODE);
    delete temp;
    for (; chain != NULL; chain = (CvChain*)chain->h_next)
    {
        CvSeqReader reader;
        int i, total = chain->total;
        cvStartReadSeq((CvSeq*)chain, &reader, 0);
        for (i = 0; i < total; i++)
        {
            char code;
            CV_READ_SEQ_ELEM(code, reader);
            if (code % 2 == 0)
                sumEven++;
            else
                sumOdd++;
            if (i > 0) {
                if (code != prevCode)
                    sumCorner++;
            }
            prevCode = code;
        }
    }
    float perimeter = (float)sumEven * 0.980 + (float)sumOdd * 1.406 - (float)sumCorner * 0.091;
    return (roundf(perimeter));
}
This worked just fine for me!
int getPerimeter(unsigned char* inImagePtr, int inW, int inH) {
    // create a Mat input image
    cv::Mat inImage(inH, inW, CV_8UC1, inImagePtr);
    // create a four-connected structuring element
    cv::Mat element = cv::Mat::zeros(3, 3, CV_8UC1);
    element.data[1] = 1;
    element.data[3] = 1;
    element.data[4] = 1;
    element.data[5] = 1;
    element.data[7] = 1;
    // erode the input image
    cv::Mat erodeImage;
    erode(inImage, erodeImage, element);
    // invert the eroded image
    cv::threshold(erodeImage, erodeImage, 0, 255, THRESH_BINARY_INV);
    // multiply with the original binary image to get the edge image
    cv::Mat edge = erodeImage.mul(inImage);
    vector<vector<Point>> contours;
    findContours(edge, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE); // Retrieve only the external contour
    int preValue[2];
    int nextValue[2];
    int sumEven = 0;
    int sumOdd = 0;
    //vector<Point>::iterator itr;
    for (int ii = 0; ii < contours[0].size(); ii++) {
        Point pt = contours[0].at(ii);
        preValue[0] = pt.x;
        preValue[1] = pt.y;
        if (ii != contours[0].size() - 1) {
            Point pt_next = contours[0].at(ii + 1);
            nextValue[0] = pt_next.x;
            nextValue[1] = pt_next.y;
        } else {
            Point pt_next = contours[0].at(0);
            nextValue[0] = pt_next.x;
            nextValue[1] = pt_next.y;
        }
        if ((preValue[0] == nextValue[0]) or (preValue[1] == nextValue[1])) {
            sumEven = sumEven + abs(nextValue[0] - preValue[0]) + abs(nextValue[1] - preValue[1]);
        } else {
            sumOdd = sumOdd + abs(nextValue[0] - preValue[0]);
        }
    }
    int sumCorner = contours[0].size() - 1;
    float perimeter = round(sumEven * 0.980 + sumOdd * 1.406 - sumCorner * 0.091);
    return (roundf(perimeter));
}
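Not part of the answer above, but a quick way to sanity-check it: build a synthetic blob, run getPerimeter on its raw buffer, and compare against cv::arcLength on the same external contour. The image size and the circle are an arbitrary test fixture:
#include <opencv2/opencv.hpp>
#include <iostream>

int getPerimeter(unsigned char* inImagePtr, int inW, int inH); // as defined above

int main() {
    // Synthetic 512x512 binary image containing a single filled circle.
    cv::Mat img = cv::Mat::zeros(512, 512, CV_8UC1);
    cv::circle(img, cv::Point(256, 256), 100, cv::Scalar(255), cv::FILLED);

    int p = getPerimeter(img.data, img.cols, img.rows);

    // Cross-check against OpenCV's own arc length of the external contour.
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(img, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);
    double ref = cv::arcLength(contours[0], true);

    std::cout << "chain-code perimeter: " << p
              << ", arcLength: " << ref << std::endl;
    return 0;
}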

Simple Image Recognition in processing.org

I'm trying to write the simplest possible image comparison function.
The idea is to have a target image and a collection of n different images.
The goal is to pick the image that is most similar to the target image.
So far my method computes the euclidean HSB distance from pixel to pixel on a resized image, and I have been trying to write a PImage function that returns the winning image. I already wrote a float function that ranks the image list from winner to loser, but I would like to skip this step to make the process more concise.
The issue is in the PImage difference(PImage) function; the program throws an error on the line:
float x1 = brightness(imageKey.pixels[i]);
The error is an ArrayIndexOutOfBoundsException.
Here's the entire code:
//CLICK ON S TO SAVE FRAMES TO FOLDER
int series = 50;
PImage[] collection = new PImage[series];
PImage imageKey, imageKey2, imageKeyHUE, imageKeySUM, imageKeySAT; //target image alias with ready operations
int imageWidth = 800;
int leftAlign = 850;

void setup()
{
  size(1200, 600);
  background(255);
  frameRate(random(1, 10.0));
  for (int i = 0; i < collection.length; i++)
  {
    collection[i] = loadImage("Image_" + i + ".jpg");
  }
  //_____________________________________________TARGET IMAGE AND NAME TEXT
  textSize(10);
  fill(0);
  text("target image", leftAlign, 220);
  textSize(15);
  text("central london", leftAlign, 240);
  text("comparison methods", leftAlign, 290);
  //_____________________________________________________________________BUTTONS
  imageKey = loadImage("Image_0.jpg");
  imageKey.resize(240, 180);
  image(imageKey, leftAlign, 25);
  imageKeySAT = loadImage("Image_0.jpg");
  imageKeySAT.resize(60, 60);
  imageKeySAT = saturation(imageKeySAT);
  image(imageKeySAT, leftAlign + 140, 300);
  imageKeySUM = loadImage("Image_0.jpg");
  imageKeySUM.resize(60, 60);
  imageKeySUM = sum(imageKeySUM);
  image(imageKeySUM, leftAlign + 70, 300);
  imageKeyHUE = loadImage("Image_0.jpg");
  imageKeyHUE.resize(60, 60);
  imageKeyHUE = hue(imageKeyHUE);
  image(imageKeyHUE, leftAlign, 300);
  textSize(20);
  text("CLICK HERE TO", leftAlign, 430);
  text("STOP AT WINNER", leftAlign, 450);
}

void draw()
{
  //______________________________________________SHOW IMAGES ARRAY
  image(collection[int(random(0, series))], 0, 0);
  //______________________________________________HISTOGRAMS
  histogramhue();
  histogramsat();
  histogrambright();
  //______________________________________________SUM METHOD
  //float Vector_Approach(PImage sumSatin){
  //}
}

void keyPressed()
{
  if (key == 's') saveFrame("images/image-######.jpg");
}

PImage difference(PImage satin)
{
  colorMode(HSB);
  satin.loadPixels();
  imageKey.loadPixels();
  PImage satout = createImage(satin.width, satin.height, RGB);
  satout.loadPixels();
  for (int i = imageWidth; i < satout.pixels.length - imageWidth; i++)
  {
    float x1 = brightness(imageKey.pixels[i]);
    float b0 = brightness(satin.pixels[i]);
    // float y1 = brightness(satin.pixels[i+1]);
    float value = x1 - b0;
    satout.pixels[i] = color(0, 0, value);
  }
  satout.updatePixels();
  return satout;
}

void mouseReleased() {
  //______________________________________________BUTTON OVER
  for (int i = 0; i < collection.length; i++) {
    if (mouseX > leftAlign && mouseX < (leftAlign + 60) && mouseY > 300 && mouseY < 360) {
      collection[i] = loadImage("Image_" + i + ".jpg");
      collection[i] = hue(collection[i]);
      histogramhue();
      noStroke(); fill(255); rect(leftAlign, 360, 200, 40); fill(0);
      textSize(10); text("comparison by hue", leftAlign, 380);
    } else if (mouseX > (leftAlign + 70) && mouseX < (leftAlign + 130) && mouseY > 300 && mouseY < 360) {
      collection[i] = loadImage("Image_" + i + ".jpg");
      collection[i] = sum(collection[i]);
      noStroke(); fill(255); rect(leftAlign, 360, 200, 40); fill(0);
      textSize(10); text("comparison by sum", leftAlign, 380);
    } else if (mouseX > (leftAlign + 140) && mouseX < (leftAlign + 200) && mouseY > 300 && mouseY < 360) {
      collection[i] = loadImage("Image_" + i + ".jpg");
      collection[i] = saturation(collection[i]);
      noStroke(); fill(255); rect(leftAlign, 360, 200, 40); fill(0);
      textSize(10); text("comparison by saturation", leftAlign, 380);
    } else if (mouseX > leftAlign && mouseX < 1200 && mouseY > 340 && mouseY < 600) {
      collection[i] = loadImage("Image_" + i + ".jpg");
      collection[i] = difference(collection[i]);
      noStroke(); fill(255); rect(leftAlign, 360, 200, 40); fill(0);
      textSize(10); text("WINNER IMAGE!!!!", leftAlign, 380);
    } else {
      collection[i] = loadImage("Image_" + i + ".jpg");
      noStroke(); fill(255); rect(leftAlign, 360, 200, 40); fill(0);
    }
  }
}

/* HSB PImage Methods */
//HUE -------> /** CHOSEN METHOD**/
//SATURATION -------> /** CHOSEN METHOD**/
//SUM -------> /** CHOSEN METHOD**/
PImage hue(PImage satin)
{
  colorMode(HSB);
  satin.loadPixels();
  PImage satout = createImage(satin.width, satin.height, HSB);
  satout.loadPixels();
  for (int j = 0; j < satout.pixels.length; j++)
  {
    satout.pixels[j] = color(hue(satin.pixels[j]), 255, 255);
  }
  satout.updatePixels();
  return satout;
}

PImage saturation(PImage satin)
{
  colorMode(HSB);
  satin.loadPixels();
  PImage satout = createImage(satin.width, satin.height, RGB);
  satout.loadPixels();
  for (int j = 0; j < satout.pixels.length; j++)
  {
    satout.pixels[j] = color(saturation(satin.pixels[j]));
  }
  satout.updatePixels();
  //colorMode(RGB);
  return satout;
}

PImage sum(PImage satin)
{
  colorMode(HSB);
  satin.loadPixels();
  PImage satout = createImage(satin.width, satin.height, RGB);
  satout.loadPixels();
  for (int i = imageWidth; i < satout.pixels.length - imageWidth; i++)
  {
    float b0 = brightness(satin.pixels[i]);
    float x1 = brightness(satin.pixels[i - 1]);
    float y1 = brightness(satin.pixels[i - imageWidth]);
    float xdiff = b0 - x1;
    float ydiff = b0 - y1;
    float value = (510 + xdiff + ydiff) / 3;
    satout.pixels[i] = color(0, 0, value);
  }
  satout.updatePixels();
  return satout;
}

//REFERENCE HISTOGRAM TAKEN FROM A PROGRAMMING HANDBOOK FOR VISUAL DESIGNERS AND ARTISTS BY BEN FRY ET AL
void histogramhue() {
  PImage img = loadImage("Image_0.jpg");
  int[] hist = new int[600];
  // Calculate the histogram
  for (int i = 0; i < img.width; i++) {
    for (int j = 0; j < img.height; j++) {
      int hue = int(hue(get(i, j)));
      hist[hue]++;
    }
  }
  int histMax = max(hist);
  stroke(255, 250); strokeWeight(5);
  // Draw half of the histogram (skip every second value)
  for (int i = 0; i < img.width; i += 20) {
    int which = int(map(i, 0, img.width, 0, 255));
    int y = int(map(hist[which], 0, histMax, img.height, 0));
    line(i, img.height, i, y);
  }
}

void histogramsat() {
  PImage img = loadImage("Image_0.jpg");
  int[] hist = new int[600];
  for (int i = 0; i < img.width; i++) {
    for (int j = 0; j < img.height; j++) {
      int sat = int(saturation(get(i, j)));
      hist[sat]++;
    }
  }
  int histMax = max(hist);
  stroke(255, 150); strokeWeight(10);
  for (int i = 0; i < img.width; i += 20) {
    int which = int(map(i, 0, img.width, 0, 255));
    int y = int(map(hist[which], 0, histMax, img.height, 0));
    line(i, img.height, i, y);
  }
}

void histogrambright() {
  PImage img = loadImage("Image_0.jpg");
  int[] hist = new int[600];
  for (int i = 0; i < img.width; i++) {
    for (int j = 0; j < img.height; j++) {
      int bright = int(brightness(get(i, j)));
      hist[bright]++;
    }
  }
  int histMax = max(hist);
  stroke(255, 150); strokeWeight(20);
  for (int i = 0; i < img.width; i += 20) {
    int which = int(map(i, 0, img.width, 0, 255));
    int y = int(map(hist[which], 0, histMax, img.height, 0));
    line(i, img.height, i, y);
  }
}
In isolation your function does seem to work:
PImage imageKey, testImage;
int imageWidth = 800;
int imageHeight = 600;

void setup() {
  size(1600, 600);
  //fake imageKey
  imageKey = getNoise(imageWidth, imageHeight);
  //fake test image
  testImage = getNoise(imageWidth, imageHeight);
  image(testImage, 0, 0);
  image(difference(testImage), 800, 0);
}

PImage getNoise(int width, int height) {
  PImage out = createImage(width, height, RGB);
  for (int i = 0; i < out.pixels.length; i++)
    out.pixels[i] = color(random(255), random(255), random(255));
  out.updatePixels();
  return out;
}

PImage difference(PImage satin)
{
  colorMode(HSB);
  satin.loadPixels();
  imageKey.loadPixels();
  PImage satout = createImage(satin.width, satin.height, RGB);
  satout.loadPixels();
  for (int i = imageWidth; i < satout.pixels.length - imageWidth; i++)
  {
    float x1 = brightness(imageKey.pixels[i]);
    float b0 = brightness(satin.pixels[i]);
    // float y1 = brightness(satin.pixels[i+1]);
    float value = x1 - b0;
    //println(i,x1,b0,x1-b0,value);
    satout.pixels[i] = color(0, 0, value);
  }
  satout.updatePixels();
  return satout;
}
I can't test your actual setup as I don't have access to your images, but the ArrayIndexOutOfBoundsException is probably because your i counter goes beyond the number of pixels in imageKey. You can test this by checking whether i < imageKey.pixels.length before indexing. My guess is the images aren't the same dimensions and therefore don't have the same number of pixels.
Other notes, going slightly off-topic:
Your difference() function is tightly coupled to the imageKey and imageWidth variables. You might want to make your functions loosely coupled so they can be reused easily in other contexts; you could start by making these two variables extra parameters/arguments of the function.
You might also want to look at the euclidean distance between colours in a perceptual colour space such as CIE L*a*b*. Have a look at this answer. Even though it's an openFrameworks answer, it should be easy to adapt to Processing's color and PVector types.

camera-projector calibration for processing

I want to use some code like this in Processing, since I am not familiar with openFrameworks:
https://www.youtube.com/watch?v=pCq7u2TvlxU&list=UUtYM3-7ldtX7kf_sSoHt1Pw&index=1&feature=plcp
Has anyone heard of a project like this for Processing?
Since I am not a programmer, I am trying to mix the CalibrationDemo example with MarkerDetection (both from the OpenCV for Processing library), wondering if I can first get a transformation matrix from the checkerboard plane as seen by the camera.
Most of the examples and tutorials about OpenCV are written in C++, so I am having a hard time understanding some definitions without a practical example.
Below is a work-in-progress sketch. It does not yet do what I want. As I said, it's a mix of two examples from the Processing OpenCV library. My first goal is to extract the transformation matrix of the checkerboard plane.
import gab.opencv.*;
import org.opencv.imgproc.Imgproc;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.CvType;
import org.opencv.core.Point;
import org.opencv.core.Size;
import processing.video.*;
//import java.util.list;

OpenCV opencv;
Capture cam;
PImage src, dst, markerImg;
ArrayList<MatOfPoint> contours;
ArrayList<MatOfPoint2f> approximations;
ArrayList<MatOfPoint2f> markers;
boolean[][] markerCells;

void setup() {
  size(1000, 365);
  //1000 × 730
  cam = new Capture(this, 800, 480);
  cam.start();
  //src = cam.get();//opencv.getInput();
  opencv = new OpenCV(this, 800, 480);
}

void update() {
  //src = opencv.getInput();
  opencv.loadImage(src);
  // hold on to this for later, since adaptiveThreshold is destructive
  Mat gray = OpenCV.imitate(opencv.getGray());
  opencv.getGray().copyTo(gray);
  Mat thresholdMat = OpenCV.imitate(opencv.getGray());
  opencv.blur(5);
  Imgproc.adaptiveThreshold(opencv.getGray(), thresholdMat, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY_INV, 451, -65);
  contours = new ArrayList<MatOfPoint>();
  Imgproc.findContours(thresholdMat, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_NONE);
  image(opencv.getOutput(), 0, 0);
  approximations = createPolygonApproximations(contours);
  markers = new ArrayList<MatOfPoint2f>();
  markers = selectMarkers(approximations);
  MatOfPoint2f canonicalMarker = new MatOfPoint2f();
  Point[] canonicalPoints = new Point[4];
  canonicalPoints[0] = new Point(0, 350);
  canonicalPoints[1] = new Point(0, 0);
  canonicalPoints[2] = new Point(350, 0);
  canonicalPoints[3] = new Point(350, 350);
  canonicalMarker.fromArray(canonicalPoints);
  if (markers.size() <= 0) return;
  println("num points: " + markers.get(0).height());
  Mat transform = Imgproc.getPerspectiveTransform(markers.get(0), canonicalMarker);
  Mat unWarpedMarker = new Mat(50, 50, CvType.CV_8UC1);
  Imgproc.warpPerspective(gray, unWarpedMarker, transform, new Size(350, 350));
  Imgproc.threshold(unWarpedMarker, unWarpedMarker, 125, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
  float cellSize = 350/7.0;
  markerCells = new boolean[7][7];
  for (int row = 0; row < 7; row++) {
    for (int col = 0; col < 7; col++) {
      int cellX = int(col*cellSize);
      int cellY = int(row*cellSize);
      Mat cell = unWarpedMarker.submat(cellX, cellX + (int)cellSize, cellY, cellY + (int)cellSize);
      markerCells[row][col] = (Core.countNonZero(cell) > (cellSize*cellSize)/2);
    }
  }
  for (int col = 0; col < 7; col++) {
    for (int row = 0; row < 7; row++) {
      if (markerCells[row][col]) {
        print(1);
      } else {
        print(0);
      }
    }
    println();
  }
  dst = createImage(350, 350, RGB);
  opencv.toPImage(unWarpedMarker, dst);
}

ArrayList<MatOfPoint2f> selectMarkers(ArrayList<MatOfPoint2f> candidates) {
  float minAllowedContourSide = 50;
  minAllowedContourSide = minAllowedContourSide * minAllowedContourSide;
  ArrayList<MatOfPoint2f> result = new ArrayList<MatOfPoint2f>();
  for (MatOfPoint2f candidate : candidates) {
    if (candidate.size().height != 4) {
      continue;
    }
    if (!Imgproc.isContourConvex(new MatOfPoint(candidate.toArray()))) {
      continue;
    }
    // eliminate markers where consecutive
    // points are too close together
    float minDist = src.width * src.width;
    Point[] points = candidate.toArray();
    for (int i = 0; i < points.length; i++) {
      Point side = new Point(points[i].x - points[(i+1)%4].x, points[i].y - points[(i+1)%4].y);
      float squaredLength = (float)side.dot(side);
      // println("minDist: " + minDist + " squaredLength: " + squaredLength);
      minDist = min(minDist, squaredLength);
    }
    // println(minDist);
    if (minDist < minAllowedContourSide) {
      continue;
    }
    result.add(candidate);
  }
  return result;
}

ArrayList<MatOfPoint2f> createPolygonApproximations(ArrayList<MatOfPoint> cntrs) {
  ArrayList<MatOfPoint2f> result = new ArrayList<MatOfPoint2f>();
  double epsilon = cntrs.get(0).size().height * 0.01;
  println(epsilon);
  for (MatOfPoint contour : cntrs) {
    MatOfPoint2f approx = new MatOfPoint2f();
    Imgproc.approxPolyDP(new MatOfPoint2f(contour.toArray()), approx, epsilon, true);
    result.add(approx);
  }
  return result;
}

void drawContours(ArrayList<MatOfPoint> cntrs) {
  for (MatOfPoint contour : cntrs) {
    beginShape();
    Point[] points = contour.toArray();
    for (int i = 0; i < points.length; i++) {
      vertex((float)points[i].x, (float)points[i].y);
    }
    endShape();
  }
}

void drawContours2f(ArrayList<MatOfPoint2f> cntrs) {
  for (MatOfPoint2f contour : cntrs) {
    beginShape();
    Point[] points = contour.toArray();
    for (int i = 0; i < points.length; i++) {
      vertex((float)points[i].x, (float)points[i].y);
    }
    endShape(CLOSE);
  }
}

void draw() {
  //VIDEO
  if (!cam.available()) {
    println("no video available");
    return;
  }
  cam.read();
  src = cam.get();
  pushMatrix();
  background(125);
  scale(0.7);
  //image(src, 0, 0);
  update();
  noFill();
  smooth();
  strokeWeight(5);
  stroke(0, 255, 0);
  if (markers.size() > 0) drawContours2f(markers);
  popMatrix();
  if (markers.size() <= 0) {
    drawContours2f(markers);
    return;
  }
  pushMatrix();
  translate(200 + src.width/2, 0);
  strokeWeight(1);
  image(dst, 0, 0);
  float cellSize = dst.width/7.0;
  for (int col = 0; col < 7; col++) {
    for (int row = 0; row < 7; row++) {
      if (markerCells[row][col]) {
        fill(255);
      } else {
        fill(0);
      }
      stroke(0, 255, 0);
      rect(col*cellSize, row*cellSize, cellSize, cellSize);
    }
  }
  popMatrix();
}
Any help or indication would be great!
B

Cropping panorama image in OpenCV

I'm trying to find a simple algorithm to crop away the black areas of a panorama image created with the OpenCV Stitcher module.
My idea is to find the innermost black points in the image, which will define the cropping area, as shown in the next image:
Expected cropped result:
I've tried the following two approaches, but they don't crop the image as expected:
First Approach:
void testCropA(cv::Mat& image)
{
    cv::Mat gray;
    cvtColor(image, gray, CV_BGR2GRAY);
    Size size = gray.size();
    int type = gray.type();
    int left = 0, top = 0, right = size.width, bottom = size.height;
    cv::Mat row_zeros = Mat::zeros(1, right, type);
    cv::Mat col_zeros = Mat::zeros(bottom, 1, type);
    while (countNonZero(gray.row(top) != row_zeros) == 0) { top++; }
    while (countNonZero(gray.col(left) != col_zeros) == 0) { left++; }
    while (countNonZero(gray.row(bottom - 1) != row_zeros) == 0) { bottom--; }
    while (countNonZero(gray.col(right - 1) != col_zeros) == 0) { right--; }
    cv::Rect cropRect(left, top, right - left, bottom - top);
    image = image(cropRect);
}
Second Approach:
void testCropB(cv::Mat& image)
{
    cv::Mat gray;
    cvtColor(image, gray, CV_BGR2GRAY);
    int minCol = gray.cols;
    int minRow = gray.rows;
    int maxCol = 0;
    int maxRow = 0;
    for (int i = 0; i < gray.rows - 3; i++)
    {
        for (int j = 0; j < gray.cols; j++)
        {
            if (gray.at<char>(i, j) != 0)
            {
                if (i < minRow) { minRow = i; }
                if (j < minCol) { minCol = j; }
                if (i > maxRow) { maxRow = i; }
                if (j > maxCol) { maxCol = j; }
            }
        }
    }
    cv::Rect cropRect = Rect(minCol, minRow, maxCol - minCol, maxRow - minRow);
    image = image(cropRect);
}
This is my current solution. I hope it helps others:
bool checkInteriorExterior(const cv::Mat &mask, const cv::Rect &croppingMask,
                           int &top, int &bottom, int &left, int &right)
{
    // Return true if the rectangle is fine as it is
    bool result = true;
    cv::Mat sub = mask(croppingMask);
    int x = 0;
    int y = 0;
    // Count how many exterior pixels there are, and choose the side for
    // reduction where the most exterior pixels occurred (that's the heuristic)
    int top_row = 0;
    int bottom_row = 0;
    int left_column = 0;
    int right_column = 0;
    for (y = 0, x = 0; x < sub.cols; ++x)
    {
        // If there is an exterior part in the interior we have
        // to move the top side of the rect a bit to the bottom
        if (sub.at<char>(y, x) == 0)
        {
            result = false;
            ++top_row;
        }
    }
    for (y = (sub.rows - 1), x = 0; x < sub.cols; ++x)
    {
        // If there is an exterior part in the interior we have
        // to move the bottom side of the rect a bit to the top
        if (sub.at<char>(y, x) == 0)
        {
            result = false;
            ++bottom_row;
        }
    }
    for (y = 0, x = 0; y < sub.rows; ++y)
    {
        // If there is an exterior part in the interior
        if (sub.at<char>(y, x) == 0)
        {
            result = false;
            ++left_column;
        }
    }
    for (x = (sub.cols - 1), y = 0; y < sub.rows; ++y)
    {
        // If there is an exterior part in the interior
        if (sub.at<char>(y, x) == 0)
        {
            result = false;
            ++right_column;
        }
    }
    // The idea is to set `top = 1` if it's better to reduce
    // the rect at the top than anywhere else.
    if (top_row > bottom_row)
    {
        if (top_row > left_column)
        {
            if (top_row > right_column)
            {
                top = 1;
            }
        }
    }
    else if (bottom_row > left_column)
    {
        if (bottom_row > right_column)
        {
            bottom = 1;
        }
    }
    if (left_column >= right_column)
    {
        if (left_column >= bottom_row)
        {
            if (left_column >= top_row)
            {
                left = 1;
            }
        }
    }
    else if (right_column >= top_row)
    {
        if (right_column >= bottom_row)
        {
            right = 1;
        }
    }
    return result;
}

bool compareX(cv::Point a, cv::Point b)
{
    return a.x < b.x;
}

bool compareY(cv::Point a, cv::Point b)
{
    return a.y < b.y;
}

void crop(cv::Mat &source)
{
    cv::Mat gray;
    source.convertTo(source, CV_8U);
    cvtColor(source, gray, cv::COLOR_RGB2GRAY);
    // Extract all the black background (and maybe some interior parts)
    cv::Mat mask = gray > 0;
    // now extract the outer contour
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::findContours(mask, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE, cv::Point(0, 0));
    cv::Mat contourImage = cv::Mat::zeros(source.size(), CV_8UC3);
    // Find the contour with the most elements
    int maxSize = 0;
    int id = 0;
    for (int i = 0; i < contours.size(); ++i)
    {
        if (contours.at((unsigned long)i).size() > maxSize)
        {
            maxSize = (int)contours.at((unsigned long)i).size();
            id = i;
        }
    }
    // Draw the filled contour to obtain a mask with interior parts
    cv::Mat contourMask = cv::Mat::zeros(source.size(), CV_8UC1);
    drawContours(contourMask, contours, id, cv::Scalar(255), -1, 8, hierarchy, 0, cv::Point());
    // Sort the contour in x/y directions to easily find min/max and their neighbours
    std::vector<cv::Point> cSortedX = contours.at((unsigned long)id);
    std::sort(cSortedX.begin(), cSortedX.end(), compareX);
    std::vector<cv::Point> cSortedY = contours.at((unsigned long)id);
    std::sort(cSortedY.begin(), cSortedY.end(), compareY);
    int minXId = 0;
    int maxXId = (int)(cSortedX.size() - 1);
    int minYId = 0;
    int maxYId = (int)(cSortedY.size() - 1);
    cv::Rect croppingMask;
    while ((minXId < maxXId) && (minYId < maxYId))
    {
        cv::Point min(cSortedX[minXId].x, cSortedY[minYId].y);
        cv::Point max(cSortedX[maxXId].x, cSortedY[maxYId].y);
        croppingMask = cv::Rect(min.x, min.y, max.x - min.x, max.y - min.y);
        // Out-codes: if one of them is set, the rectangle size has to be reduced at that border
        int ocTop = 0;
        int ocBottom = 0;
        int ocLeft = 0;
        int ocRight = 0;
        bool finished = checkInteriorExterior(contourMask, croppingMask, ocTop, ocBottom, ocLeft, ocRight);
        if (finished == true)
        {
            break;
        }
        // Reduce the rectangle at the border if necessary
        if (ocLeft) { ++minXId; }
        if (ocRight) { --maxXId; }
        if (ocTop) { ++minYId; }
        if (ocBottom) { --maxYId; }
    }
    // Crop the image with the created mask
    source = source(croppingMask);
}
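Not from the answer itself, just a minimal driver showing how the in-place crop() above would be used; the file names are placeholders:
#include <opencv2/opencv.hpp>

int main() {
    cv::Mat pano = cv::imread("pano.jpg");   // placeholder file name
    if (pano.empty()) return 1;              // nothing to crop
    crop(pano);                              // crops in place, as defined above
    cv::imwrite("pano_cropped.jpg", pano);
    return 0;
}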
I never used the Stitcher class, but I think you may be able to get the estimated homography matrix for each pair of images. If you can obtain it easily, you can multiply it with the corners of the first original image, and likewise for the corners of the last one, to get their stitched coordinates. Then take the innermost of the left/right x-coordinates and of the top/bottom y-coordinates across the images; those give you the rectangle you need for cropping in some cases.
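To sketch the corner-warping idea from this answer (the sketch is mine; obtaining the homography H from the high-level Stitcher is the hard part and is simply assumed here):
#include <opencv2/opencv.hpp>

// Warp the four corners of a w x h image by homography H and return
// their bounding box in the panorama's coordinate frame. Intersecting
// these boxes across all input images suggests where to crop.
cv::Rect warpedBounds(const cv::Mat &H, int w, int h) {
    std::vector<cv::Point2f> corners = {
        {0.f, 0.f}, {(float)w, 0.f}, {(float)w, (float)h}, {0.f, (float)h}
    };
    std::vector<cv::Point2f> warped;
    cv::perspectiveTransform(corners, warped, H);
    return cv::boundingRect(warped);
}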

Sizes of input arguments do not match in cvcalcopticalflowbm opencv 2.4.7

I want to calculate optical flow using the cvCalcOpticalFlowBM function in OpenCV 2.4.7.
When I compile and run the code below, I get the error "Sizes of input arguments do not match" in cvCalcOpticalFlowBM.
I do not understand why. Please help me; thank you in advance.
#define BS 5

IplImage *imgA = NULL, *imgB = NULL;
IplImage *grayA = NULL, *grayB = NULL;
IplImage *velx = NULL, *vely = NULL;
IplImage *result = NULL;

imgA = cvLoadImage("00.jpg", 1);
imgB = cvLoadImage("01.jpg", 1);
grayA = cvCreateImage(cvGetSize(imgA), IPL_DEPTH_8U, 1);
grayB = cvCreateImage(cvGetSize(imgA), IPL_DEPTH_8U, 1);
cvCvtColor(imgA, grayA, CV_BGR2GRAY);
cvCvtColor(imgB, grayB, CV_BGR2GRAY);
CvSize size = cvGetSize(imgA);
size.width /= BS;
size.height /= BS;
result = cvCreateImage(size, IPL_DEPTH_8U, 1);
for (int i = 0; i < size.height; i++) {
    for (int j = 0; j < size.width; j++) {
        cvSet(result, CV_RGB(255, 255, 255), NULL);
    }
}
velx = cvCreateImage(size, IPL_DEPTH_32F, 1);
vely = cvCreateImage(size, IPL_DEPTH_32F, 1);
cvCalcOpticalFlowBM(grayB, grayA, cvSize(BS, BS), cvSize(1, 1), cvSize(1, 1), 0, velx, vely);
//
cvNamedWindow("HorFlowBM", CV_WINDOW_AUTOSIZE);
cvShowImage("HorFlowBM", velx);
cvNamedWindow("VerFlowBM", CV_WINDOW_AUTOSIZE);
cvShowImage("VerFlowBM", vely);
for (int i = 0; i < size.height; i += 2) {
    for (int j = 0; j < size.width; j += 2) {
        int dx = (int)cvGetReal2D(velx, i, j);
        int dy = (int)cvGetReal2D(vely, i, j);
        cvLine(result, cvPoint(j, i), cvPoint(j + dx, i + dy), CV_RGB(0, 0, 0), 1, 8, 0);
    }
}
cvNamedWindow("OpticalFlow", CV_WINDOW_AUTOSIZE);
cvShowImage("OpticalFlow", result);
cvWaitKey(0);
Are you sure the input images are actually being loaded? Try to show them after loading them, i.e. cvShowImage("input1", imgA);. Also, print the size of both images to check that they are the same.
I found the cause of this error.
The size of velx and vely should be:
CvSize velSize =
{
    (grayA->width  - BLOCK_SIZE + SHIFT_SIZE) / SHIFT_SIZE,
    (grayA->height - BLOCK_SIZE + SHIFT_SIZE) / SHIFT_SIZE
};
With velx and vely created at this size, the program compiles and runs correctly.
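To make that concrete, a sketch of the corrected allocation for the code above, with BLOCK_SIZE matching the cvSize(BS, BS) block size and SHIFT_SIZE the cvSize(1, 1) shift that the question passes to cvCalcOpticalFlowBM:
// velx/vely must match the number of block positions that
// cvCalcOpticalFlowBM evaluates, not the image size divided by BS.
#define BLOCK_SIZE 5
#define SHIFT_SIZE 1

CvSize velSize = cvSize(
    (grayA->width  - BLOCK_SIZE + SHIFT_SIZE) / SHIFT_SIZE,
    (grayA->height - BLOCK_SIZE + SHIFT_SIZE) / SHIFT_SIZE);
velx = cvCreateImage(velSize, IPL_DEPTH_32F, 1);
vely = cvCreateImage(velSize, IPL_DEPTH_32F, 1);
cvCalcOpticalFlowBM(grayB, grayA, cvSize(BLOCK_SIZE, BLOCK_SIZE),
                    cvSize(SHIFT_SIZE, SHIFT_SIZE), cvSize(1, 1),
                    0, velx, vely);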
