Arduino servo with face tracking using Arduino, Processing and OpenCV

I'm working on a project to keep a face in the center of the screen by using a camera mounted on top of a servo. I started from the simple servo control tutorial on the Arduino Playground website, which uses the mouse to control the servo, and rewrote it so that the face's x coordinate makes the servo move in the desired direction.
So far I've got it working with the built-in camera: the servo follows my face nicely in the right direction. But as soon as I use the external USB camera on top of the servo instead of the built-in camera, I don't get the desired result. The camera just won't look at me; as soon as it detects my face it turns straight in the opposite direction. So if the camera detects my face on the left side of the screen, the servo turns to the right until my face is out of the frame.
I hope someone can explain why it works with the built-in camera but not with the USB camera attached to the servo.
I'm using the Arduino, Processing and the OpenCV library in Processing.
This is the code I have so far:
Arduino code:
#include <Servo.h>
Servo servo1; Servo servo2;
void setup() {
servo1.attach(4);
servo2.attach(10);
Serial.begin(19200);
Serial.println("Ready");
}
void loop() {
static int v = 0;
if ( Serial.available()) {
char ch = Serial.read();
switch(ch) {
case '0'...'9':
v = v * 10 + ch - '0';
/*
so if the chars sent are 45x (turn x servo to 45 degs)..
v is the value we want to send to the servo and it is currently 0
The first char (ch) is '4', so
v = 0*10 + '4' - '0' = 4;
The second char is '5', so
v = 4*10 + '5' - '0' = 45;
Third char is not a number(0-9) so we drop through...
*/
break;
case 's':
servo1.write(v);
v = 0;
break;
case 'w':
servo2.write(v);
v = 0;
break;
case 'd':
servo2.detach();
break;
case 'a':
servo2.attach(10);
break;
}
}
}
My Processing code:
import gab.opencv.*;
import processing.video.*;
import java.awt.*;
//----------------
import processing.serial.*;
int gx = 15;
int gy = 35;
//int spos=90;
float midden=90;
float leftColor = 0.0;
float rightColor = 0.0;
Serial port;
//----------------
Capture video;
OpenCV opencv;
void setup() {
size(640, 480);
String[] cameras = Capture.list();
if (cameras.length == 0) {
println("There are no cameras available for capture.");
exit();
} else {
println("Available cameras:");
for (int i = 0; i < cameras.length; i++) {
println(cameras[i]);
}
}
//----------------
colorMode(RGB, 1.0);
noStroke();
frameRate(100);
//println(Serial.list()); // List COM-ports
//select the Arduino's COM port from the list (index 5 here)
port = new Serial(this, Serial.list()[5], 19200); //Arduino connected to the left USB port
//----------------
video = new Capture(this, 640/2, 480/2, "USB2.0 Camera"); //external camera on the right USB port
//video = new Capture(this, 640/2, 480/2); //built-in camera
opencv = new OpenCV(this, 640/2, 480/2);
opencv.loadCascade(OpenCV.CASCADE_FRONTALFACE);
video.start();
//-_-_-_-_-_-_-_-_- show the camera image in colour
opencv.useColor();
}
void draw() {
//---------------- Mouse Control
background(0.0);
update(mouseX);
fill(mouseX/4);
rect(150, 320, gx*2, gx*2);
fill(180 - (mouseX/4));
rect(450, 320, gy*2, gy*2);
//----------------
scale(2);
opencv.loadImage(video);
//-_-_-_-_-_-_-_-_- Flip camera image
opencv.flip(OpenCV.HORIZONTAL);
image(video, 0, 0 );
//-_-_-_-_-_-_-_-_-
image(opencv.getOutput(), 0, 0 );
noFill();
stroke(0, 255, 0);
strokeWeight(3);
Rectangle[] faces = opencv.detect();
//println(faces.length);
for (int i = 0; i < faces.length; i++) {
println(faces[i].x + "," + faces[i].y);
rect(faces[i].x, faces[i].y, faces[i].width, faces[i].height); //green rectangle around the face
ellipse( faces[i].x + 0.5*faces[i].width, faces[i].y + 0.5*faces[i].height, 5, 5 ); //centre point of the face
midden= (faces[i].x + 0.5*faces[i].width);
//midden= (faces[i].x);
}
}
void captureEvent(Capture c) {
c.read();
}
//---------------- servo control for the mouse position and servo rotation
void update(int x)
{
//Calculate servo position from mouseX
//spos= x/4;
//Output the servo position ( from 0 to 180)
port.write("s"+midden);
println(midden);
// if( midden>80 && midden<150){
// port.write("s"+90);
// } else if(midden<80){
// port.write("s"+45);
// }else{
// port.write("s"+135);
// }
}
//----------------

It sounds like one of the two camera images is flipped (mirrored) relative to the other. To test this, try drawing a circle at the same position on the left side of both images and then displaying them side by side, to see whether the circles end up over the same part of the scene.
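A minimal Processing sketch along those lines, assuming the same "USB2.0 Camera" device name and 320x240 capture size used in the question (the window layout and marker positions are just illustrative):

import gab.opencv.*;
import processing.video.*;
Capture video;
OpenCV opencv;
void setup() {
  size(640, 240);
  // assumed device name; replace with an entry printed by Capture.list()
  video = new Capture(this, 320, 240, "USB2.0 Camera");
  opencv = new OpenCV(this, 320, 240);
  video.start();
}
void draw() {
  opencv.loadImage(video);
  opencv.flip(OpenCV.HORIZONTAL);     // same flip as in the sketch above
  image(video, 0, 0);                 // raw camera frame on the left
  image(opencv.getOutput(), 320, 0);  // what OpenCV actually sees on the right
  // draw a marker at the same pixel position on both halves:
  // if one feed is mirrored, the markers sit over different parts of the scene
  fill(255, 0, 0);
  noStroke();
  ellipse(20, 20, 10, 10);
  ellipse(320 + 20, 20, 10, 10);
}
void captureEvent(Capture c) {
  c.read();
}

If the markers do end up over different parts of the scene, removing (or adding) the flip, or mirroring the x coordinate before sending it to the servo, is the likely fix.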

Related

Mapping depth image to color image with realsense2 and OpenCV

I use the librealsense2 library.
I am referring to this example: https://github.com/IntelRealSense/librealsense/blob/master/examples/align/rs-align.cpp
After mapping the depth image to the color image with the realsense2 library,
I want to display the images with OpenCV's Mat and imshow,
so I coded it as follows:
#include "librealsense2/rs.hpp"
#include <opencv2/opencv.hpp>
#include <sstream>
#include <iostream>
#include <fstream>
#include <algorithm>
#include <cstring>
using namespace std;
using namespace cv;
void remove_background(rs2::video_frame& other, const rs2::depth_frame& depth_frame, float depth_scale, float clipping_dist);
float get_depth_scale(rs2::device dev);
rs2_stream find_stream_to_align(const std::vector<rs2::stream_profile>& streams);
bool profile_changed(const std::vector<rs2::stream_profile>& current, const std::vector<rs2::stream_profile>& prev);
int main(int args, char * argv[]) try
{
// Create and initialize GUI related objects
rs2::colorizer c;
rs2::config cfg;
rs2::pipeline pipe;
const int width = 1280;
const int height = 720;
c.set_option(RS2_OPTION_HISTOGRAM_EQUALIZATION_ENABLED, 1.f);
c.set_option(RS2_OPTION_COLOR_SCHEME, 2.f); // White to Black
cfg.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_BGR8, 30);
cfg.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, 30);
rs2::pipeline_profile profile = pipe.start(cfg);
float depth_scale = get_depth_scale(profile.get_device());
rs2_stream align_to = find_stream_to_align(profile.get_streams());
rs2::align align(align_to);
float depth_clipping_distance = 3.f;
while (true)
{
rs2::frameset frameset = pipe.wait_for_frames();
if (profile_changed(pipe.get_active_profile().get_streams(), profile.get_streams()))
{
profile = pipe.get_active_profile();
align_to = find_stream_to_align(profile.get_streams());
align = rs2::align(align_to);
depth_scale = get_depth_scale(profile.get_device());
}
auto processed = align.process(frameset);
rs2::video_frame other_frame = processed.first(align_to);
rs2::depth_frame aligned_depth_frame = c(processed.get_depth_frame());
if (!aligned_depth_frame || !other_frame)
{
continue;
}
remove_background(other_frame, aligned_depth_frame, depth_scale, depth_clipping_distance);
Mat other_frameaM(Size(width, height), CV_8UC3, (void*)other_frame.get_data(), Mat::AUTO_STEP);
Mat aligned_depthM(Size(width, height), CV_8UC3, (void*)aligned_depth_frame.get_data(), Mat::AUTO_STEP);
namedWindow("other window", WINDOW_AUTOSIZE);
namedWindow("depth window", WINDOW_AUTOSIZE);
imshow("other window", other_frameaM);
imshow("depth window", aligned_depthM);
}
return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl;
return EXIT_FAILURE;
}
catch (const std::exception & e)
{
std::cerr << e.what() << std::endl;
return EXIT_FAILURE;
}
float get_depth_scale(rs2::device dev)
{
// Go over the device's sensors
for (rs2::sensor& sensor : dev.query_sensors())
{
// Check if the sensor is a depth sensor
if (rs2::depth_sensor dpt = sensor.as<rs2::depth_sensor>())
{
return dpt.get_depth_scale();
}
}
throw std::runtime_error("Device does not have a depth sensor");
}
void remove_background(rs2::video_frame& other_frame, const rs2::depth_frame& depth_frame, float depth_scale, float clipping_dist)
{
const uint16_t* p_depth_frame = reinterpret_cast<const uint16_t*>(depth_frame.get_data());
uint8_t* p_other_frame = reinterpret_cast<uint8_t*>(const_cast<void*>(other_frame.get_data()));
int width = other_frame.get_width();
int height = other_frame.get_height();
int other_bpp = other_frame.get_bytes_per_pixel();
#pragma omp parallel for schedule(dynamic) //Using OpenMP to try to parallelise the loop
for (int y = 0; y < height; y++)
{
auto depth_pixel_index = y * width;
for (int x = 0; x < width; x++, ++depth_pixel_index)
{
// Get the depth value of the current pixel
auto pixels_distance = depth_scale * p_depth_frame[depth_pixel_index];
// Check if the depth value is invalid (<=0) or greater than the threshold
if (pixels_distance <= 0.f || pixels_distance > clipping_dist)
{
// Calculate the offset in other frame's buffer to current pixel
auto offset = depth_pixel_index * other_bpp;
// Set pixel to "background" color (0x999999)
std::memset(&p_other_frame[offset], 0x99, other_bpp);
}
}
}
}
rs2_stream find_stream_to_align(const std::vector<rs2::stream_profile>& streams)
{
//Given a vector of streams, we try to find a depth stream and another stream to align depth with.
//We prioritize color streams to make the view look better.
//If color is not available, we take another stream (other than depth)
rs2_stream align_to = RS2_STREAM_ANY;
bool depth_stream_found = false;
bool color_stream_found = false;
for (rs2::stream_profile sp : streams)
{
rs2_stream profile_stream = sp.stream_type();
if (profile_stream != RS2_STREAM_DEPTH)
{
if (!color_stream_found) //Prefer color
align_to = profile_stream;
if (profile_stream == RS2_STREAM_COLOR)
{
color_stream_found = true;
}
}
else
{
depth_stream_found = true;
}
}
if (!depth_stream_found)
throw std::runtime_error("No Depth stream available");
if (align_to == RS2_STREAM_ANY)
throw std::runtime_error("No stream found to align with Depth");
return align_to;
}
bool profile_changed(const std::vector<rs2::stream_profile>& current, const std::vector<rs2::stream_profile>& prev)
{
for (auto&& sp : prev)
{
//If previous profile is in current (maybe just added another)
auto itr = std::find_if(std::begin(current), std::end(current), [&sp](const rs2::stream_profile& current_sp) { return sp.unique_id() == current_sp.unique_id(); });
if (itr == std::end(current)) //If the previous stream wasn't found in current
{
return true;
}
}
return false;
}
The windows show only gray screens and nothing happens.
Mat other_frameaM(Size(width, height), CV_8UC3, (void*)other_frame.get_data(), Mat::AUTO_STEP);
Mat aligned_depthM(Size(width, height), CV_8UC3, (void*)aligned_depth_frame.get_data(), Mat::AUTO_STEP);
I guess there is no problem here, because the depth image and RGB image open fine in CV_8UC3 format.
However, when I try to align (calibrate) them and then get the result into OpenCV, the image appears only as a gray screen.
auto frames = pipe.wait_for_frames(); // Wait for next set of frames from the camera
rs2::video_frame color = frames.get_color_frame();
rs2::depth_frame depth = color_map(frames.get_depth_frame());
if (!color)
color = frames.get_infrared_frame();
Mat colorM(Size(width, height), CV_8UC3, (void*)color.get_data(), Mat::AUTO_STEP);
Mat depthM(Size(width, height), CV_8UC3, (void*)depth.get_data(), Mat::AUTO_STEP);
This is part of the code that outputs the color image and the depth image.
It works well.
So I guess:
rs2::video_frame other_frame = processed.first(align_to);
rs2::depth_frame aligned_depth_frame = c(processed.get_depth_frame());
Whatever the processing, I thought it would work because it fetches the result in frame format. I think I'm making a very big mistake somewhere in this code.
Which part is wrong?
There are several ways to store an image in memory, and there is no guarantee that you can just pass the buffer and have it all work; try copying pixel by pixel.
You should know that OpenCV uses a BGR interleaved image format, while realsense might use another.
1) Get aligned frames
frameset data = pipe.wait_for_frames();
frameset aligned_set = align_to.process(data);
auto color_mat = frame_to_mat(aligned_set.get_color_frame());
auto depth_mat = frame_to_mat(aligned_set.get_depth_frame());
2) frame_to_mat helper function
cv::Mat frame_to_mat(const rs2::frame& f)
{
using namespace cv;
using namespace rs2;
auto vf = f.as<video_frame>();
const int w = vf.get_width();
const int h = vf.get_height();
if (f.get_profile().format() == RS2_FORMAT_BGR8)
{
return Mat(Size(w, h), CV_8UC3, (void*)f.get_data(), Mat::AUTO_STEP);
}
else if (f.get_profile().format() == RS2_FORMAT_RGB8)
{
auto r = Mat(Size(w, h), CV_8UC3, (void*)f.get_data(), Mat::AUTO_STEP);
cvtColor(r, r, CV_RGB2BGR);
return r;
}
else if (f.get_profile().format() == RS2_FORMAT_Z16)
{
return Mat(Size(w, h), CV_16UC1, (void*)f.get_data(), Mat::AUTO_STEP);
}
else if (f.get_profile().format() == RS2_FORMAT_Y8)
{
return Mat(Size(w, h), CV_8UC1, (void*)f.get_data(), Mat::AUTO_STEP);
}
throw std::runtime_error("Frame format is not supported yet!");
}

How to minimize face-detection error

Here is the code. I can now detect the face and mouth together, and I'm able to roughly measure the height of the mouth's bounding box.
The problem is that the mouth detection seems to detect everything it defines as a mouth, even when it isn't one.
I want to use the "face" bounding box as the mouth-detection region to minimize these errors. I don't know if stacked for loops would work, i.e. putting the mouth loop inside the face loop? I'm fairly new to coding, so any help would be appreciated.
import gab.opencv.*;
import java.awt.Rectangle;
import processing.video.*;
Capture video;
OpenCV f;
OpenCV m;
void setup() {
size(800, 600);
video = new Capture(this, 800/2, 600/2);
f = new OpenCV(this, 800/2, 600/2);
m = new OpenCV(this, 800/2, 600/2);
video.start();
}
void draw() {
scale(2);
f.loadImage(video);
m.loadImage(video);
f.loadCascade(OpenCV.CASCADE_FRONTALFACE);
m.loadCascade(OpenCV.CASCADE_MOUTH);
image(video, 0, 0 );
noFill();
stroke(0, 255, 0);
strokeWeight(3);
Rectangle[] mouth = m.detect();
Rectangle[] face = f.detect();
println(mouth.length);
strokeWeight(3);
for (int i = 0; i < face.length; i++) {
println(face[i].x + "," + face[i].y);
rect(face[i].x, face[i].y, face[i].width, face[i].height);
}
for (int i = 0; i < mouth.length; i++) {
println(mouth[i].x + "," + mouth[i].y);
rect(mouth[i].x, mouth[i].y, mouth[i].width, mouth[i].height);
}
for (int i = 0; i < mouth.length; i++) {
fill(255, 0, 0);
noStroke();
ellipse((mouth[i].x)+(mouth[i].width/2), mouth[i].y, 5, 5);
ellipse((mouth[i].x)+(mouth[i].width/2), (mouth[i].y)+ (mouth[i].height), 5, 5);
}
for (int i = 0; i < mouth.length; i++) {
int px = (mouth[i].x)+(mouth[i].width/2);
int py = (mouth[i].y)+(mouth[i].height);
int mOpen = int (dist(px, mouth[i].y, px, py));
println(mOpen);
}
}
void captureEvent(Capture d) {
d.read();
}
There are a couple of issues:
You shouldn't be loading OpenCV cascades multiple times a second in draw(). You should do it once in setup() and just call detect() in draw().
OpenCV for Processing seems to override the cascade loaded in the second instance with the cascade loaded in the first instance.
If accuracy isn't a huge issue, you can get away with a single cascade: the mouth one. Note that there are options/hints you can pass to the detect function which may help the detection. For example, you can tell the detector to detect the largest object only, give it a hint of the smallest and largest bounding boxes the mouth would have with your setup, and specify how much the results should be filtered out.
Here's a code sample for the above:
import gab.opencv.*;
import java.awt.Rectangle;
import org.opencv.objdetect.Objdetect;
import processing.video.*;
Capture video;
OpenCV opencv;
//cascade detection parameters - explanations from Mastering OpenCV with Practical Computer Vision Projects
int flags = Objdetect.CASCADE_FIND_BIGGEST_OBJECT;
// Smallest object size.
int minFeatureSize = 20;
int maxFeatureSize = 80;
// How detailed should the search be. Must be larger than 1.0.
float searchScaleFactor = 1.1f;
// How much the detections should be filtered out. This should depend on how bad false detections are to your system.
// minNeighbors=2 means lots of good+bad detections, and minNeighbors=6 means good detections are given but some are missed.
int minNeighbors = 6;
void setup() {
size(320, 240);
noFill();
stroke(0, 192, 0);
strokeWeight(3);
video = new Capture(this,width,height);
video.start();
opencv = new OpenCV(this,320,240);
opencv.loadCascade(OpenCV.CASCADE_MOUTH);
}
void draw() {
//feed cam image to OpenCV, it turns it to grayscale
opencv.loadImage(video);
opencv.equalizeHistogram();
image(opencv.getOutput(), 0, 0 );
Rectangle[] mouths = opencv.detect(searchScaleFactor,minNeighbors,flags,minFeatureSize, maxFeatureSize);
for (int i = 0; i < mouths.length; i++) {
text(mouths[i].x + "," + mouths[i].y + "," + mouths[i].width + "," + mouths[i].height,mouths[i].x, mouths[i].y);
rect(mouths[i].x, mouths[i].y, mouths[i].width, mouths[i].height);
}
}
void captureEvent(Capture c) {
c.read();
}
Note that facial hair can cause false positives.
I have provided more in-depth notes in an answer to your previous related question. I recommend focusing on the FaceOSC part, as it will be more accurate.
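If you do still want to combine the face and mouth cascades as originally asked, the geometric part of "use the face bounding box as a mouth region" is just a rectangle-containment test. Here is a rough sketch of such a filter (the helper name and the lower-half heuristic are my own, and it assumes you already have both sets of detections despite the cascade-override issue mentioned above):

import java.awt.Rectangle;
import java.util.ArrayList;
// keep only mouth rectangles whose centre lies in the lower half of some detected face
ArrayList<Rectangle> mouthsInsideFaces(Rectangle[] faces, Rectangle[] mouths) {
  ArrayList<Rectangle> kept = new ArrayList<Rectangle>();
  for (Rectangle mouth : mouths) {
    int cx = mouth.x + mouth.width / 2;
    int cy = mouth.y + mouth.height / 2;
    for (Rectangle face : faces) {
      // the lower half of the face box is where a real mouth should sit
      Rectangle lowerHalf = new Rectangle(face.x, face.y + face.height / 2, face.width, face.height / 2);
      if (lowerHalf.contains(cx, cy)) {
        kept.add(mouth);
        break;
      }
    }
  }
  return kept;
}

In the draw() of the question's sketch you would then call it with the two detect() results, e.g. ArrayList<Rectangle> goodMouths = mouthsInsideFaces(face, mouth);, and only draw the rectangles in goodMouths (the imports go at the top of the sketch; java.awt.Rectangle is already imported there).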

GSvideo error in Processing 2.03

I am working on a Processing project, but when I try to record the sketch with the GSVideo library I get this error:
A library used by this sketch is not installed properly.
GSVideo version: 1.0.0
A library relies on native code that's not available.
Or only works properly when the sketch is run as a 64-bit application.
In my project I'm tracking objects in HSV color space with the OpenCV for Processing library, and I want to record the sketch just so I can show my work later. This is my code:
/**
* HSVColorTracking
* Greg Borenstein
* https://github.com/atduskgreg/opencv-processing-book/blob/master/code/hsv_color_tracking/HSVColorTracking/HSVColorTracking.pde
*
* Modified by Jordi Tost #jorditost (color selection)
* University of Applied Sciences Potsdam, 2014
*
* Modified by Luz Alejandra Magre
* Universidad Tecnológica de Bolívar, 2015
*/
import gab.opencv.*;
import processing.video.*;
import codeanticode.gsvideo.*;
import java.awt.Rectangle;
GSCapture video;
OpenCV opencv;
GSMovieMaker mm;
int fps = 30;
PImage src, colorFilteredImage;
ArrayList<Contour> contours;
// <1> Set the range of Hue values for our filter
int rangeLow = 20;
int rangeHigh = 35;
void setup() {
frameRate(fps);
String[] cameras = GSCapture.list();
size(2*opencv.width, opencv.height, P2D);
if (cameras.length == 0)
{
println("There are no cameras available for capture.");
exit();
}
else {
println("Available cameras:");
for (int i = 0; i < cameras.length; i++) {
println(cameras[i]);
}
video = new GSCapture(this, 640, 480, cameras[0]);
video.start();
opencv = new OpenCV(this, video.width, video.height);
contours = new ArrayList<Contour>();
mm = new GSMovieMaker(this, width, height, "Test.ogg", GSMovieMaker.THEORA, GSMovieMaker.BEST, fps);
mm.setQueueSize(50, 10);
mm.start();
}
}
void draw() {
// Read last captured frame
if (video.available()) {
video.read();
}
// <2> Load the new frame of our movie in to OpenCV
opencv.loadImage(video);
// Tell OpenCV to use color information
opencv.useColor();
src = opencv.getSnapshot();
// <3> Tell OpenCV to work in HSV color space.
opencv.useColor(HSB);
// <4> Copy the Hue channel of our image into
// the gray channel, which we process.
opencv.setGray(opencv.getH().clone());
// <5> Filter the image based on the range of
// hue values that match the object we want to track.
opencv.inRange(rangeLow, rangeHigh);
// <6> Get the processed image for reference.
colorFilteredImage = opencv.getSnapshot();
///////////////////////////////////////////
// We could process our image here!
// See ImageFiltering.pde
///////////////////////////////////////////
// <7> Find contours in our range image.
// Passing 'true' sorts them by descending area.
contours = opencv.findContours(true, true);
// <8> Display background images
image(src, 0, 0);
image(colorFilteredImage, src.width, 0);
// <9> Check to make sure we've found any contours
if (contours.size() > 0) {
// <9> Get the first contour, which will be the largest one
Contour biggestContour = contours.get(0);
// <10> Find the bounding box of the largest contour,
// and hence our object.
Rectangle r = biggestContour.getBoundingBox();
// <11> Draw the bounding box of our object
noFill();
strokeWeight(2);
stroke(255, 0, 0);
rect(r.x, r.y, r.width, r.height);
// <12> Draw a dot in the middle of the bounding box, on the object.
noStroke();
fill(255, 0, 0);
ellipse(r.x + r.width/2, r.y + r.height/2, 30, 30);
text(r.x + r.width/2, 50, 50);
text(r.y + r.height/2, 50, 80);
}
loadPixels();
mm.addFrame(pixels);
saveFrame("frame-######.png");
}
void mousePressed() {
color c = get(mouseX, mouseY);
println("r: " + red(c) + " g: " + green(c) + " b: " + blue(c));
int hue = int(map(hue(c), 0, 255, 0, 180));
println("hue to detect: " + hue);
rangeLow = hue - 5;
rangeHigh = hue + 5;
}
void keyPressed() {
if (key == ' ') {
// Finish the movie if space bar is pressed
mm.finish();
// Quit running the sketch once the file is written
exit();
}
}
I would really appreciate any help with this.

Detecting intersection between 2 lines in webcam feed with OpenCV

I'm trying to detect the intersection between two lines in a webcam feed. Here's a screenshot of what I already have.
I'm trying to find the intersection between my red and green lines.
And here's the code I have so far:
#include "stdafx.h"
#include <cv.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <highgui.h>
IplImage* imgTracking;
int lastX = -1;
int lastY = -1;
//This function threshold the HSV image and create a binary image
IplImage* GetThresholdedImage(IplImage* imgHSV){
IplImage* imgThresh=cvCreateImage(cvGetSize(imgHSV),IPL_DEPTH_8U, 1);
cvInRangeS(imgHSV, cvScalar(170,160,60), cvScalar(180,256,256), imgThresh);
return imgThresh;
}
void trackObject(IplImage* imgThresh){
// Calculate the moments of 'imgThresh'
CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
cvMoments(imgThresh, moments, 1);
double moment10 = cvGetSpatialMoment(moments, 1, 0);
double moment01 = cvGetSpatialMoment(moments, 0, 1);
double area = cvGetCentralMoment(moments, 0, 0);
// if the area<1000, I consider that there is no object in the image; the area is not exactly zero because of noise
if(area>1000){
// calculate the position of the ball
int posX = moment10/area;
int posY = moment01/area;
if(lastX>=0 && lastY>=0 && posX>=0 && posY>=0)
{
// Draw a red line from the previous point to the current point
cvLine(imgTracking, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,0,255), 4);
}
lastX = posX;
lastY = posY;
}
cvLine(imgTracking,cv::Point(100,300) , cv::Point(600,300),cv::Scalar(0,200,0),2,8);
free(moments);
}
bool intersection(cv::Point lastX, cv::Point lastY, cv::Point , cv::Point())
{
}
/*
void imaginaryline()
{
cv::Mat img = cv::Mat::zeros(500, 500, CV_8UC3);
cv::line(img, cv::Point(100,200) , cv::Point(400,100),cv::Scalar(0,200,0),2,8);
}*/
int main(){
CvCapture* capture =0;
capture = cvCaptureFromCAM(0);
if(!capture){
printf("Capture failure\n");
return -1;
}
IplImage* frame=0;
frame = cvQueryFrame(capture);
if(!frame) return -1;
//create a blank image and assigned to 'imgTracking' which has the same size of original video
imgTracking=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U, 3);
cvZero(imgTracking); //convert the image 'imgTracking' to black
cvNamedWindow("Video");
cvNamedWindow("Ball");
//iterate through each frames of the video
while(true){
frame = cvQueryFrame(capture);
if(!frame) break;
frame=cvCloneImage(frame);
cvSmooth(frame, frame, CV_GAUSSIAN,3,3); //smooth the original image using Gaussian kernel
IplImage* imgHSV = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
cvCvtColor(frame, imgHSV, CV_BGR2HSV); //Change the color format from BGR to HSV
IplImage* imgThresh = GetThresholdedImage(imgHSV);
cvSmooth(imgThresh, imgThresh, CV_GAUSSIAN,3,3); //smooth the binary image using Gaussian kernel
//track the position of the ball
trackObject(imgThresh);
printf("Pos X = %d", lastX);
printf("Pos Y = %d", lastY);
// Add the tracking image and the frame
cvAdd(frame, imgTracking, frame);
cvShowImage("Ball", imgThresh);
cvShowImage("Video", frame);
//Clean up used images
cvReleaseImage(&imgHSV);
cvReleaseImage(&imgThresh);
cvReleaseImage(&frame);
//Wait 10mS
int c = cvWaitKey(10);
//If 'ESC' is pressed, break the loop
if((char)c==27 ) break;
}
cvDestroyAllWindows() ;
cvReleaseImage(&imgTracking);
cvReleaseCapture(&capture);
return 0;
}
Thank you for your attention, guys. I look forward to any of your responses.
UPDATE:
Thanks to Sebastian Schmitz, I solved it. Here's my code:
void checkIntersection(int line, int lastY, int y)
{
if((lastY < line && y >= line) || (lastY > line && y <= line))
{
printf("intersection detected");
}
}
void trackObject(IplImage* imgThresh){
// Calculate the moments of 'imgThresh'
CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
cvMoments(imgThresh, moments, 1);
double moment10 = cvGetSpatialMoment(moments, 1, 0);
double moment01 = cvGetSpatialMoment(moments, 0, 1);
double area = cvGetCentralMoment(moments, 0, 0);
// if the area<1000, I consider that there is no object in the image; the area is not exactly zero because of noise
if(area>1000){
// calculate the position of the ball
int posX = moment10/area;
int posY = moment01/area;
if(lastX>=0 && lastY>=0 && posX>=0 && posY>=0)
{
// Draw a red line from the previous point to the current point
cvLine(imgTracking, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,0,255), 4);
}
checkIntersection(300, lastY, posY);
lastX = posX;
lastY = posY;
}
cvLine(imgTracking,cv::Point(100,300) , cv::Point(600,300),cv::Scalar(0,200,0),2,8);
cvRectangle(imgTracking,cv::Point(400,400), cv::Point(450,450),cv::Scalar(0,200,0),2,8);
free(moments);
}
I put the call to the checkIntersection function inside the trackObject function so I don't have to make the posY variable global, which led to many errors.
Thank you all for your responses.
If the line is always perfectly horizontal, it will be enough to test whether the y coordinate of your last point is on one side of the line and your current point is on the other:
//Pseudocode:
int line = 20; //horizontal line on y-coordinate "20"
while(tracking == true){
int lastY = posY;
int y = getCoordinate().getY(); //get the y-coordinate of your current point
checkIntersection(line, lastY, y)
}
checkIntersection(int line, int lastY, int y){
if(lastY < line && y >= line ||
lastY > line && y <= line ){
print("intersection detected")
//optional additional check if point is between endpoint of line if you have to
}
}

OpenCV 2.4.X slow on square detection with WebCam vs OpenCV 2.1.X

I have tried to port the square detection sample to OpenCV 2.4.1-2.4.4, but the results seem very slow. I was keen to move to newer versions of OpenCV because of the new functionality they provide, but I am getting very slow results.
My OpenCV code for versions 2.4.X is:
// The "Square Detector" program.
// It loads several images sequentially and tries to find squares in
// each image
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <math.h>
#include <string.h>
using namespace cv;
using namespace std;
int thresh = 50, N = 11;
const char* wndname = "Square Detection Demo";
// helper function:
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
static double angle( Point pt1, Point pt2, Point pt0 )
{
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
static void findSquares( const Mat& image, vector<vector<Point> >& squares )
{
squares.clear();
Mat pyr, timg, gray0(image.size(), CV_8U), gray;
// down-scale and upscale the image to filter out the noise
pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
pyrUp(pyr, timg, image.size());
vector<vector<Point> > contours;
// find squares in every color plane of the image
for( int c = 0; c < 3; c++ )
{
int ch[] = {c, 0};
mixChannels(&timg, 1, &gray0, 1, ch, 1);
// try several threshold levels
for( int l = 0; l < N; l++ )
{
// hack: use Canny instead of zero threshold level.
// Canny helps to catch squares with gradient shading
if( l == 0 )
{
// apply Canny. Take the upper threshold from slider
// and set the lower to 0 (which forces edges merging)
Canny(gray0, gray, 0, thresh, 5);
// dilate canny output to remove potential
// holes between edge segments
dilate(gray, gray, Mat(), Point(-1,-1));
}
else
{
// apply threshold if l!=0:
// tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
gray = gray0 >= (l+1)*255/N;
}
// find contours and store them all as a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
vector<Point> approx;
// test each contour
for( size_t i = 0; i < contours.size(); i++ )
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
// square contours should have 4 vertices after approximation
// relatively large area (to filter out noisy contours)
// and be convex.
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if( approx.size() == 4 &&
fabs(contourArea(Mat(approx))) > 1000 &&
isContourConvex(Mat(approx)) )
{
double maxCosine = 0;
for( int j = 2; j < 5; j++ )
{
// find the maximum cosine of the angle between joint edges
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
// if cosines of all angles are small
// (all angles are ~90 degrees) then write quadrangle
// vertices to resultant sequence
if( maxCosine < 0.3 )
squares.push_back(approx);
}
}
}
}
}
// the function draws all the squares in the image
static void drawSquares( Mat& image, const vector<vector<Point> >& squares )
{
for( size_t i = 0; i < squares.size(); i++ )
{
const Point* p = &squares[i][0];
int n = (int)squares[i].size();
polylines(image, &p, &n, 1, true, Scalar(0,255,0), 3, CV_AA);
}
imshow(wndname, image);
}
int main()
{
VideoCapture cap;
cap.open(0);
Mat frame,image;
namedWindow( "Square Detection Demo", 1 );
vector<vector<Point> > squares;
for(;;)
{
cap >> frame;
if( frame.empty() ){
break;
}
frame.copyTo(image);
if( image.empty() )
{
cout << "Couldn't load image" << endl;
continue;
}
findSquares(image, squares);
drawSquares(image, squares);
//imshow("Window", image);
int c = waitKey(1);
if( (char)c == 27 )
break;
}
return 0;
}
You can see that the code is a simple mix of webcam visualization and the squares sample, both provided with OpenCV 2.4.X.
However, the equivalent code for OpenCV version 2.1, which follows, is a lot faster:
#include <cv.h>
#include <highgui.h>
int thresh = 50;
IplImage* img = 0;
IplImage* img0 = 0;
CvMemStorage* storage = 0;
// helper function:
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
double angle( CvPoint* pt1, CvPoint* pt2, CvPoint* pt0 )
{
double dx1 = pt1->x - pt0->x;
double dy1 = pt1->y - pt0->y;
double dx2 = pt2->x - pt0->x;
double dy2 = pt2->y - pt0->y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
CvSeq* findSquares4( IplImage* img, CvMemStorage* storage )
{
CvSeq* contours;
int i, c, l, N = 11;
CvSize sz = cvSize( img->width & -2, img->height & -2 );
IplImage* timg = cvCloneImage( img ); // make a copy of input image
IplImage* gray = cvCreateImage( sz, 8, 1 );
IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );
IplImage* tgray;
CvSeq* result;
double s, t;
// create empty sequence that will contain points -
// 4 points per square (the square's vertices)
CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );
// select the maximum ROI in the image
// with the width and height divisible by 2
cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height ));
//cvSetImageROI( timg, cvRect( 0,0,50, 50 ));
// down-scale and upscale the image to filter out the noise
cvPyrDown( timg, pyr, 7 );
cvPyrUp( pyr, timg, 7 );
tgray = cvCreateImage( sz, 8, 1 );
// find squares in every color plane of the image
for( c = 0; c < 3; c++ )
{
// extract the c-th color plane
cvSetImageCOI( timg, c+1 );
cvCopy( timg, tgray, 0 );
// try several threshold levels
for( l = 0; l < N; l++ )
{
// hack: use Canny instead of zero threshold level.
// Canny helps to catch squares with gradient shading
if( l == 0 )
{
// apply Canny. Take the upper threshold from slider
// and set the lower to 0 (which forces edges merging)
cvCanny( tgray, gray, 0, thresh, 5 );
// dilate canny output to remove potential
// holes between edge segments
cvDilate( gray, gray, 0, 1 );
}
else
{
// apply threshold if l!=0:
// tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );
}
// find contours and store them all as a list
cvFindContours( gray, storage, &contours, sizeof(CvContour),
CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
// test each contour
while( contours )
{
// approximate contour with accuracy proportional
// to the contour perimeter
result = cvApproxPoly( contours, sizeof(CvContour), storage,
CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );
// square contours should have 4 vertices after approximation
// relatively large area (to filter out noisy contours)
// and be convex.
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if( result->total == 4 &&
cvContourArea(result,CV_WHOLE_SEQ,0) > 1000 &&
cvCheckContourConvexity(result) )
{
s = 0;
for( i = 0; i < 5; i++ )
{
// find minimum angle between joint
// edges (maximum of cosine)
if( i >= 2 )
{
t = fabs(angle(
(CvPoint*)cvGetSeqElem( result, i ),
(CvPoint*)cvGetSeqElem( result, i-2 ),
(CvPoint*)cvGetSeqElem( result, i-1 )));
s = s > t ? s : t;
}
}
// if cosines of all angles are small
// (all angles are ~90 degrees) then write quadrangle
// vertices to resultant sequence
if( s < 0.3 )
for( i = 0; i < 4; i++ )
cvSeqPush( squares,
(CvPoint*)cvGetSeqElem( result, i ));
}
// take the next contour
contours = contours->h_next;
}
}
}
// release all the temporary images
cvReleaseImage( &gray );
cvReleaseImage( &pyr );
cvReleaseImage( &tgray );
cvReleaseImage( &timg );
return squares;
}
// the function draws all the squares in the image
void drawSquares( IplImage* img, CvSeq* squares )
{
CvSeqReader reader;
IplImage* cpy = cvCloneImage( img );
int i;
// initialize reader of the sequence
cvStartReadSeq( squares, &reader, 0 );
// read 4 sequence elements at a time (all vertices of a square)
for( i = 0; i < squares->total; i += 4 )
{
CvPoint pt[4], *rect = pt;
int count = 4;
// read 4 vertices
CV_READ_SEQ_ELEM( pt[0], reader );
CV_READ_SEQ_ELEM( pt[1], reader );
CV_READ_SEQ_ELEM( pt[2], reader );
CV_READ_SEQ_ELEM( pt[3], reader );
// draw the square as a closed polyline
cvPolyLine( cpy, &rect, &count, 1, 1, CV_RGB(0,255,0), 3, CV_AA, 0 );
}
// show the resultant image
cvShowImage( "Squares", cpy );
cvReleaseImage( &cpy );
}
int main(int argc, char** argv){
// Create a window called Original Image with a default size.
cvNamedWindow("Original Image", CV_WINDOW_AUTOSIZE);
cvNamedWindow("Squares", CV_WINDOW_AUTOSIZE);
// Create the connection to the webcam.
CvCapture* capture = cvCreateCameraCapture(0);
if( !capture ){
throw "Error when reading steam_avi";
}
storage = cvCreateMemStorage(0);
while(true)
{
// Store the captured frame in img0.
img0 = cvQueryFrame(capture);
if(!img0){
break;
}
img = cvCloneImage( img0 );
// find and draw the squares
drawSquares( img, findSquares4( img, storage ) );
cvShowImage("Original Image", img0);
cvReleaseImage(&img);
// clear memory storage - reset free space position
cvClearMemStorage( storage );
// Wait for ESC to be pressed to exit the infinite loop.
char c = cvWaitKey(10);
if( c == 27 ) break;
}
//cvReleaseImage(&img);
cvReleaseImage(&img0);
// clear memory storage - reset free space position
cvClearMemStorage( storage );
// Destroy the "Original Image" window.
cvDestroyWindow("Original Image");
cvDestroyWindow("Squares");
// Free the memory used by the capture variable.
cvReleaseCapture(&capture);
}
I am aware that I can use just one colour channel for a 3x speed-up, and change other parameters to speed things up further, but I wonder why equivalent code gives such different execution times.
Is there anything basic that I am missing?
I have tried to put working code up for everyone to try, so as to not waste anybody's time with vague questions such as "OpenCV 2.4.X is slow".
Finally, I left Canny out and checked that the area of each square was below a certain value (less than 20% of the image area) so that unwanted squares were not detected. As for getting multiple results for the same square, I am not too bothered by it at the moment, as I can feed the detected squares in as possible template images for comparison. Now off to recognizing the image inside the square. Thanks Chris for at least reading this comment (I can't give you points as an answer since it was only a comment, but either way, thank you).
