How to reduce motion blur in a picture? - OpenCV

I am trying to detect the QR data from a blurry image and have not been successful so far.
I have tried a couple of morphology operations on the image and still could not get at the data embedded in it.
How could I improve the situation?
This is what I have tried so far:
#include <iostream>
#include <opencv2/highgui.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/objdetect.hpp>
#include <opencv2/barcode.hpp>
int main() {
    cv::Mat imageMat = cv::imread("/Users/apple/Downloads/36.jpg");
    if (imageMat.empty()) {
        std::cout << "Image not present and cannot be opened" << std::endl;
        return 0;
    }
    cv::Mat imageGray, imageBlur, imageCanny, imageDilated, imageEroded, thresholdImage;
    cv::cvtColor(imageMat, imageGray, cv::COLOR_BGR2GRAY);
    cv::GaussianBlur(imageGray, imageBlur, cv::Size(3,3), 3, 0);
    std::cout << "Gaussian blur done" << std::endl;
    // cv::Canny(imageBlur, imageCanny, 25, 75);
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3,3));
    cv::dilate(imageBlur, imageDilated, kernel);
    cv::erode(imageDilated, imageEroded, kernel);
    cv::threshold(imageEroded, thresholdImage, 5, 255, cv::THRESH_BINARY + cv::THRESH_OTSU);
    std::cout << "Threshold done" << std::endl;
    // Reads the barcode/data
    cv::QRCodeDetector qrDecoder;
    std::string decodedData = qrDecoder.detectAndDecode(thresholdImage);
    std::cout << "Decoded data = " << decodedData << std::endl;
    return 0;
}
This code does not decode the data from the image with the blurry QR code. What would you suggest to unblur the image and recover the data embedded in it? Increasing the contrast? Increasing the brightness?
Any pointer to documentation would be helpful too.
Thank you in advance.
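One direction worth trying is to sharpen instead of smooth before detection: an unsharp mask boosts the edge contrast the finder patterns need, and QRCodeDetector binarizes internally, so the hard Otsu threshold can be dropped. A minimal sketch along those lines (the sigma and the 1.5 / -0.5 blend weights are illustrative starting points, not tuned values):

#include <iostream>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/objdetect.hpp>

int main() {
    // Same input image as in the question; adjust the path as needed.
    cv::Mat imageMat = cv::imread("/Users/apple/Downloads/36.jpg");
    if (imageMat.empty())
        return 1;

    cv::Mat gray, smoothed, sharpened;
    cv::cvtColor(imageMat, gray, cv::COLOR_BGR2GRAY);
    // Unsharp mask: blend the image against a Gaussian-blurred copy.
    cv::GaussianBlur(gray, smoothed, cv::Size(0, 0), 3);
    cv::addWeighted(gray, 1.5, smoothed, -0.5, 0, sharpened);

    // Feed the sharpened grayscale image directly to the detector;
    // it performs its own binarization internally.
    cv::QRCodeDetector qrDecoder;
    std::string decodedData = qrDecoder.detectAndDecode(sharpened);
    std::cout << "Decoded data = " << decodedData << std::endl;
    return 0;
}

If the blur is strong motion blur rather than defocus, sharpening alone is unlikely to be enough, and a deconvolution approach with an estimated blur kernel would be the next thing to investigate.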

Related

How to convert an OpenCV Mat frame into a VP8 frame

My problem is this:
I need to get the webcam video and encode each frame as a VP8 frame so I can packetize it into an RTP stream, or directly create an RTP stream to send through a WebRTC application.
I saw that I can access the camera via OpenCV; here is a complete example:
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/videoio.hpp"
#include <iostream>
using namespace cv;
using namespace std;
void drawText(Mat & image);
int main()
{
    cout << "Built with OpenCV " << CV_VERSION << endl;
    Mat image;
    VideoCapture capture;
    capture.open(0); // webcam device
    if (capture.isOpened())
    {
        cout << "Capture is opened" << endl;
        for (;;)
        {
            capture >> image;
            if (image.empty())
                break;
            drawText(image);
            imshow("Sample", image);
            if (waitKey(10) >= 0)
                break;
        }
    }
    else
    {
        cout << "No capture" << endl;
        image = Mat::zeros(480, 640, CV_8UC1);
        drawText(image);
        imshow("Sample", image);
        waitKey(0);
    }
    return 0;
}

void drawText(Mat & image)
{
    putText(image, "Hello OpenCV",
            Point(20, 50),
            FONT_HERSHEY_COMPLEX, 1, // font face and scale
            Scalar(255, 255, 255),   // white
            1, LINE_AA);             // line thickness and type
}
I have "Mat image", so I can the the Mat (each video frame into a OpenCV Mat) on the line:
capture >> image;
Is it possible to get the bytes from the Mat class and encode them into a VP8 frame?
I saw there is a VideoWriter class in OpenCV where I can set up VP8 encoding, but I'd prefer not to save anything to disk, since I only need the VP8 bytes to send over a stream.
Is it possible to get the bytes from the camera and encode them into VP8 frames, or even to take the whole camera recording and set up an RTP stream with OpenCV?
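For reference, it is possible to stay entirely in memory by converting each BGR Mat to planar I420 and handing the buffer straight to libvpx, with no VideoWriter or disk I/O involved. A minimal sketch, assuming libvpx is installed; error handling and the RTP packetization itself are elided, and the encoder configuration is left at its defaults:

#include <opencv2/opencv.hpp>
#include <vpx/vpx_encoder.h>
#include <vpx/vp8cx.h>

int main() {
    cv::VideoCapture cap(0);
    if (!cap.isOpened())
        return -1;
    // VP8 needs even dimensions for the 4:2:0 chroma planes.
    int w = (int)cap.get(cv::CAP_PROP_FRAME_WIDTH);
    int h = (int)cap.get(cv::CAP_PROP_FRAME_HEIGHT);

    vpx_codec_enc_cfg_t cfg;
    vpx_codec_enc_config_default(vpx_codec_vp8_cx(), &cfg, 0);
    cfg.g_w = w;
    cfg.g_h = h;
    vpx_codec_ctx_t codec;
    vpx_codec_enc_init(&codec, vpx_codec_vp8_cx(), &cfg, 0);

    cv::Mat bgr, i420;
    for (vpx_codec_pts_t pts = 0;; ++pts) {
        cap >> bgr;
        if (bgr.empty())
            break;
        // Planar YUV 4:2:0 is the layout VP8 consumes.
        cv::cvtColor(bgr, i420, cv::COLOR_BGR2YUV_I420);

        vpx_image_t img;
        vpx_img_wrap(&img, VPX_IMG_FMT_I420, w, h, 1, i420.data);
        vpx_codec_encode(&codec, &img, pts, 1, 0, VPX_DL_REALTIME);

        vpx_codec_iter_t iter = NULL;
        const vpx_codec_cx_pkt_t *pkt;
        while ((pkt = vpx_codec_get_cx_data(&codec, &iter)) != NULL) {
            if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
                // pkt->data.frame.buf / pkt->data.frame.sz hold the
                // compressed VP8 frame -- pass these bytes to the RTP
                // packetizer instead of writing them to disk.
            }
        }
    }
    vpx_codec_destroy(&codec);
    return 0;
}

The RTP side (RFC 7741 describes VP8 payloading) is a separate problem that OpenCV does not cover; a WebRTC stack such as libwebrtc, or GStreamer with rtpvp8pay, would normally handle both the encoding and the packetization for you.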

Frames lost while processing video with OpenCV

I am capturing a video at 30 fps, but when I process the video with OpenCV for ArUco marker detection I am losing almost half of the frames. So for a 5 min video I expect 5 x 60 x 30 = 9000 frames, but I am getting only about 4500. I tried different resolutions and frame rates while recording, but the problem persists. My code is as follows. I later want to sync the video with the audio recorded from the camera, so even just knowing which frames are being lost would solve my problem. Any pointers or suggestions are welcome.
#include "opencv2\core.hpp"
#include "opencv2\imgcodecs.hpp"
#include "opencv2\imgproc.hpp"
#include "opencv2\highgui.hpp"
#include "opencv2\aruco.hpp"
#include "opencv2\calib3d.hpp"
#include <time.h>
#include <sstream>
#include <iostream>
#include <fstream>
#include "stdafx.h"
using namespace std;
using namespace cv;
#define _CRT_SECURE_NO_WARNINGS 1
int startWebcamMonitoring() // (const Mat& cameraMatrix, const Mat& distanceCoefficients, float arucoSquareDimensions)
{
    Mat frame4;
    Scalar_<double> borderColor, borderColor2, borderColor3;
    vector<int> markerIds;
    vector<vector<Point2f>> markerCorners, rejectedCandidates;
    vector<Vec3d> rotationVectors, translationVectors;
    aruco::DetectorParameters parameters;
    Ptr<aruco::Dictionary> makerDiktionary = aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME::DICT_4X4_50);
    VideoCapture cap("sample.mp4");
    double fps = cap.get(CV_CAP_PROP_FPS);
    cout << "Frames per second : " << fps << endl;
    while (true)
    {
        cap >> sample;
        if (!cap.read(frame4))
            break;
        aruco::detectMarkers(frame4, makerDiktionary, markerCorners, markerIds);
        aruco::drawDetectedMarkers(frame4, markerCorners, markerIds, borderColor);
        aruco::estimatePoseSingleMarkers(markerCorners, arucoSquareDimension, cameraMatrix, distanceCoefficients, rotationVectors, translationVectors);
        Mat rotationMatrix;
        for (int i = 0; i < markerIds.size(); i++)
        {
            aruco::drawAxis(frame4, cameraMatrix, distanceCoefficients, rotationVectors[i], translationVectors[i], 0.01f);
            Rodrigues(rotationVectors[i], rotationMatrix);
            time_t current = time(0);
            cout << " Translation " << translationVectors[i] << " ID" << markerIds[i] << " Euler angles " << 180 / 3.1415 * rotationMatrixToEulerAngles(rotationMatrix) << " current time " << ctime(&current) << endl;
        }
        freopen("output_sample", "a", stdout);
        imshow("recording", frame4);
        if (waitKey(30) >= 0) break;
    }
    return 0;
}
The problem is:
cap >> sample;
if (!cap.read(frame4))
    break;
The program reads a frame from the source twice in every iteration.
You should remove the cap >> sample; line and it will be fine.
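A minimal sketch of the corrected loop, using the question's variable names:

while (true)
{
    if (!cap.read(frame4)) // one read per iteration, so no frames are skipped
        break;
    // ... ArUco detection, pose estimation and drawing as before ...
    imshow("recording", frame4);
    if (waitKey(30) >= 0) break;
}

With the duplicate read gone, a 5 min recording at 30 fps should come out to the expected ~9000 processed frames.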

Wound Segmentation using Wavelet Transform in OpenCV

We tried a local-histogram approach for wound segmentation, which didn't work well for all kinds of images, and then we thought of using the wavelet transform for wound segmentation.
Which wavelet transform would be good for wound segmentation, and do you have some tips for implementing it?
Is there any better way than the wavelet transform to segment wounds under all lighting conditions?
We also tried image clustering, which didn't go that well.
Here are some test cases and the clustering program we used.
#include "cv.h"
#include "highgui.h"
#include <iostream>
void show_result(const cv::Mat& labels, const cv::Mat& centers, int height, int width);
int main(int argc, const char * argv[])
{
    cv::Mat image = cv::imread("kmean.jpg");
    if (image.empty()) {
        std::cout << "unable to load an input image\n";
        return 1;
    }
    //cv::cvtColor(image, image, CV_BGR2HSV);
    std::cout << "image: " << image.rows << ", " << image.cols << std::endl;
    assert(image.type() == CV_8UC3);
    cv::imshow("image", image);

    cv::Mat reshaped_image = image.reshape(1, image.cols * image.rows);
    std::cout << "reshaped image: " << reshaped_image.rows << ", " << reshaped_image.cols << std::endl;
    assert(reshaped_image.type() == CV_8UC1);
    //check0(image, reshaped_image);

    cv::Mat reshaped_image32f;
    reshaped_image.convertTo(reshaped_image32f, CV_32FC1, 1.0 / 255.0);
    std::cout << "reshaped image 32f: " << reshaped_image32f.rows << ", " << reshaped_image32f.cols << std::endl;
    assert(reshaped_image32f.type() == CV_32FC1);

    cv::Mat labels;
    int cluster_number = 4;
    cv::TermCriteria criteria(cv::TermCriteria::COUNT, 100, 1);
    cv::Mat centers;
    cv::kmeans(reshaped_image32f, cluster_number, labels, criteria, 1, cv::KMEANS_PP_CENTERS, centers);

    show_result(labels, centers, image.rows, image.cols);
    return 0;
}

void show_result(const cv::Mat& labels, const cv::Mat& centers, int height, int width)
{
    std::cout << "===\n";
    std::cout << "labels: " << labels.rows << " " << labels.cols << std::endl;
    std::cout << "centers: " << centers.rows << " " << centers.cols << std::endl;
    assert(labels.type() == CV_32SC1);
    assert(centers.type() == CV_32FC1);

    cv::Mat rgb_image(height, width, CV_8UC3);
    cv::MatIterator_<cv::Vec3b> rgb_first = rgb_image.begin<cv::Vec3b>();
    cv::MatIterator_<cv::Vec3b> rgb_last = rgb_image.end<cv::Vec3b>();
    cv::MatConstIterator_<int> label_first = labels.begin<int>();

    cv::Mat centers_u8;
    centers.convertTo(centers_u8, CV_8UC1, 255.0);
    cv::Mat centers_u8c3 = centers_u8.reshape(3);

    while (rgb_first != rgb_last) {
        const cv::Vec3b& rgb = centers_u8c3.ptr<cv::Vec3b>(*label_first)[0];
        *rgb_first = rgb;
        ++rgb_first;
        ++label_first;
    }
    cv::imshow("tmp", rgb_image);
    cv::waitKey();
}
Wound-1 with background (two clusters):
Wound-1 without background:
Wound-2 with background:
Wound-2 without background (three clusters):
When we remove the background we get somewhat better segmentation, but to remove the background we are using GrabCut, which relies on manual operation. So we need a substitute for k-means clustering for segmenting the image, or some improvements to the above code, to succeed in all cases.
So is there any better way to segment the wounds?
Instead of attempting to use the traditional wavelet transform, you may want to try Haar-like wavelets tuned for object detection tasks, similar to the basis of the integral images used in the Viola-Jones face detector. This paper by Lienhart et al., used for generic object detection, would be a good start.
From the looks of your example images, the variance of intensities within small pixel neighbourhoods in the wound is a lot higher, whereas the unbruised skin appears fairly uniform in small neighbourhoods. The features described in the Lienhart paper should be able to capture such variations - you can either feed the features into a machine learning setup, or just make manual observations and define the search windows and related heuristics.
Hope this helps.
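As a cheap first pass on that observation, local intensity variance can be mapped directly with two box filters before committing to a full Haar-feature pipeline. A minimal sketch (the 15x15 window, the input filename and the Otsu binarization are assumptions to tune, not values from the question):

#include <opencv2/opencv.hpp>

int main() {
    cv::Mat image = cv::imread("kmean.jpg", cv::IMREAD_GRAYSCALE);
    if (image.empty())
        return 1;

    // Var(X) = E[X^2] - E[X]^2, computed per pixel over a local window.
    cv::Mat f, mean, meanSq, variance;
    image.convertTo(f, CV_32F, 1.0 / 255.0);
    cv::boxFilter(f, mean, CV_32F, cv::Size(15, 15));
    cv::boxFilter(f.mul(f), meanSq, CV_32F, cv::Size(15, 15));
    variance = meanSq - mean.mul(mean);

    // Normalize for display and binarize into a rough wound mask.
    cv::Mat vis, mask;
    cv::normalize(variance, vis, 0, 255, cv::NORM_MINMAX, CV_8U);
    cv::threshold(vis, mask, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
    cv::imshow("local variance", vis);
    cv::imshow("mask", mask);
    cv::waitKey();
    return 0;
}

High-variance regions should light up over the wound texture while uniform skin stays dark; a morphological opening on the mask would clean up isolated speckles.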

OpenCV Capture from Camera Results in Noise-like Image

I am using OpenCV on an embedded target board (FriendlyARM mini6410, ARM1176 processor running Linux kernel 2.6.38).
I compiled the OpenCV 2.4.4 library using the toolchain provided for the board, found on the FTP site (please see the FriendlyARM website). I disabled GTK and ffmpeg and enabled V4L. The library compiled successfully.
Then I wrote this code:
#include <opencv.hpp>
#include <highgui/highgui.hpp>
#include <imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
int main()
{
    cout << "initialise" << endl;
    IplImage* img = 0;
    cout << "capturing ..." << endl;
    CvCapture* capture = cvCaptureFromCAM(2);
    cout << "get here" << endl;
    if (!capture) {
        cout << "not capture" << endl;
        return -1;
    }
    cout << "captured" << endl;
    img = cvQueryFrame(capture);
    IplImage* img1 = cvCreateImage(cvGetSize(img), 8, 3);
    // cvCvtColor(img, img1, CV_RGB2GRAY);
    cvCopy(img, img1);
    cvSaveImage("cam_snap.jpg", img1);
    cvReleaseImage(&img1);
    // note: img points to a buffer owned by the capture, so it must
    // not be released with cvReleaseImage
    cvReleaseCapture(&capture);
    cout << "exit" << endl;
    return 0;
}
The code builds successfully. I run the executable on the target board, connected to the camera (PS3 Eye), but the resulting image looks like a broken television (noise-like):
On my host machine, however, the resulting image is as expected (the scene in front of the camera). Can you suggest what went wrong, or where I should start debugging?
You should check your depth and channels. It is probably a matter of alignment; moreover, be careful: your image is probably in BGR and not RGB.
You should also use cv::Mat instead of IplImage in C++, and VideoCapture instead of CvCapture.
This sample of code should work (not tested on the same architecture as yours):
#include <opencv.hpp>
#include <highgui/highgui.hpp>
#include <imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
int main()
{
    VideoCapture capture(0);
    cout << "get here" << endl;
    if (!capture.isOpened()) // check if we succeeded
        return -1;
    cout << "captured" << endl;
    Mat img;
    capture >> img;
    imwrite("./test.png", img);
    capture.release();
    cout << "exit" << endl;
    return 0;
}
Hope it helped.
Okay, confirmed: the mini6410 runs USB 1.0, and the PS3 Eye needs USB 2.0. I tried the program with a standard webcam (a Chinese product, an iTech PC camera) and it works wonderfully; the saved image shows the scene in front of the camera.

OpenCV cvblob -- render blobs in colors other than white

I am trying to detect objects with cvblob. Somehow, my code only marks white objects. How can I mark objects of other colors, like a can of beer or a bottle of water?
Here is my code:
#include "highgui.h"
#include "cv.h"
#include "cvaux.h"
#include "iostream"
#include <stdio.h>
#include <ctype.h>
#include <cvblob.h>
using namespace cv;
using namespace std;
using namespace cvb;
int main(int argc, char** argv) {
    CvTracks tracks;
    cvNamedWindow("frame", CV_WINDOW_AUTOSIZE);
    cvMoveWindow("frame", 50, 100);
    CvCapture* capture;
    IplImage* frame = 0;
    capture = cvCreateCameraCapture(0); // capture frames from cam on index 0: /dev/video0/
    if (!capture) {
        return -1;
    }
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 320);
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 240);
    frame = cvQueryFrame(capture);
    while (frame) {
        IplImage *gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
        cvCvtColor(frame, gray, CV_BGR2GRAY);
        cvThreshold(gray, gray, 150, 255, CV_THRESH_BINARY);
        IplImage *labelImg = cvCreateImage(cvGetSize(gray), IPL_DEPTH_LABEL, 1);
        CvBlobs blobs;
        unsigned int result = cvLabel(gray, labelImg, blobs);
        cvFilterByArea(blobs, 500, 1000000);
        // cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX);
        cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_CENTROID);
        cvUpdateTracks(blobs, tracks, 200., 5);
        cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID | CV_TRACK_RENDER_BOUNDING_BOX);
        for (CvBlobs::const_iterator it = blobs.begin(); it != blobs.end(); ++it) {
            cout << "Blob #" << it->second->label << ": Area=" << it->second->area << ", Centroid=(" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
        }
        cvShowImage("frame", frame);
        cout << "----------------------------" << endl;
        // release the per-frame images and blobs so the loop does not leak memory
        cvReleaseImage(&gray);
        cvReleaseImage(&labelImg);
        cvReleaseBlobs(blobs);
        frame = cvQueryFrame(capture);
        char c = cvWaitKey(10);
        if (c == 27)
            break;
    }
    cvReleaseCapture(&capture);
    return 0;
}
Any tip is appreciated.
Milo
That's the default behaviour, and you cannot change it without changing the source code of the cvblob library.
If you really want to change it, it is easy: create a copy of the same method and add a new input parameter, such as a CvScalar, to select the output color.
The method cvRenderBlob is in cvcontour.cpp.
I've made many improvements to the cvblob library, and in the coming months I will push them to the maintainer.
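If patching the library is not an option, a small helper can also draw the blobs in any color using only the public CvBlob fields; a sketch (the helper name and drawing choices are illustrative, not part of cvblob):

// Draw each blob's centroid and bounding box in a caller-chosen color.
void renderBlobsColored(const CvBlobs &blobs, IplImage *img, CvScalar color)
{
    for (CvBlobs::const_iterator it = blobs.begin(); it != blobs.end(); ++it) {
        const CvBlob *b = it->second;
        cvCircle(img, cvPoint((int)b->centroid.x, (int)b->centroid.y), 3, color, CV_FILLED);
        cvRectangle(img, cvPoint(b->minx, b->miny), cvPoint(b->maxx, b->maxy), color, 1);
    }
}

Called in place of cvRenderBlobs, e.g. renderBlobsColored(blobs, frame, CV_RGB(0, 255, 0)); for green boxes.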
Try adding:
cvInRangeS(hsvframe, cvScalar(23,41,133), cvScalar(40,150,255), threshy); // for yellow
before filtering the blobs. It is a range of HSV (instead of RGB) values that defines the threshold of the desired color.
Hope it helps.
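To make that concrete, here is a sketch of how the in-range mask could replace the grayscale threshold inside the question's loop (the HSV bounds are the yellow values from the answer above and would need retuning for other colors):

// Replace the gray/cvThreshold step with an HSV in-range mask.
IplImage *hsvframe = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
IplImage *threshy = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
cvCvtColor(frame, hsvframe, CV_BGR2HSV);
cvInRangeS(hsvframe, cvScalar(23, 41, 133), cvScalar(40, 150, 255), threshy); // yellow
// threshy now feeds cvLabel in place of the binary gray image:
IplImage *labelImg = cvCreateImage(cvGetSize(threshy), IPL_DEPTH_LABEL, 1);
CvBlobs blobs;
cvLabel(threshy, labelImg, blobs);

Thresholding in HSV keys the mask on hue rather than brightness, which is why it can pick out a colored can or bottle that a plain grayscale threshold at 150 would miss.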
