Convert MATLAB image SVD method to OpenCV

I want to write a program with OpenCV in C++ in Visual Studio.
My code should follow this MATLAB code:
close all
clear all
clc

% reading and converting the image
inImage = imread('pic.jpg');
inImageD = double(inImage);   % assumed grayscale: svd needs a 2-D matrix
[U, S, V] = svd(inImageD);

% Using different numbers of singular values (diagonal of S) to compress and
% reconstruct the image
dispEr = [];
numSVals = [];
for N = 5:25:300
    % store the singular values in a temporary var
    C = S;
    % discard the diagonal values not required for compression
    C(N+1:end, :) = 0;
    C(:, N+1:end) = 0;
    % construct an image using the selected singular values
    D = U*C*V';
    % display and compute error
    figure;
    buffer = sprintf('Image output using %d singular values', N);
    imshow(uint8(D));
    title(buffer);
    error = sum(sum((inImageD-D).^2));
    % store vals for display
    dispEr = [dispEr; error];
    numSVals = [numSVals; N];
end
What's your opinion on how to do this? I want to save the image in a text file and retrieve it from the file into a Mat array. I've written this part as follows:
Mat image;
FileStorage read_file("pic_file.txt", FileStorage::READ);
read_file["pic"] >> image;
read_file.release();
Mat P;
image.convertTo(P, CV_32FC3,1.0/255);
SVD svda(P); //or SVD::compute(P,W,U,V);
But I have a problem with the SVD function: it doesn't work. Is there anything I can do to compute an SVD compression of an image?
Thank you so much.
Vahids.

Here is my code:
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main(int argc, char* argv[])
{
    // Image matrices
    Mat img;
    Mat result;

    namedWindow("Source Image");
    namedWindow("Result");

    // Load the image in grayscale mode: SVD needs a single-channel matrix
    img = imread("D:\\ImagesForTest\\cat.bmp", 0);
    img.convertTo(img, CV_32FC1, 1.0/255.0);
    cout << "Source size: " << img.rows*img.cols << " elements" << endl;

    // Compute the SVD
    Mat w, u, vt;
    SVD::compute(img, w, u, vt);

    // Collect the Sigma matrix (singular values on the diagonal, zeros elsewhere).
    // compute() returns the singular values as a vector, so build a diagonal matrix from it.
    Mat W = Mat::zeros(w.rows, w.rows, CV_32FC1);
    for (int i = 0; i < w.rows; i++)
    {
        W.at<float>(i, i) = w.at<float>(i);
    }

    // Reduce the rank to k
    int k = 25;
    W = W(Range(0, k), Range(0, k));
    u = u(Range::all(), Range(0, k));
    vt = vt(Range(0, k), Range::all());

    // Get the compressed image
    result = u*W*vt;
    cout << "Result size: " << u.rows*u.cols + k + vt.rows*vt.cols << " elements" << endl;

    imshow("Source Image", img);
    imshow("Result", result);
    waitKey(0);
    return 0;
}
Source and result images.
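Note that cv::SVD only accepts a single-channel floating-point matrix, which is most likely why the CV_32FC3 attempt in the question failed. For a colour image you can compress each channel separately; here is a minimal sketch, where imgColor is an assumed CV_32FC3 input and compressChannel() is a hypothetical helper wrapping the rank-k truncation above:

// Sketch: per-channel SVD compression of a colour image.
std::vector<cv::Mat> channels, compressedChannels;
cv::split(imgColor, channels); // imgColor: the CV_32FC3 colour input
for (size_t i = 0; i < channels.size(); i++)
    compressedChannels.push_back(compressChannel(channels[i], 25)); // hypothetical helper
cv::Mat resultColor;
cv::merge(compressedChannels, resultColor);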

Related

c++ vector push_back inconsistent behavior

I push back two sets (A = {A1, A2, A3}, B = {B1, B2, B3}) of equal matrices (A1 = B1, A2 = B2, A3 = B3) of the same type (CV_32FC1) into two different vectors (Va and Vb) of the same type. When I compare the contents of the vectors pair by pair (Va[0] vs Vb[0], Va[1] vs Vb[1], Va[2] vs Vb[2]), they are different. How is this possible?
Code explanation:
A = imgs_lab_channels.
Lab_channel_current_FG = foreground image
Lab_channel_current_BG = background image
Lab_channel_current = Lab_channel_current_FG + Lab_channel_current_BG
Each Lab_channel_current is pushed into the vector Lab_channels, so B = Lab_channels.
I check that Lab_channel_current = imgs_lab_channels[i]. When I read the matrices back from the vectors A and B, they are different.
Code snippet:
std::vector<cv::Mat> imgs_lab_channels;
split(imgs_lab, imgs_lab_channels);

std::vector<cv::Mat> Lab_channels;
cv::Mat Lab_channel_current_FG;
cv::Mat Lab_channel_current_BG;
cv::Mat Lab_channel_current;

for (int i = 0; i < 3; i++)
{
    // FG_mask and BG_mask are 0-1 binary matrices of type CV_32FC1
    // with the same size as imgs_lab_channels. FG_mask and BG_mask
    // select the foreground and background respectively. Their
    // construction is omitted for the sake of clarity.
    Lab_channel_current_FG = imgs_lab_channels[i].mul(FG_mask);
    Lab_channel_current_BG = imgs_lab_channels[i].mul(BG_mask);
    // add the FG and the BG image
    Lab_channel_current = Lab_channel_current_FG + Lab_channel_current_BG;
    Lab_channels.push_back(Lab_channel_current);
}

cv::Mat temp2;
for (int j = 0; j < 3; j++)
{
    temp2 = Lab_channels[j] - imgs_lab_channels[j];
    double min2, max2;
    cv::minMaxLoc(temp2, &min2, &max2);
    std::cout << "temp2 min,max = " << min2 << "," << max2 << std::endl;
}
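As a side note (my addition, not an answer): if FG_mask and BG_mask really are exactly complementary 0-1 masks, the multiply-and-add should reproduce each channel bit-for-bit even in float arithmetic, so any non-zero difference is worth quantifying precisely:

// Diagnostic sketch: largest absolute deviation per channel.
for (int j = 0; j < 3; j++)
{
    double maxAbsDiff = cv::norm(Lab_channels[j], imgs_lab_channels[j], cv::NORM_INF);
    std::cout << "channel " << j << " max |diff| = " << maxAbsDiff << std::endl;
}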

Wound Segmentation using Wavelet Transform in OpenCV

We tried a local-histogram approach for wound segmentation, which didn't work well for all kinds of images, and then we thought of using the wavelet transform for wound segmentation.
Which wavelet transform would be good for wound segmentation, and are there any tips for implementing it?
Is there any better way than the wavelet transform to segment a wound under all light conditions?
We also tried image clustering, which didn't go that well either.
Here are some test cases and the clustering program we used.
#include "cv.h"
#include "highgui.h"
#include <iostream>
void show_result(const cv::Mat& labels, const cv::Mat& centers, int height, int width);
int main(int argc, const char * argv[])
{
cv::Mat image = cv::imread("kmean.jpg");
if ( image.empty() ) {
std::cout << "unable to load an input image\n";
return 1;
}
//cv::cvtColor(image,image,CV_BGR2HSV);
std::cout << "image: " << image.rows << ", " << image.cols << std::endl;
assert(image.type() == CV_8UC3);
cv::imshow("image", image);
cv::Mat reshaped_image = image.reshape(1, image.cols * image.rows);
std::cout << "reshaped image: " << reshaped_image.rows << ", " << reshaped_image.cols << std::endl;
assert(reshaped_image.type() == CV_8UC1);
//check0(image, reshaped_image);
cv::Mat reshaped_image32f;
reshaped_image.convertTo(reshaped_image32f, CV_32FC1, 1.0 / 255.0);
std::cout << "reshaped image 32f: " << reshaped_image32f.rows << ", " << reshaped_image32f.cols << std::endl;
assert(reshaped_image32f.type() == CV_32FC1);
cv::Mat labels;
int cluster_number = 4;
cv::TermCriteria criteria(cv::TermCriteria::COUNT, 100, 1);
cv::Mat centers;
cv::kmeans(reshaped_image32f, cluster_number, labels, criteria, 1, cv::KMEANS_PP_CENTERS, centers);
show_result(labels, centers, image.rows,image.cols);
return 0;
}
void show_result(const cv::Mat& labels, const cv::Mat& centers, int height, int width)
{
std::cout << "===\n";
std::cout << "labels: " << labels.rows << " " << labels.cols << std::endl;
std::cout << "centers: " << centers.rows << " " << centers.cols << std::endl;
assert(labels.type() == CV_32SC1);
assert(centers.type() == CV_32FC1);
cv::Mat rgb_image(height, width, CV_8UC3);
cv::MatIterator_<cv::Vec3b> rgb_first = rgb_image.begin<cv::Vec3b>();
cv::MatIterator_<cv::Vec3b> rgb_last = rgb_image.end<cv::Vec3b>();
cv::MatConstIterator_<int> label_first = labels.begin<int>();
cv::Mat centers_u8;
centers.convertTo(centers_u8, CV_8UC1, 255.0);
cv::Mat centers_u8c3 = centers_u8.reshape(3);
while ( rgb_first != rgb_last ) {
const cv::Vec3b& rgb = centers_u8c3.ptr<cv::Vec3b>(*label_first)[0];
*rgb_first = rgb;
++rgb_first;
++label_first;
}
cv::imshow("tmp", rgb_image);
cv::waitKey();
}
Wound-1 with background (two clusters):
Wound-1 without background:
Wound-2 with background:
Wound-2 without background (three clusters):
When we remove the background we get somewhat better segmentation, but to remove the background we are using grab-cut, which relies on manual operation. So we need a substitute for k-means clustering for segmenting the image, or some improvement to the above code, to handle all cases reliably.
So is there any better way to segment the wounds?
Instead of attempting to use the traditional wavelet transform, you may want to try Haar-like wavelets tuned for object detection tasks, similar to the basis of the integral images used in the Viola-Jones face detector. This paper by Lienhart et al., used for generic object detection, would be a good start.
From the looks of your example images, the variance of intensities within small pixel neighbourhoods in the wound is a lot higher, whereas the unbruised skin appears fairly uniform in small neighbourhoods. The features from the Lienhart paper should be able to detect such variations: you can either feed them into a machine-learning setup, or just make manual observations and define the search windows and related heuristics.
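As a quick illustration of that observation, here is a minimal local-variance sketch (my addition, not from the paper), assuming a grayscale CV_32FC1 image named gray and an arbitrary 9x9 window:

// Local variance map: var = E[x^2] - (E[x])^2 over a k x k neighbourhood.
// Wound regions should light up where intensity variation is high.
cv::Mat mean, meanSq, variance;
cv::boxFilter(gray, mean, CV_32F, cv::Size(9, 9));
cv::boxFilter(gray.mul(gray), meanSq, CV_32F, cv::Size(9, 9));
variance = meanSq - mean.mul(mean);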
Hope this helps.

Error level analysis in Image

How do I compute ELA for an image? I would like to get a similar ELA image using OpenCV: http://fotoforensics.com/tutorial-ela.php
As per this tutorial, I resaved the image as a 95%-quality JPEG and used absdiff to compute the difference between the source image and the resaved image, but all I am getting is zero difference.
Any help on how to compute the difference between the two images, so as to obtain the error level just like the sample images in the tutorial?
The key to achieving a similar result is to use a variable value for the compression rate and a scale factor to make the data easier to visualize.
Here's an example: we have the input image (left) and the processed image after some parameter adjustments (right):
As expected, the region with the Christmas hat presents a different compression rate from the rest of the image. This result is very similar to what FotoForensics produces:
With a few tweaks to this code you can achieve an even closer result. The source code of this project can be found on my GitHub:
main.cpp:
#include <opencv2/highgui/highgui.hpp>
#include <cstdlib>
#include <iostream>
#include <vector>

// Controls
int scale = 15,
    quality = 75;

// Image containers
cv::Mat input_image,
        compressed_image;

void processImage(int, void*)
{
    // Set up the parameters for JPEG compression
    std::vector<int> parameters;
    parameters.push_back(CV_IMWRITE_JPEG_QUALITY);
    parameters.push_back(quality);
    cv::imwrite("temp.jpg", input_image, parameters);

    // Read the temp image back from the disk
    compressed_image = cv::imread("temp.jpg");
    if (compressed_image.empty())
    {
        std::cout << "> Error loading temp image" << std::endl;
        exit(EXIT_FAILURE);
    }

    cv::Mat output_image = cv::Mat::zeros(input_image.size(), CV_8UC3);

    // Compare values through the matrices
    for (int row = 0; row < input_image.rows; ++row)
    {
        const uchar* ptr_input = input_image.ptr<uchar>(row);
        const uchar* ptr_compressed = compressed_image.ptr<uchar>(row);
        uchar* ptr_out = output_image.ptr<uchar>(row);

        for (int column = 0; column < input_image.cols; column++)
        {
            // Absolute difference per colour channel, multiplied by a scale
            // factor; saturate_cast keeps the result from wrapping past 255
            ptr_out[0] = cv::saturate_cast<uchar>(abs(ptr_input[0] - ptr_compressed[0]) * scale);
            ptr_out[1] = cv::saturate_cast<uchar>(abs(ptr_input[1] - ptr_compressed[1]) * scale);
            ptr_out[2] = cv::saturate_cast<uchar>(abs(ptr_input[2] - ptr_compressed[2]) * scale);
            ptr_input += 3;
            ptr_compressed += 3;
            ptr_out += 3;
        }
    }

    // Show the processed image
    cv::imshow("Error Level Analysis", output_image);
}

int main(int argc, char* argv[])
{
    // Check that the required number of parameters was provided
    if (argc < 2)
    {
        std::cout << "> You need to provide an image as parameter" << std::endl;
        return EXIT_FAILURE;
    }

    // Read the image
    input_image = cv::imread(argv[1]);
    if (input_image.empty())
    {
        std::cout << "> Error loading input image" << std::endl;
        return EXIT_FAILURE;
    }

    // Set up the window and trackbars
    cv::namedWindow("Error Level Analysis", CV_WINDOW_AUTOSIZE);
    cv::imshow("Error Level Analysis", input_image);
    cv::createTrackbar("Scale", "Error Level Analysis", &scale, 100, processImage);
    cv::createTrackbar("Quality", "Error Level Analysis", &quality, 100, processImage);

    // Press 'q' to quit
    while (char(cv::waitKey(0)) != 'q') {}
    return EXIT_SUCCESS;
}
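As an aside, the per-pixel loop above can be expressed more compactly with OpenCV primitives; a minimal sketch of my variant, using the same input_image, compressed_image and scale globals:

// Sketch: the same ELA difference via cv::absdiff.
cv::Mat output_image;
cv::absdiff(input_image, compressed_image, output_image); // per-channel |a - b|
output_image *= scale; // saturating scale, like the loop above
cv::imshow("Error Level Analysis", output_image);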
Here are some nice references that were used to build this mash-up:
ELA with HTML5
FotoForensics Tutorial
Blackhat USA '07 Paper

Creating a Mat object from a YV12 image buffer

I have a buffer which contains an image in YV12 format. Now I want to either convert this buffer to RGB format or create a Mat object from it directly! Can someone help me? I tried this code :
cv::Mat input(widthOfImg, heightOfImg, CV_8UC1, vy12Buffer);
cv::Mat converted;
cv::cvtColor(input, converted, CV_YUV2RGB_YV12);
That's possible.
cv::Mat picYV12 = cv::Mat(nHeight * 3/2, nWidth, CV_8UC1, yv12DataBuffer);
cv::Mat picBGR;
cv::cvtColor(picYV12, picBGR, CV_YUV2BGR_YV12);
cv::imwrite("test.bmp", picBGR); //only for test
OpenCV color conversion flags
The height is multiplied by 3/2 because there are 4 Y samples plus 1 U and 1 V sample stored for every 2x2 square of pixels. This gives a sample-to-pixel ratio of 3/2:
(4 + 1 + 1) samples per 2x2 pixels = 6/4 = 3/2
YV12 Format
Correction: in recent versions of OpenCV (I use the older 2.4.13) the colour conversion code was renamed to COLOR_YUV2BGR_YV12:
cv::cvtColor(picYV12, picBGR, COLOR_YUV2BGR_YV12);
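To make the plane arithmetic concrete, here is a sketch of the offsets into a width x height YV12 buffer (my illustration; buffer, width and height are assumed):

// YV12 plane layout: full-resolution Y, then V, then U (quarter size each).
size_t ySize = (size_t)width * height;
size_t cSize = ySize / 4;          // each chroma plane covers 2x2 pixel blocks
uchar* pY = buffer;                // bytes [0, ySize)
uchar* pV = buffer + ySize;        // V comes before U in YV12
uchar* pU = buffer + ySize + cSize;
// total = ySize + 2*cSize = width * height * 3/2 bytes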
Here is the corresponding version in Java (Android).
This method was faster than other techniques like RenderScript or OpenGL (glReadPixels) for getting a bitmap from a YUV12/I420 data stream (tested with WebRTC I420).
long startTimei = SystemClock.uptimeMillis();
Mat picyv12 = new Mat(768, 512, CV_8UC1);  // (im_height * 3/2, im_width); dimensions should be even numbers
picyv12.put(0, 0, return_buff);            // return_buff - byte array with I420 data
Imgproc.cvtColor(picyv12, picyv12, COLOR_YUV2RGB_YV12); // or COLOR_YUV2BGR_YV12, depending on the desired output
long endTimei = SystemClock.uptimeMillis();
Log.d("i420_time", Long.toString(endTimei - startTimei));
Log.d("picyv12_size", picyv12.size().toString());       // check size
Log.d("picyv12_type", String.valueOf(picyv12.type()));  // check type
Utils.matToBitmap(picyv12, tbmp2); // convert Mat to bitmap (height, width) i.e. (512, 512) - ARGB_8888
save(tbmp2, "itest");              // save bitmap
That's impossible.
Y'UV420p is a planar format, meaning that the Y', U, and V values are grouped together instead of interspersed. The reason for this is that by grouping the U and V values together, the image becomes much more compressible. When given an array of an image in the Y'UV420p format, all the Y' values come first, followed by all the U values, followed finally by all the V values.
But cv::Mat stores a colour image interleaved, arranged like B0 G0 R0 B1 G1 R1 ..., so we can't create a Mat object from a YV12 buffer directly.
Here is an example:
cv::Mat Yv12ToRgb(uchar *pBuffer, long bufferSize, int width, int height)
{
    // assumes even width and height
    cv::Mat result(height, width, CV_8UC3); // interleaved BGR, OpenCV's usual order
    long ySize = (long)width * height;
    long uSize = ySize >> 2;        // each chroma plane is a quarter of the luma plane
    assert(bufferSize == ySize + uSize * 2);

    uchar *pY = pBuffer;            // full-resolution luma plane
    uchar *pV = pY + ySize;         // YV12 stores the V plane before the U plane
    uchar *pU = pV + uSize;
    uchar *output = result.data;

    for (int r = 0; r < height; ++r)
    {
        for (int c = 0; c < width; ++c)
        {
            uchar y = pY[r * width + c];
            // each chroma sample covers a 2x2 block of luma samples
            long chromaIdx = (long)(r / 2) * (width / 2) + (c / 2);
            uchar cb = pU[chromaIdx];
            uchar cr = pV[chromaIdx];

            // ITU-R standard conversion, written as B, G, R
            *output++ = cv::saturate_cast<uchar>(y + 1.772 * (cb - 128));
            *output++ = cv::saturate_cast<uchar>(y - 0.344 * (cb - 128) - 0.714 * (cr - 128));
            *output++ = cv::saturate_cast<uchar>(y + 1.402 * (cr - 128));
        }
    }
    return result;
}
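Hypothetical usage of the function above, assuming pBuffer already holds a full YV12 frame:

long bufSize = (long)width * height * 3 / 2;
cv::Mat bgr = Yv12ToRgb(pBuffer, bufSize, width, height); // interleaved BGR, despite the name
cv::imshow("converted", bgr);
cv::waitKey(0);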
You can try reading it as a YUV I420 array:
char filePath[3000];
int width, height;
cout << "file path = ";
cin >> filePath;
cout << "width = ";
cin >> width;
cout << "height = ";
cin >> height;

FILE *pFile = fopen(filePath, "rb");
if (pFile == NULL)
{
    cout << "unable to open " << filePath << endl;
    return 1;
}
unsigned char* buff = new unsigned char[width * height * 3 / 2];
fread(buff, 1, width * height * 3 / 2, pFile);
fclose(pFile);

cv::Mat imageRGB;
cv::Mat picI420 = cv::Mat(height * 3 / 2, width, CV_8UC1, buff);
cv::cvtColor(picI420, imageRGB, CV_YUV2BGRA_I420);

imshow("imageRGB", imageRGB);
waitKey(0);
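Note that CV_YUV2BGRA_I420 produces a 4-channel BGRA image even though the variable is called imageRGB; if a plain 3-channel image is wanted, the corresponding flag can be swapped in:

cv::cvtColor(picI420, imageRGB, CV_YUV2BGR_I420); // 3-channel BGR instead of BGRA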

inRange Query image output

I've started learning OpenCV and have written the code below to obtain the output of an image (from the input camera) in HSV, plus an inRange image.
The HSV output is fine, but the inRange output is just blank. Please help, I'm stumped.
int main(int argc, char** argv)
{
    VideoCapture camera(CV_CAP_ANY);
    Mat input;
    Mat output;      // allocated by cvtColor below
    Mat img_thresh;  // allocated by inRange below

    namedWindow("input", 0);
    namedWindow("output", 0);
    namedWindow("threshold", 0);

    cv::Scalar hsv_min(0, 30, 80, 0);
    cv::Scalar hsv_max(20, 150, 255, 0);

    for (;;)
    {
        camera >> input;
        cvtColor(input, output, CV_BGR2HSV, 1);
        cv::inRange(input, hsv_min, hsv_max, img_thresh);
        imshow("input", input);
        imshow("output", output);
        imshow("threshold", img_thresh);
        cv::waitKey(40);
    }
    return 0;
}
You apply the inRange function to the input BGR image, but you have to apply it to the HSV image, which is named output in your code. So the line should be:
cv::inRange(output, hsv_min, hsv_max, img_thresh);
Your code was working, but you were not using the right image!
If you want to know the HSV value of a pixel in your image, I suggest you use:
cvSetMouseCallback("input", getObjectColor);
And:
void getObjectColor(int event, int x, int y, int flags, void *param = NULL)
{
    // Vars
    CvScalar pixel;
    IplImage *hsv;

    if (event == CV_EVENT_LBUTTONUP) {
        // Get the hsv image
        hsv = cvCloneImage(image);
        cvCvtColor(image, hsv, CV_BGR2HSV);

        // Get the selected pixel
        pixel = cvGet2D(hsv, y, x);
        cvShowImage("getObjColor", hsv);

        // Change the value of the tracked color to the color of the selected pixel
        h = (int)pixel.val[0];
        s = (int)pixel.val[1];
        v = (int)pixel.val[2];
        cout << "Color HSV = h:" << pixel.val[0] << " | s:" << pixel.val[1] << " | v:" << pixel.val[2] << endl;

        // Release the memory of the hsv image
        cvReleaseImage(&hsv);
    }
}
You will need to create some variables (image, h, s, v) to make this work; the code was taken from the internet (I can't remember where!).
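For reference, a rough equivalent with the C++ API (my sketch; like the IplImage version above, it assumes a global cv::Mat named image holding the current frame):

void getObjectColor(int event, int x, int y, int, void*)
{
    if (event == cv::EVENT_LBUTTONUP)
    {
        // Convert the clicked frame to HSV and read the pixel under the cursor
        cv::Mat hsv;
        cv::cvtColor(image, hsv, CV_BGR2HSV);
        cv::Vec3b pixel = hsv.at<cv::Vec3b>(y, x);
        std::cout << "Color HSV = h:" << (int)pixel[0]
                  << " | s:" << (int)pixel[1]
                  << " | v:" << (int)pixel[2] << std::endl;
    }
}

// registered once after namedWindow("input", 0):
// cv::setMouseCallback("input", getObjectColor);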
