C++ vector push_back inconsistent behavior - OpenCV

I push back two sets (A = {A1, A2, A3}, B = {B1, B2, B3}) of equal matrices (A1 = B1, A2 = B2, A3 = B3) of the same type (CV_32FC1) into two different vectors (Va and Vb) of the same type. When I compare the contents of the vectors pair by pair (Va[0] vs Vb[0], Va[1] vs Vb[1], Va[2] vs Vb[2]) they are different. How is this possible?
Code explanation:
A = imgs_lab_channels
Lab_channel_current_FG = foreground image
Lab_channel_current_BG = background image
Lab_channel_current = Lab_channel_current_FG + Lab_channel_current_BG
Lab_channel_current is pushed into the vector Lab_channels, so B = Lab_channels.
I check that Lab_channel_current = imgs_lab_channels[i], yet when I read the matrices back from vectors A and B they are different.
Code snippet:
std::vector<cv::Mat> imgs_lab_channels;
split(imgs_lab, imgs_lab_channels);

std::vector<cv::Mat> Lab_channels;
cv::Mat Lab_channel_current_FG;
cv::Mat Lab_channel_current_BG;
cv::Mat Lab_channel_current;

for(int i = 0; i < 3; i++)
{
    // FG_mask and BG_mask are 0-1 binary matrices of type 32FC1
    // and with the same size as imgs_lab_channels. FG_mask and BG_mask
    // select the foreground and background respectively. Omitted for
    // the sake of clarity.
    Lab_channel_current_FG = imgs_lab_channels[i].mul(FG_mask);
    Lab_channel_current_BG = imgs_lab_channels[i].mul(BG_mask);

    // add the FG and the BG image
    Lab_channel_current = Lab_channel_current_FG + Lab_channel_current_BG;

    Lab_channels.push_back(Lab_channel_current);
}

for(int j = 0; j < 3; j++)
{
    cv::Mat temp2 = Lab_channels[j] - imgs_lab_channels[j];
    double min2, max2;
    cv::minMaxLoc(temp2, &min2, &max2);
    std::cout << "temp2 min,max=" << min2 << "," << max2 << std::endl;
}
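As a side note, a minimal way to count exactly how many elements differ per pair, sketched here only for illustration and assuming single-channel CV_32F matrices of the same size (using the variable names from the snippet above):

// Hypothetical check, not part of the original question:
// count the elements where the two matrices differ at all.
for (int j = 0; j < 3; j++)
{
    cv::Mat diffMask;
    cv::compare(Lab_channels[j], imgs_lab_channels[j], diffMask, cv::CMP_NE);
    std::cout << "channel " << j << ": "
              << cv::countNonZero(diffMask) << " differing elements" << std::endl;
}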

Related

How to use cv::imdecode (access image) correctly?

I need help with the following problem:
Task script:
Read in the sensor_msgs/PointCloud2 message, display a bird's eye view image and save it (PNG or JPG).
Desired new function:
Send out the displayed images directly as an image message.
Problem:
cv::Mat *bgr is the matrix that contains the image and passes it to a map (for visualisation only).
Solutions tried so far (from other questions):
opencv read jpeg image from buffer
How to use cv::imdecode, if the contents of an image file are in a char array?
Using different member functions, but unsuccessful.
Code reduced to the necessary snippets
(complete version here: https://drive.google.com/file/d/1HI3E4nM9mQ--oNh1Q7zfwRFGJB5JRiGD/view?usp=sharing)
// Global Publishers/Subscribers
ros::Subscriber subPointCloud;
ros::Publisher pubPointCloud;
image_transport::Publisher pubImage;

pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_grid (new pcl::PointCloud<pcl::PointXYZ>);
sensor_msgs::PointCloud2 output;

// create Matrix to store pointcloud data
cv::Mat *heightmap, *hsv, *bgr;
std::vector<int> compression_params;
std::vector<String> fn; // filename
cv::Mat image;

// main generation function
void DEM(const sensor_msgs::PointCloud2ConstPtr& pointCloudMsg)
{
    ROS_DEBUG("Point Cloud Received");

    // clear cloud and height map array
    lowest = FLT_MAX;
    for(int i = 0; i < IMAGE_HEIGHT; ++i){
        for(int j = 0; j < IMAGE_WIDTH; ++j){
            heightArray[i][j] = (double)(-FLT_MAX);
        }
    }

    // Convert from ROS message to PCL point cloud
    pcl::fromROSMsg(*pointCloudMsg, *cloud);

    // Populate the DEM grid by looping through every point
    int row, column;
    for(size_t j = 0; j < cloud->points.size(); ++j){
        // If the point is within the image size bounds
        if(map_pc2rc(cloud->points[j].x, cloud->points[j].y, &row, &column) == 1 && row >= 0 && row < IMAGE_HEIGHT && column >= 0 && column < IMAGE_WIDTH){
            if(cloud->points[j].z > heightArray[row][column]){
                heightArray[row][column] = cloud->points[j].z;
            }
            // Keep track of lowest point in cloud for flood fill
            else if(cloud->points[j].z < lowest){
                lowest = cloud->points[j].z;
            }
        }
    }

    // Create "point cloud" and opencv image to be published for visualization
    int index = 0;
    double x, y;
    for(int i = 0; i < IMAGE_HEIGHT; ++i){
        for(int j = 0; j < IMAGE_WIDTH; ++j){
            // Add point to cloud
            (void)map_rc2pc(&x, &y, i, j);
            cloud_grid->points[index].x = x;
            cloud_grid->points[index].y = y;
            cloud_grid->points[index].z = heightArray[i][j];
            ++index;

            // Add point to image
            cv::Vec3b &pixel_hsv = hsv->at<cv::Vec3b>(i,j);       // access pixels vector HSV
            cv::Vec3b &pixel_bgr = heightmap->at<cv::Vec3b>(i,j); // access pixels vector BGR
            if(heightArray[i][j] > -FLT_MAX){
                // Coloured Pixel Pointcloud
                pixel_hsv[0] = map_m2i(heightArray[i][j]); // H - color value (hue)
                pixel_hsv[1] = 255;                        // S - color saturation
                pixel_hsv[2] = 255;                        // V - brightness
                // White Pixel PointCloud
                pixel_bgr[0] = map_m2i(heightArray[i][j]); // B
                pixel_bgr[1] = map_m2i(heightArray[i][j]); // G
                pixel_bgr[2] = map_m2i(heightArray[i][j]); // R
            }
            else{
                // Coloured Pixel Pointcloud
                pixel_hsv[0] = 0;
                pixel_hsv[1] = 0;
                pixel_hsv[2] = 0;
                // White Pixel Pointcloud
                pixel_bgr[0] = 0;
                pixel_bgr[1] = 0;
                pixel_bgr[2] = 0; // map_m2i(lowest);
            }
        }
    }

    // Display image
    cv::cvtColor(*hsv, *bgr, cv::COLOR_HSV2BGR); // HSV matrix (src) to BGR matrix (dst)
    // Image denoising (filter strength, pixel size template patch, pixel size window)
    //cv::fastNlMeansDenoising(*hsv, *bgr, 30, 7, 11);
    // Image denoising (filter strength luminance, same colored, pixel size template patch, pixel size window)
    //cv::fastNlMeansDenoisingColored(*hsv, *bgr, 30, 1, 7, 11);

    // Plot HSV (colored) and BGR (b/w)
    cv::imshow(WIN_NAME, *bgr);        // show new HSV matrix
    cv::imshow(WIN_NAME2, *heightmap); // show old BGR matrix

    // Save image to disk
    char filename[100];
    // FLAG enable/disable saving function
    if (save_to_disk == true)
    {
        // save JPG format
        snprintf(filename, 100, "/home/pkatsoulakos/catkin_ws/images/image_%d.jpg", fnameCounter);
        std::cout << filename << std::endl;
        // JPG image writing
        cv::imwrite(filename, *bgr, compression_params);

        /* // generate pathnames matching a pattern
        glob("/home/pkatsoulakos/catkin_ws/images/*.jpg", fn); // directory, filter pattern
        // range based for loop
        for (auto f : fn) // range declaration : range_expression
        {
            image = cv::imread(f, IMREAD_COLOR);
            if (image.empty())
            {
                std::cout << "!!! Failed imread(): image not found" << std::endl;
            }
        } */

        // Approach 2
        //cv::Mat rawdata(1, bgr, CV_8UC1, (void*)bgr);
        image = cv::imdecode(cv::Mat(*bgr, CV_8UC3, CV_AUTO_STEP), IMREAD_COLOR);
        //image = cv::imdecode(cv::Mat(*bgr, CV_8UC1), IMREAD_UNCHANGED);
        if (image.data == NULL)
        {
            std::cout << "!!! Failed imread(): image not found" << std::endl;
        }

        /* // save PNG format
        snprintf(filename, 100, "/home/pkatsoulakos/catkin_ws/images/image_%d.png", fnameCounter);
        std::cout << filename << std::endl;
        // PNG image writing
        // cv::imwrite(filename, *heightmap, compression_params); */
    }
    ++fnameCounter;

    // Output height map to point cloud for python node to parse to PNG
    pcl::toROSMsg(*cloud_grid, output);
    output.header.stamp = ros::Time::now();
    output.header.frame_id = "yrl_cloud_id"; // fixed frame (oblique alignment) from LiDAR
    pubPointCloud.publish(output);

    // Publish bird_view img
    cv_bridge::CvImage cv_bird_view;
    cv_bird_view.header.stamp = ros::Time::now();
    cv_bird_view.header.frame_id = "out_bev_image";
    cv_bird_view.encoding = "bgr8";
    cv_bird_view.image = image;
    pubImage.publish(cv_bird_view.toImageMsg());

    // Output Image
    //sensor_msgs::ImagePtr msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", image).toImageMsg();
    //pubImage.publish(msg);
}
int main(int argc, char** argv)
{
    ROS_INFO("Starting LIDAR Node");
    ros::init(argc, argv, "lidar_node");
    ros::NodeHandle nh;
    image_transport::ImageTransport it(nh);

    // Setup image
    cv::Mat map(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC3, cv::Scalar(0, 0, 0));
    cv::Mat map2(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC3, cv::Scalar(0, 0, 0));
    cv::Mat map3(IMAGE_HEIGHT, IMAGE_WIDTH, CV_8UC3, cv::Scalar(0, 0, 0));
    // H S V
    // image container
    heightmap = &map;  // default source code (mcshiggings)
    hsv = &map2;       // added for HSV visualization
    bgr = &map3;       // for displaying the colored point cloud

    cv::namedWindow(WIN_NAME, WINDOW_AUTOSIZE);
    cv::namedWindow(WIN_NAME2, WINDOW_AUTOSIZE);
    cv::startWindowThread();
    cv::imshow(WIN_NAME, *bgr);        // BGR visualization of HSV
    cv::imshow(WIN_NAME2, *heightmap); // default visualization

    // Setup Image Output Parameters
    fnameCounter = 0;
    lowest = FLT_MAX;

    /* PNG compression param
    compression_params.push_back(IMWRITE_PNG_COMPRESSION);
    A higher value means a smaller size and longer compression time. Default value is 3.
    compression_params.push_back(9); */

    // JPG compression param
    compression_params.push_back(IMWRITE_JPEG_QUALITY);
    // from 0 to 100 (the higher the better). Default value is 95.
    compression_params.push_back(95);

    // Setup indices in point clouds
    /*
    int index = 0;
    double x, y;
    for(int i = 0; i < IMAGE_HEIGHT; ++i){
        for(int j = 0; j < IMAGE_WIDTH; ++j){
            index = i * j;
            (void)map_rc2pc(&x, &y, i, j);
            cloud_grid->points[index].x = x;
            cloud_grid->points[index].y = y;
            cloud_grid->points[index].z = -FLT_MAX;
        }
    */

    // subscriber and publisher
    subPointCloud = nh.subscribe<sensor_msgs::PointCloud2>("/pointcloud", 2, DEM);
    pubPointCloud = nh.advertise<sensor_msgs::PointCloud2>("/heightmap/pointcloud", 1);
    pubImage = it.advertise("/out_bev_image", 1);

    ros::spin();
    return 0;
}
Thank you for any advice and suggested solutions.
You can't simply pass the char array to OpenCV functions to create an image, because of how the data is formatted. PointCloud2 data fields strictly contain information about where a point lives in 3D space (think [x, y, z]); that means nothing in terms of an actual image. Instead, you first have to convert the point cloud into something that better represents an image. Luckily, this already exists: check out the CloudToImage ROS package.
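As an aside on the cv::imdecode attempt in the code above: imdecode expects an encoded byte buffer (for example the output of cv::imencode, or a file read into memory), not a matrix of raw pixels. A minimal sketch of that pairing, reusing the bgr matrix and compression_params from the question purely for illustration and not as part of the answer above:

// Hedged sketch: encode the BGR image to an in-memory JPEG buffer,
// then decode that buffer back into a cv::Mat of pixels.
std::vector<uchar> buffer;
cv::imencode(".jpg", *bgr, buffer, compression_params);   // raw pixels -> JPEG bytes
cv::Mat decoded = cv::imdecode(buffer, cv::IMREAD_COLOR); // JPEG bytes -> raw pixels
if (decoded.empty())
{
    std::cout << "!!! Failed imdecode()" << std::endl;
}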

partition a set of images into k clusters with opencv

I have an image data set that I would like to partition into k clusters, and I am trying to use the OpenCV implementation of k-means clustering.
First, I store my Mat images in a vector of Mat and then try to use the kmeans function. However, I am getting an assertion error.
Should the images be stored in a different kind of structure? I have read the k-means documentation and I don't understand what I am doing wrong. This is my code:
Thank you in advance,
vector<Mat> images;
string folder = "D:\\football\\positive_clustering\\";
string mask = "*.bmp";

vector<string> files = getFileList(folder + mask);
for (int i = 0; i < files.size(); i++)
{
    Mat img = imread(folder + files[i]);
    images.push_back(img);
}
cout << "Vector of positive samples created" << endl;

int k = 10;
cv::Mat bestLabels;
cv::kmeans(images, k, bestLabels, TermCriteria(), 3, KMEANS_PP_CENTERS);

// have a look
vector<cv::Mat> clusterViz(bestLabels.rows);
for (int i = 0; i < bestLabels.rows; i++)
{
    clusterViz[bestLabels.at<int>(i)].push_back(cv::Mat(images[bestLabels.at<int>(i)]));
}

namedWindow("clusters", WINDOW_NORMAL);
for (int i = 0; i < clusterViz.size(); i++)
{
    cv::imshow("clusters", clusterViz[i]);
    cv::waitKey();
}
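For reference, cv::kmeans expects its samples as a single floating-point matrix with one sample per row, not as a std::vector<cv::Mat>. A minimal sketch of that conversion, assuming all images have the same size and reusing the images vector and k from the question:

// Hedged sketch: flatten each image into one row of a CV_32F sample matrix.
cv::Mat samples((int)images.size(),
                images[0].rows * images[0].cols * images[0].channels(), CV_32F);
for (size_t i = 0; i < images.size(); i++)
{
    cv::Mat row;
    images[i].reshape(1, 1).convertTo(row, CV_32F); // 1 channel, 1 row, then to float
    row.copyTo(samples.row((int)i));
}

cv::Mat labels, centers;
cv::kmeans(samples, k, labels,
           cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 10, 1.0),
           3, cv::KMEANS_PP_CENTERS, centers);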

OpenCV - get coordinates of top of object in contour

Given a contour such as the one seen below, is there a way to get the X,Y coordinates of the top point in the contour? I'm using Python, but other language examples are fine.
Since every pixel may need to be checked, I'm afraid you will have to iterate over the image line by line and find the first white pixel, i.e. scan the image until you encounter a pixel that isn't black. I will write an example in C++.
cv::Mat image; // your binary image with type CV_8UC1 (8-bit 1-channel image)
int topRow(-1), topCol(-1);

for(int i = 0; i < image.rows; i++) {
    uchar* ptr = image.ptr<uchar>(i);
    for(int j = 0; j < image.cols; j++) {
        if(ptr[j] != 0) {
            topRow = i;
            topCol = j;
            std::cout << "Top point: " << i << ", " << j << std::endl;
            break;
        }
    }
    if(topRow != -1)
        break;
}
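If the contour points themselves are already available (as in the question, e.g. from cv::findContours), a shorter route, sketched here only as an alternative to the scan above, is to take the contour point with the smallest y coordinate:

#include <algorithm>
#include <vector>
#include <opencv2/core.hpp>

// Hedged alternative: the topmost point of a contour is the one with the
// smallest y coordinate (image y grows downward).
cv::Point topPoint(const std::vector<cv::Point>& contour)
{
    return *std::min_element(contour.begin(), contour.end(),
        [](const cv::Point& a, const cv::Point& b) { return a.y < b.y; });
}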

Convert matlab image svd method to opencv

I want to write a program with OpenCV in C++ in Visual Studio.
My code follows this MATLAB code:
close all
clear all
clc

% reading and converting the image
inImage = imread('pic.jpg');
inImageD = double(inImage);
[U,S,V] = svd(inImageD);

% Using different numbers of singular values (diagonal of S) to compress and
% reconstruct the image
dispEr = [];
numSVals = [];
for N = 5:25:300
    % store the singular values in a temporary var
    C = S;
    % discard the diagonal values not required for compression
    C(N+1:end,:) = 0;
    C(:,N+1:end) = 0;
    % Construct an image using the selected singular values
    D = U*C*V';
    % display and compute error
    figure;
    buffer = sprintf('Image output using %d singular values', N)
    imshow(uint8(D));
    title(buffer);
    error = sum(sum((inImageD-D).^2));
    % store vals for display
    dispEr = [dispEr; error];
    numSVals = [numSVals; N];
end
What is your opinion on how to do this? I want to save the image in a text file and retrieve it from the file into a Mat array. I've written this part as follows:
Mat image;
FileStorage read_file("pic_file.txt", FileStorage::READ);
read_file["pic"] >> image;
read_file.release();

Mat P;
image.convertTo(P, CV_32FC3, 1.0/255);
SVD svda(P); // or SVD::compute(P, W, U, V);
But I have a problem with the SVD function and it doesn't work. Is there anything I can do to compute the SVD compression of an image?
Thank you so much.
Vahids.
Here is my code:
int main(int argc, char* argv[])
{
    // Image matrix
    Mat img;
    Mat result;
    //---------------------------------------------
    //
    //---------------------------------------------
    namedWindow("Source Image");
    namedWindow("Result");

    // Load image in grayscale mode
    img = imread("D:\\ImagesForTest\\cat.bmp", 0);
    img.convertTo(img, CV_32FC1, 1.0/255.0);
    cout << "Source size:" << img.rows*img.cols << " elements" << endl;

    // create SVD
    cv::SVD s;
    // svd result
    Mat w, u, vt;
    // computations ...
    s.compute(img, w, u, vt);

    // collect Sigma matrix (diagonal holds the singular values, everything else is zero)
    // we got it as a vector, transform it into a diagonal matrix
    Mat W = Mat::zeros(w.rows, w.rows, CV_32FC1);
    for(int i = 0; i < w.rows; i++)
    {
        W.at<float>(i, i) = w.at<float>(i);
    }

    // reduce rank to k
    int k = 25;
    W = W(Range(0, k), Range(0, k));
    u = u(Range::all(), Range(0, k));
    vt = vt(Range(0, k), Range::all());

    // Get compressed image
    result = u*W*vt;
    cout << "Result size:" << u.rows*u.cols + k + vt.rows*vt.cols << " elements" << endl;
    //---------------------------------------------
    //
    //---------------------------------------------
    imshow("Source Image", img);
    imshow("Result", result);
    cv::waitKey(0);
    return 0;
}
Source and result images.
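The MATLAB script also tracks the reconstruction error, error = sum(sum((inImageD-D).^2)). A hedged equivalent on top of the img and result matrices above, not part of the original answer, could be:

// Hedged sketch: sum of squared differences between the source and the
// rank-k reconstruction, mirroring the MATLAB error computation.
double l2 = cv::norm(img, result, cv::NORM_L2);
double reconstructionError = l2 * l2; // sum of squared differences
cout << "Reconstruction error (SSD): " << reconstructionError << endl;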

Error level analysis in Image

How do I compute ELA for an image? I would like to get a similar ELA image using OpenCV: http://fotoforensics.com/tutorial-ela.php
As per this tutorial, I re-saved the image as a 95% quality JPEG and used the absdiff method to compute the difference between the source image and the re-saved image, but all I am getting is zero difference.
Any help on how to compute the difference between two images so as to obtain the error level, just like the sample images in the tutorial?
The key to achieving a similar result is to use a variable value for the compression rate and a scale factor to make the data easier to visualize.
Here's an example: we have the input image (left) and the processed image after some parameter adjustments (right):
As expected, the region with the Christmas hat presents a different compression rate from the rest of the image. This result is very similar to what FotoForensics presents:
With a few tweaks to this code you can achieve an even closer result. The source code of this project can be found on my GitHub:
main.cpp:
#include <opencv2/highgui/highgui.hpp>
#include <cstdlib>
#include <iostream>
#include <vector>

// Control
int scale = 15,
    quality = 75;

// Image containers
cv::Mat input_image,
        compressed_image;

void processImage(int, void*)
{
    // Setting up parameters and JPEG compression
    std::vector<int> parameters;
    parameters.push_back(CV_IMWRITE_JPEG_QUALITY);
    parameters.push_back(quality);
    cv::imwrite("temp.jpg", input_image, parameters);

    // Reading temp image from the disk
    compressed_image = cv::imread("temp.jpg");
    if (compressed_image.empty())
    {
        std::cout << "> Error loading temp image" << std::endl;
        exit(EXIT_FAILURE);
    }

    cv::Mat output_image = cv::Mat::zeros(input_image.size(), CV_8UC3);

    // Compare values through matrices
    for (int row = 0; row < input_image.rows; ++row)
    {
        const uchar* ptr_input = input_image.ptr<uchar>(row);
        const uchar* ptr_compressed = compressed_image.ptr<uchar>(row);
        uchar* ptr_out = output_image.ptr<uchar>(row);

        for (int column = 0; column < input_image.cols; column++)
        {
            // Calc abs diff for each color channel, multiplying by a scale factor
            ptr_out[0] = abs(ptr_input[0] - ptr_compressed[0]) * scale;
            ptr_out[1] = abs(ptr_input[1] - ptr_compressed[1]) * scale;
            ptr_out[2] = abs(ptr_input[2] - ptr_compressed[2]) * scale;

            ptr_input += 3;
            ptr_compressed += 3;
            ptr_out += 3;
        }
    }

    // Shows processed image
    cv::imshow("Error Level Analysis", output_image);
}

int main (int argc, char* argv[])
{
    // Check that the required number of parameters was provided
    if (argc < 2)
    {
        std::cout << "> You need to provide an image as parameter" << std::endl;
        return EXIT_FAILURE;
    }

    // Read the image
    input_image = cv::imread(argv[1]);

    // Check image load
    if (input_image.empty())
    {
        std::cout << "> Error loading input image" << std::endl;
        return EXIT_FAILURE;
    }

    // Set up window and trackbars
    cv::namedWindow("Error Level Analysis", CV_WINDOW_AUTOSIZE);
    cv::imshow("Error Level Analysis", input_image);
    cv::createTrackbar("Scale", "Error Level Analysis", &scale, 100, processImage);
    cv::createTrackbar("Quality", "Error Level Analysis", &quality, 100, processImage);

    // Press 'q' to quit
    while (char(cv::waitKey(0)) != 'q') {}

    return EXIT_SUCCESS;
}
Here are some nice references that were used to build this mash-up:
ELA with HTML5
FotoForensics Tutorial
Blackhat USA '07 Paper
