How to find the index of the max value in each row of an OpenCV Mat object - opencv

I have an OpenCV Mat. The Mat is the response of an MLP neural network. How can I find the index of the maximum value in each row?

You can use minMaxLoc to do this.
Mat img = imread("image.jpg", 0), row; // load as grayscale: minMaxLoc needs a single-channel Mat
double min = 0, max = 0;
Point minLoc, maxLoc;
for (int i = 0; i < img.rows; i++)
{
    row = img.row(i);
    // maxLoc contains the coordinates of the maximum value;
    // maxLoc.x is the column index within this row
    minMaxLoc(row, &min, &max, &minLoc, &maxLoc);
}
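The loop above overwrites maxLoc on every iteration; to keep the per-row indices you can collect maxLoc.x as you go. A minimal sketch, assuming response is the single-channel CV_32F output Mat of the MLP (the names are illustrative):
std::vector<int> rowArgMax;
for (int i = 0; i < response.rows; i++)
{
    double minVal, maxVal;
    cv::Point minLoc, maxLoc;
    cv::minMaxLoc(response.row(i), &minVal, &maxVal, &minLoc, &maxLoc);
    rowArgMax.push_back(maxLoc.x); // column index of this row's maximum
}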

Using cv::minMaxIdx for each row (as mentioned before) might be more straightforward:

void GetMaxValueIndex(const cv::Mat& src_mat) {
    double max_value;
    int max_idx[2]; // minMaxIdx writes one index per dimension (row, col)
    std::vector<double> max_value_vec;
    std::vector<int> max_idx_vec;
    for (int i = 0; i < src_mat.rows; i++) {
        cv::minMaxIdx(src_mat.row(i), NULL, &max_value, NULL, max_idx);
        max_value_vec.push_back(max_value);
        max_idx_vec.push_back(max_idx[1]); // column of the row maximum
    }
}

Related

How to apply K-means to a mask of an image instead of the whole one

I want to apply K-means in OpenCV to a region of an image that is not a square or a rectangle. For example, the source image is:
now I select a custom mask:
and apply K-means with K = 3:
obviously without considering the bounds (white).
Instead, what I can do with OpenCV is K-means considering the bounds:
And that messes up my final image, because black is considered one colour.
Do you have any clue?
Thank you in advance.
Quick and dirty solution.
vector<Vec3b> points;
vector<Point> locations;
for (int y = 0; y < src.rows; y++) {
    for (int x = 0; x < src.cols; x++) {
        if ((int)mask.at<unsigned char>(y, x) != 0) {
            points.push_back(src.at<Vec3b>(y, x));
            locations.push_back(Point(x, y));
        }
    }
}
Mat kmeanPoints((int)points.size(), 3, CV_32F);
for (int y = 0; y < (int)points.size(); y++) {
    for (int z = 0; z < 3; z++) {
        kmeanPoints.at<float>(y, z) = points[y][z];
    }
}
Mat labels;
Mat centers;
kmeans(kmeanPoints, 4, labels, TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 10, 0.1), 10, cv::KMEANS_PP_CENTERS, centers);
Mat final = Mat::zeros(src.size(), src.type());
Vec3b tempColor;
for (int i = 0; i < (int)locations.size(); i++) {
    int cluster_idx = labels.at<int>(i, 0);
    tempColor[0] = centers.at<float>(cluster_idx, 0);
    tempColor[1] = centers.at<float>(cluster_idx, 1);
    tempColor[2] = centers.at<float>(cluster_idx, 2);
    final.at<Vec3b>(locations[i]) = tempColor;
}
Assuming that you have an input RGB image called img (here) and a one-channel mask called mask (here), here is a snippet to prepare your k-means computation:
int nbClasses = 3; // or whatever you want
cv::TermCriteria myCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 10, 1.0);
cv::Mat labels, centers, result, data;
img.convertTo(data, CV_32F);
// reshape into 3 columns (one per channel, in BGR order) and as many rows as the total number of pixels in img
data = data.reshape(1, data.total());
If you want to apply a normal k-means (without a mask):
// apply k-means
cv::kmeans(data, nbClasses, labels, myCriteria, 3, cv::KMEANS_PP_CENTERS, centers);
// reshape both to a single column of Vec3f pixels
centers = centers.reshape(3, centers.rows);
data = data.reshape(3, data.rows);
// replace pixel values with their center value
cv::Vec3f *p = data.ptr<cv::Vec3f>();
for (int i = 0; i < data.rows; i++)
{
    int center_id = labels.at<int>(i);
    p[i] = centers.at<cv::Vec3f>(center_id);
}
// back to 2D image
data = data.reshape(3, img.rows);
// optional conversion to uchar
data.convertTo(result, CV_8U);
The result is here.
But if you want instead to apply a masked k-means:
int nbWhitePixels = cv::countNonZero(mask);
cv::Mat dataMasked = cv::Mat(nbWhitePixels, 3, CV_32F, cv::Scalar(0));
cv::Mat maskFlatten = mask.reshape(1, mask.total());
// filter data by the mask
int idx = 0;
for (int k = 0; k < (int)mask.total(); k++)
{
    int val = maskFlatten.at<uchar>(k, 0);
    if (val != 0)
    {
        float val0 = data.at<float>(k, 0);
        float val1 = data.at<float>(k, 1);
        float val2 = data.at<float>(k, 2);
        dataMasked.at<float>(idx, 0) = val0;
        dataMasked.at<float>(idx, 1) = val1;
        dataMasked.at<float>(idx, 2) = val2;
        idx++;
    }
}
// apply k-means
cv::kmeans(dataMasked, nbClasses, labels, myCriteria, 3, cv::KMEANS_PP_CENTERS, centers);
// reshape to a single column of Vec3f pixels
centers = centers.reshape(3, centers.rows);
dataMasked = dataMasked.reshape(3, dataMasked.rows);
data = data.reshape(3, data.rows);
// replace pixel values with their center value, only for pixels in mask
cv::Vec3f *p = data.ptr<cv::Vec3f>();
idx = 0;
for (int i = 0; i < data.rows; i++)
{
    if (maskFlatten.at<uchar>(i, 0) != 0)
    {
        int center_id = labels.at<int>(idx);
        p[i] = centers.at<cv::Vec3f>(center_id);
        idx++;
    }
    //else
    //    p[i] = cv::Vec3f(0, 0, 0);
}
// back to 2d, and uchar
data = data.reshape(3, img.rows);
data.convertTo(result, CV_8U);
You will now have this result.
If you leave the else part commented out, the pixels outside the mask keep their original values; if you uncomment it, they are set to black, like here.

OpenCV cv::Mat not returning the same result

int sizeOfChannel = (_width / 2) * (_height / 2);
double* channel_gr = new double[sizeOfChannel];
// filling the data into channel_gr....
cv::Mat my(_width/2, _height/2, CV_32F, channel_gr);
cv::Mat src(_width/2, _height/2, CV_32F);
for (int i = 0; i < (_width/2) * (_height/2); ++i)
{
    src.at<float>(i) = channel_gr[i];
}
cv::imshow("src", src);
cv::imshow("my", my);
cv::waitKey(0);
I'm wondering why I'm not getting the same image in the "my" and "src" imshow windows.
Update:
I have changed my array into double*; still the same result.
I think it has something to do with steps?
my image output
src image output
this one works for me:
int halfWidth = _width/2;
int halfHeight = _height/2;
int sizeOfChannel = halfHeight * halfWidth;
// ******************************* //
// you use CV_32FC1 later, so it is single-precision float
float* channel_gr = new float[sizeOfChannel];
// filling the data into channel_gr....
for (int i = 0; i < sizeOfChannel; ++i) channel_gr[i] = i/(float)sizeOfChannel;
// ******************************* //
// changed row/col ordering, but this shouldn't be important
cv::Mat my(halfHeight, halfWidth, CV_32FC1, channel_gr);
cv::Mat src(halfHeight, halfWidth, CV_32FC1);
// ******************************* //
// changed from 1D indexing to 2D indexing
for (int y = 0; y < src.rows; ++y)
{
    for (int x = 0; x < src.cols; ++x)
    {
        int arrayPos = y*halfWidth + x;
        // you have a 2D mat, so access it in 2D
        src.at<float>(y, x) = channel_gr[arrayPos];
    }
}
cv::imshow("src", src);
cv::imshow("my", my);
// check for differences
cv::imshow("diff1 > 0", src - my > 0);
cv::imshow("diff2 > 0", my - src > 0);
cv::waitKey(0);
'my' is declared as an array of floats, but you give it a pointer to an array of doubles. There is no way it can read the data from that array properly.
The constructor version that you are actually using is
Mat::Mat(int rows, int cols, int type, void* data, size_t step=AUTO_STEP)
(from the OpenCV docs), and it reinterprets the given buffer rather than copying or converting it. You are also using float for src while assigning from channel_gr (declared as double). Isn't that some form of precision loss?
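A minimal sketch of two consistent fixes (illustrative; the buffer names here are made up): either allocate the buffer as float to match CV_32F, or keep doubles and declare the header as CV_64F, converting afterwards if a float image is needed:
// option 1: a float buffer with a CV_32F header
float* buf32 = new float[sizeOfChannel];
// ... fill buf32 ...
cv::Mat my32(_height/2, _width/2, CV_32F, buf32); // Mat(rows, cols, type, data)

// option 2: a double buffer with a matching CV_64F header
double* buf64 = new double[sizeOfChannel];
// ... fill buf64 ...
cv::Mat my64(_height/2, _width/2, CV_64F, buf64);
cv::Mat myFloat;
my64.convertTo(myFloat, CV_32F); // explicit conversion instead of reinterpretation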

EM clustering based background foreground segmentation in OPENCV

I tried to perform EM-based background/foreground segmentation using the code below, which I also found on Stack Overflow. But there seems to be some error somewhere, as I never see the second printf statement execute... basically it never reaches the classification/clustering part of the code. The code is given below. Could someone help me with this?
#include <opencv2/opencv.hpp>
#include <opencv2/legacy/legacy.hpp>

char str1[60];

int main()
{
    cv::Mat source = cv::imread("C:\\Image Input\\part1.bmp");
    if (!source.data)
        printf(" No data \n");
    // output images
    cv::Mat meanImg(source.rows, source.cols, CV_32FC3);
    cv::Mat fgImg(source.rows, source.cols, CV_8UC3);
    cv::Mat bgImg(source.rows, source.cols, CV_8UC3);
    // convert the input image to float
    cv::Mat floatSource;
    source.convertTo(floatSource, CV_32F);
    // now convert the float image to a column vector
    cv::Mat samples(source.rows * source.cols, 3, CV_32FC1);
    int idx = 0;
    for (int y = 0; y < source.rows; y++) {
        cv::Vec3f* row = floatSource.ptr<cv::Vec3f>(y);
        for (int x = 0; x < source.cols; x++) {
            samples.at<cv::Vec3f>(idx++, 0) = row[x];
        }
    }
    printf(" After Loop \n");
    // we need just 2 clusters
    cv::EMParams params(2);
    cv::ExpectationMaximization em(samples, cv::Mat(), params);
    // the two dominating colors
    cv::Mat means = em.getMeans();
    // the weights of the two dominant colors
    cv::Mat weights = em.getWeights();
    // we define the foreground as the dominant color with the largest weight
    const int fgId = weights.at<float>(0) > weights.at<float>(1) ? 0 : 1;
    printf(" After Training \n");
    // now classify each of the source pixels
    idx = 0;
    for (int y = 0; y < source.rows; y++)
    {
        printf(" Now Classify\n");
        for (int x = 0; x < source.cols; x++)
        {
            // classify
            const int result = cvRound(em.predict(samples.row(idx++), NULL));
            // get the corresponding mean (dominant color)
            const double* ps = means.ptr<double>(result, 0);
            // set the corresponding mean value in the mean image
            float* pd = meanImg.ptr<float>(y, x);
            // float images need to be in the [0..1] range
            pd[0] = ps[0] / 255.0;
            pd[1] = ps[1] / 255.0;
            pd[2] = ps[2] / 255.0;
            // set either foreground or background
            if (result == fgId) {
                fgImg.at<cv::Point3_<uchar> >(y, x, 0) = source.at<cv::Point3_<uchar> >(y, x, 0);
            } else {
                bgImg.at<cv::Point3_<uchar> >(y, x, 0) = source.at<cv::Point3_<uchar> >(y, x, 0);
            }
        }
    }
    printf(" Show Images \n");
    cv::imshow("Means", meanImg);
    cv::imshow("Foreground", fgImg);
    cv::imshow("Background", bgImg);
    cv::waitKey(0);
    return 0;
}
The code works fine. I think you are using images that are too large, so learning simply takes a very long time. Try processing smaller images.
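For instance, a minimal sketch of shrinking the input before training (the 0.25 scale factor is just an example):
cv::Mat small;
cv::resize(source, small, cv::Size(), 0.25, 0.25, cv::INTER_AREA);
// build the samples matrix from 'small' instead of 'source', then run EM as before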
Just one correction: initialize the output images with zeros:
// output images
cv::Mat meanImg = cv::Mat::zeros(source.rows, source.cols, CV_32FC3);
cv::Mat fgImg = cv::Mat::zeros(source.rows, source.cols, CV_8UC3);
cv::Mat bgImg = cv::Mat::zeros(source.rows, source.cols, CV_8UC3);

Can we use OpenCV to load and resize a 2D image that has indexed colors?

So, I have an image cv::Mat created as an indexed 2D matrix with colors 1, 2, 3, ... up to 255. I want to resize my image all at once, but get the same result as I currently do by handling each index individually, so as not to get mixed colors:
//...
std::map<unsigned char, cv::Mat*> clusters;
for (int i = 0; i < sy; ++i)
{
    for (int j = 0; j < sx; ++j)
    {
        unsigned char current_k = image[i][j];
        if (clusters[current_k] == NULL) {
            clusters[current_k] = new cv::Mat();
            (*clusters[current_k]) = cv::Mat::zeros(cv::Size(sx, sy), CV_8UC1);
        }
        (*clusters[current_k]).row(i).col(j) = 255;
    }
}
std::vector<cv::Mat> result;
for (std::map<unsigned char, cv::Mat*>::iterator it = clusters.begin(); it != clusters.end(); ++it)
{
    cv::Mat filled(cv::Size(w, h), (*it->second).type());
    cv::resize((*it->second), filled, filled.size(), 0, 0, CV_INTER_CUBIC);
    cv::threshold(filled, filled, 1, 255, CV_THRESH_BINARY);
    result.push_back(filled);
}
So, can OpenCV automate this for my indexed image (so that I would not have to create a cv::Mat per cluster to get a correct resize)?
You can use the remap function with your own map to interpolate the values as you'd like.
Take a look at this tutorial (Link).
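For instance, a minimal sketch of a nearest-neighbour resize through cv::remap, so that label values are never blended (here indexed is assumed to be the CV_8UC1 label image and w, h the target size; these names are illustrative):
cv::Mat mapX(h, w, CV_32FC1), mapY(h, w, CV_32FC1);
for (int y = 0; y < h; ++y)
{
    for (int x = 0; x < w; ++x)
    {
        // map each destination pixel back to its source coordinate
        mapX.at<float>(y, x) = x * (float)indexed.cols / w;
        mapY.at<float>(y, x) = y * (float)indexed.rows / h;
    }
}
cv::Mat resized;
cv::remap(indexed, resized, mapX, mapY, cv::INTER_NEAREST);
With INTER_NEAREST every output pixel is an existing label, so no per-cluster binary masks are needed.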

Accessing value at row,col in a Matrix

I'm trying to access a specific row in a matrix but am having a hard time doing so.
I want to get the value at row j, column i but I don't think my algorithm is correct. I'm using OpenCV's Mat for my matrix and accessing it through the data member.
Here is how I am attempting to access values:
plane.data[i + j*plane.rows]
Where i is the column and j is the row. Is this correct? The matrix is one plane from a YUV image.
Any help would be appreciated! Thanks.
No, you are wrong.
plane.data[i + j*plane.rows] is not a good way to access a pixel. The correct offset depends on the type (depth) of the matrix and on its step.
You should use the at() method of the matrix.
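For the original question, a minimal sketch (assuming the plane is a single-channel 8-bit Mat; note that raw indexing must use step, the number of bytes per row, not rows):
uchar v1 = plane.at<uchar>(j, i);            // at(row, col) = at(j, i)
uchar v2 = plane.data[j * plane.step + i];   // raw access: 'step' bytes per row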
To make it simple, here is a code sample that accesses each pixel of a matrix and prints it. It works for almost every matrix type and for any number of channels:
template <typename T>
void printMatTemplate(const Mat& M, bool isInt = true){
    if (M.empty()){
        printf("Empty Matrix\n");
        return;
    }
    if ((M.elemSize()/M.channels()) != sizeof(T)){
        printf("Wrong matrix type. Cannot print\n");
        return;
    }
    int cols = M.cols;
    int rows = M.rows;
    int chan = M.channels();
    char printf_fmt[20];
    if (isInt)
        sprintf_s(printf_fmt, "%%d,");
    else
        sprintf_s(printf_fmt, "%%0.5g,");
    if (chan > 1){
        // print a multi-channel array
        for (int i = 0; i < rows; i++){
            for (int j = 0; j < cols; j++){
                printf("(");
                const T* Pix = &M.at<T>(i,j);
                for (int c = 0; c < chan; c++){
                    printf(printf_fmt, Pix[c]);
                }
                printf(")");
            }
            printf("\n");
        }
        printf("-----------------\n");
    }
    else {
        // single channel
        for (int i = 0; i < rows; i++){
            const T* Mi = M.ptr<T>(i);
            for (int j = 0; j < cols; j++){
                printf(printf_fmt, Mi[j]);
            }
            printf("\n");
        }
        printf("\n");
    }
}

// the template must be defined before printMat can instantiate it
void printMat(const Mat& M){
    // dispatch on the per-element byte size
    switch ((M.dataend - M.datastart) / (M.cols * M.rows * M.channels())){
    case sizeof(char):
        printMatTemplate<unsigned char>(M, true);
        break;
    case sizeof(float):
        printMatTemplate<float>(M, false);
        break;
    case sizeof(double):
        printMatTemplate<double>(M, false);
        break;
    }
}
I do not think there is anything different between accessing an RGB Mat and a YUV Mat. It's just a different colorspace.
Please refer to http://opencv.willowgarage.com/wiki/faq#Howtoaccessmatrixelements.3F on how to access each pixel.
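For instance, a minimal sketch (assuming bgr is a CV_8UC3 image; the names are illustrative):
cv::Mat yuv;
cv::cvtColor(bgr, yuv, CV_BGR2YUV);
// element access is the same as for any 3-channel Mat
uchar y = yuv.at<cv::Vec3b>(row, col)[0]; // Y (luma) channel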
