OpenCV C++ Convert Byte array to Mat - opencv

How can I convert a byte array received from a socket to a Mat?
My client application will send color image data like this
Mat frame; // colour image to transmit (NOTE(review): assumes frame.isContinuous() — confirm)
// Total payload size in bytes: element count * bytes per element.
int imgSize = frame.total()*frame.elemSize();
// Write the raw pixel buffer to the socket.
// NOTE(review): the original line had an unbalanced ')' after the call; also
// send() may transmit fewer than imgSize bytes — production code should loop.
int bytes = send(clientSock, frame.data, imgSize, 0); //write to the socket
And the server will receive the data like
// Receive buffer for the raw pixel bytes.
// NOTE(review): a variable-length array is a compiler extension, not standard C++.
char sockData[imgSize];
// NOTE(review): img is never allocated with rows/cols/type before the
// pixel-copy loop that follows — it must be created first.
Mat img;
// recv() may deliver the payload in several chunks, so keep reading until
// imgSize bytes have arrived.
for (int i = 0; i < imgSize; i += bytes) {
    // NOTE(review): the original line had an unbalanced ')'; the return value
    // should also be checked for 0 (peer closed) or -1 (error) before adding it.
    bytes = recv(connectSock, sockData + i, imgSize - i, 0);
}
// Write to mat
for (int i = 0; i < img.rows; i++) {
for (int j = 0; j < img.cols; j++) {
(img.row(i)).col(j) = (uchar)sockData[((img.cols)*i)+j];
}
}
I am getting distorted image at the receiver. Is there any problem in my code ?
Thanks in advance.......

If you have a colour image you may read it into a Mat with 3 channels of uchar, so change this piece of code:
// (quoted from the question) single-byte-per-pixel copy — wrong for a
// 3-channel colour image.
for (int i = 0; i < img.rows; i++) {
for (int j = 0; j < img.cols; j++) {
(img.row(i)).col(j) = (uchar)sockData[((img.cols)*i)+j];
}
}
with this:
int baseIndex = 0;
for (int i = 0; i < img.rows; i++) {
for (int j = 0; j < img.cols; j++) {
img.at<cv::Vec3b>(i,j) = cv::Vec3b(sockData[baseIndex + 0],
sockData[baseIndex + 1],
sockData[baseIndex + 2]);
baseIndex = baseIndex + 3;
}
}
Maybe this should work.

Doesn't this work?
// Wraps the existing buffer without copying it — sockData must stay alive as
// long as frame is used, and must hold rows*cols*3 contiguous bytes.
cv::Mat frame(img.rows, img.cols, CV_8UC3, sockData);
Just replace CV_8UC3 with the correct image format:
CV_<bit-depth>{U|S|F}C(<number_of_channels>)
see https://docs.opencv.org/2.4/modules/core/doc/basic_structures.html
Edit: There is a 5th additional field which can be useful. The number of bytes per row (in case there are a few padding bytes). In working with V4L2 today, I successfully used this cv::Mat constructor:
v4l2_format camera_format = ...; // see https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/vidioc-g-fmt.html#description
// Wrap the V4L2 capture buffer directly (no copy); the 5th argument is the
// stride in bytes per row, which accounts for any padding at the end of rows.
cv::Mat mat(camera_format.fmt.pix.height,
camera_format.fmt.pix.width,
CV_8UC3,
raw_data_ptr,
camera_format.fmt.pix.bytesperline);

I solved the problem using below code.
int ptr=0;
for (int i = 0; i < img.rows; i++) {
for (int j = 0; j < img.cols; j++) {
img.at<cv::Vec3b>(i,j) = cv::Vec3b(sockData[ptr+0],sockData[ptr+1],sockData[ptr+2]);
ptr=ptr+3;
}
}

Adding to Michele's answer, one can also use a MatIterator to solve this.
cv::Mat m;
m.create(10, 10, CV_32FC3);
// This is the socket data.
float *array = (float *)malloc( 3*sizeof(float)*10*10 );
cv::MatIterator_<cv::Vec3f> it = m.begin<cv::Vec3f>();
for (unsigned i = 0; it != m.end<cv::Vec3f>(); it++ ) {
for ( unsigned j = 0; j < 3; j++ ) {
(*it)[j] = *(array + i );
i++;
}
}
Now you have a float cv::Mat. In case of 8 bit, simply change float to uchar and Vec3f to Vec3b and CV_32FC3 to CV_8UC3

Related

OpenCV Error: Assertion failed, mat.hpp line 548

First, the following code gave me an error:
int KK = 10;
int colors[KK];
then I declared KK as `const int` instead, which fixed that error (array sizes must be compile-time constants in standard C++).
I'm using the opencv library but I'm getting an error somewhere. When i and j are 713 48, they do not give an error, but in the next cycle, i and j are 713 and 49, and they give an error code.(OpenCV Error: Assertion failed (dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] && (unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && ((((sizeof(size_t)<<28)|0x8442211) >> ((DataType<_Tp>::depth) & ((1 << 3) - 1))*4) & 15) == elemSize1()) in cv::Mat::at, file c:\opencv\build\include\opencv2\core\mat.hpp, line 548)
the error gives in this row.
clustered.at(i,j) = (float)(colors[bestLabels.at(1,z)]);
I share the detailed code below
// Per-contour k-means segmentation: for every contour covering more than 20
// pixels, cluster its BGR pixel values and paint a per-cluster intensity
// into `clustered`, accumulating the result in `clusteredAll`.
int KK = 10;
// NOTE(review): a variable-length array with a non-const size is not
// standard C++ — this is the compile error fixed by making KK const.
int colors[KK];
Mat p;
Mat bestLabels, centers;
vector<Mat> bgr, bgrBN;
// Split the colour frame into its B, G, R planes.
split(frame_color, bgr);
Mat mask, clustered;
Mat clusteredAll = Mat::zeros(frame_color.rows, frame_color.cols, CV_32F);
// Process each detected contour independently.
for (int k = 0; k < contoursN.size(); k++)
{
mask = Mat::zeros(frame_color.rows, frame_color.cols, CV_8U);
drawContours(mask, contoursN, k, CV_RGB(255,255,255), CV_FILLED);
clustered = Mat::zeros(frame_color.rows, frame_color.cols, CV_32F);
// A = number of pixels inside this contour's filled mask.
int A = 0;
for (int i = 0; i < frame_color.rows; i++)
for (int j = 0; j < frame_color.cols; j++)
if (mask.at<uchar>(i,j) != 0)
A++;
if (A > 20)
{
// Sample matrix for kmeans: one row per masked pixel, G columns
// (presumably G == 3, one per channel — TODO confirm; G is defined elsewhere).
p = Mat::zeros(A, G, CV_32F);
double moy = 0;
int z = 0;
for (int i = 0; i < frame_color.rows; i++)
{
for (int j = 0; j < frame_color.cols; j++)
{
if (mask.at<uchar>(i,j) != 0)
{
// Normalise each channel byte to [0,1].
p.at<float>(z,0) = bgr[0].data[i*frame_color.cols+j] / 255.0;
p.at<float>(z,1) = bgr[1].data[i*frame_color.cols+j] / 255.0;
p.at<float>(z,2) = bgr[2].data[i*frame_color.cols+j] / 255.0;
z++;
// moy accumulates the grey value of the masked pixels (mean after /z below).
moy = moy + frame.at<uchar>(i,j);
}
}
}
moy = moy/z;
double var = 0;
for (int i = 0; i < frame_color.rows; i++)
for (int j = 0; j < frame_color.cols; j++)
if (mask.at<uchar>(i,j) != 0)
var = var+(frame.at<uchar>(i,j) - moy)*(frame.at<uchar>(i,j) - moy);
// NOTE(review): dividing by z*z (not z) — confirm this scaling is intended.
var = var/(z*z);
// Heuristic cluster count from area and variance (A0, var0 defined elsewhere).
int K = 1 + log(1+(A/A0)+(var/var0));
// bestLabels receives one int label per sample row of p, i.e. an A x 1 column.
kmeans(p, K, bestLabels, TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0), 3, KMEANS_PP_CENTERS, centers);
for(int i=0; i<KK; i++)
colors[i] = 255/(i+1);
z = 0;
for (int i = 0; i < frame_color.rows; i++)
{
for (int j = 0; j < frame_color.cols; j++)
{
if (mask.at<uchar>(i,j) != 0)
{
// NOTE(review): bestLabels is A x 1, so at<int>(1,z) reads row 1 / column z
// and trips the Mat::at assertion as soon as z >= 1 (matching the reported
// failure on the second masked pixel). This should presumably be
// bestLabels.at<int>(z,0).
clustered.at<float>(i,j) = (float)(colors[bestLabels.at<int>(1,z)]);
z++;
}
}
}
clusteredAll = clusteredAll + clustered;
clustered.convertTo(clustered, CV_8U);
}

why use template Mat_ matrix

I read some opencv codes on using template Mat_ matrix. Here they are:
// NOTE(review): fixed the typo `Mat:zeros` -> `Mat::zeros` (scope operator).
Mat image = Mat::zeros(512,512,CV_8UC3);
for (int i = 0; i < image.rows; ++i)
for (int j = 0; j < image.cols; ++j)
{
// Build a 1x2 float row (j, i) with the Mat_ comma initializer.
Mat sampleMat = (Mat_<float>(1,2) << j,i);
.....
}
The sampleMat is Mat sampleMat(1,2,CV_32FC1). Why do we need to use (Mat_<float>(1,2) << j,i)?

Opencv Mat efficiency linearized by right triangle

How can I efficiently linearize the right (upper) triangle of a symmetric Mat into a single row?
For example, when I have:
0aabbb
b0aaaa
ba0bba
bac0aa
aaaa0c
abcab0
and then from that I get:
aabbbaaaabbaaac
Something like this:
...
/// Linearize the strictly-upper (right) triangle of a square symmetric
/// matrix into a single 1 x (c*(c-1)/2) row, scanning row by row.
/// For the 6x6 example this emits (0,1..5), (1,2..5), ..., (4,5).
template<class T>
Mat SSMJ::triangleLinearized(Mat mat){
    int c = mat.cols;
    Mat row = Mat(1, ((c*c)-c)/2, mat.type());
    int i = 0;
    // NOTE(review): the original bounds (y from 1, x from y) skipped the
    // whole first row and included the zero diagonal; the strictly-upper
    // triangle is y in [0, rows) with x in (y, cols).
    for(int y = 0; y < mat.rows; y++)
        for(int x = y + 1; x < mat.cols; x++) {
            row.at<T>(i) = mat.at<T>(y, x);
            i++;
        }
    return row;
}
...
Since the data in your mat is just a 1D array stored in mat.data, you can do whatever you want with it. I don't think you will find anything more special (w/o using vectorized methods) than just copying from this array.
int rows = 6;
// NOTE(review): the original listed only 5 rows of sample data for a 6x6
// matrix; the 6th row is added here (its entries past the diagonal are never
// copied anyway).
char data[] = { 0,1,2,3,4,5,
                0,1,2,3,4,5,
                0,1,2,3,4,5,
                0,1,2,3,4,5,
                0,1,2,3,4,5,
                0,1,2,3,4,5};
char result[100];
int offset = 0;
// For each row i copy the rows-1-i entries to the right of the diagonal:
// &data[rows*i + i + 1] is the element just past (i,i).
// (The original hard-coded 5, i.e. rows-1, throughout.)
for (int i = 0; i < rows - 1; offset += rows - 1 - i, i++) {
    memcpy(&result[offset], &data[rows * i + i + 1], rows - 1 - i);
}
Or with opencv Mat it would be
int rows = mat.cols;
char result[100]; // you can calculate how much space you need
int offset = 0;
// Copy each row's entries to the right of the diagonal straight out of the
// Mat's data buffer (assumes mat is continuous with 1-byte elements — TODO
// confirm for the caller's type). The original hard-coded 5, i.e. rows-1.
for (int i = 0; i < rows - 1; offset += rows - 1 - i, i++) {
    memcpy(&result[offset], &mat.data[rows * i + i + 1], rows - 1 - i);
}
// NOTE(review): cv::Mat has no (rows, cols, data) constructor — the type
// flag is required. This wraps `result` without copying it, so `result`
// must outlive resultMat.
Mat resultMat(1, offset, mat.type(), result);

How to set a whole matrix with specific value in OpenCV like Matlab

I want to do in OpenCV something like "A(A == val) = 0" that works in Matlab. I implemented some code but these are too slow (I use it many times)
I tried to do something like:
// Attempt 1: iterate each row view with a MatIterator and zero the matches.
// (Mat::row() returns a header sharing the same data, so writes through tmp
// modify `in` itself.)
MatIterator_<T> it;
for (int i = 0; i < rows; i++){
tmp = in.row(i);
end = tmp.end<T>();
for (it = tmp.begin<T>(); it != end; ++it)
if (*it == val) *it = 0;
}
And
// Attempt 2: raw row-pointer scan.
// NOTE(review): `*ptr = in.ptr<T>(i);` looks like a typo for
// `ptr = in.ptr<T>(i);` — as written it stores the row pointer through an
// unrelated pointer instead of repositioning ptr at the start of row i.
for (int i = 0; i < rows; i++){
*ptr = in.ptr<T>(i);
for (int j = 0; j < cols; j++){
if (*ptr == val) *ptr = 0;
ptr++;
}
}
I hope some suggestions. Thanks in advance.
This sets all elements of target that are 42 to the new value, 12:
// Overwrite every element equal to 42 with 12 via a comparison mask —
// the OpenCV equivalent of Matlab's A(A == 42) = 12.
target.setTo(12, target == 42);

Obtain array from IplImage in JavaCV

I need to convert the code below from C++ to Java. In C++ I use openCV and I need to convert it in Java using JavaCV.
// Load the image with the legacy C API and wrap it in a Mat (no pixel copy).
IplImage* img = cvLoadImage(argv[0]);
int rows = img->height;
int cols = img->width;
Mat matimg(img);
// One double per pixel.
vector<vector<double> > img_vec(rows, vector<double>(cols));
for (int i=0; i < rows; i++) {
for (int j =0; j < cols; j++){
unsigned char temp;
// Byte offset: start of row i (step bytes per row) + pixel j * elemSize;
// the trailing +1 selects the second channel (G in BGR order).
temp = ((uchar*) matimg.data + i * matimg.step)[j * matimg.elemSize() + 1 ];
img_vec[i][j] = (double) temp;
}
}
I've tried the following conversion to Java, but it doesn't work properly. I printed the values of temp and it is 0 every time, and for the same image the values of matimg.step and matimg.elemSize() differ between the C++ code and the Java code.
In C++ I get matimg.step = 2400 and matimg.elemSize() = 3, while in Java I get 3000 and 1.
Here is the code in java:
IplImage img = cvLoadImage(argv[0]);
int rows = img.height();
int cols = img.width();
// NOTE(review): asCvMat() apparently changes the element layout — the asker
// observed elemSize() == 1 and step() == 3000 here vs 3 and 2400 in C++ —
// so the step/elemSize arithmetic below no longer addresses channel bytes.
CvMat matimg = img.asCvMat();
double img_vec[][] = new double[rows][cols];
for (int i=0; i < rows; i++) {
for (int j =0; j < cols; j++){
short temp;
// data_s() views the buffer as shorts while step() counts bytes —
// mixing the two units is another reason this indexing reads zeros.
temp = matimg.data_s().get(i * matimg.step() + j * matimg.elemSize() + 1);
img_vec[i][j] = (double) temp;
}
}
I don't understand what I am doing wrong.
Any help is appreciated,
Thanks.
I've solved my problem using this:
// Read the pixel bytes straight from the IplImage's native buffer.
ByteBuffer buffer = img.getByteBuffer();
double img_vec[][] = new double[rows][cols];
for (int i=0; i < rows; i++) {
for (int j =0; j < cols; j++){
// Offset of pixel (i,j): widthStep() bytes per row, nChannels() bytes per
// pixel; the +1 selects the second channel (G for a BGR image).
int ind = i * img.widthStep() + j * img.nChannels() + 1;
// Mask with 0xFF because Java bytes are signed.
img_vec[i][j] = (buffer.get(ind) & 0xFF);
}
}

Resources