I have implemented the Sobel operator in the vertical direction, but the result I am getting is very poor. I have attached my code below.
int mask_size = 3;
char mask[3][3] = {{-1, 0, 1}, {-2, 0, 2}, {-1, 0, 1}};

void sobel(Mat input_image)
{
    /** Padding m-1 and n-1 zeroes to the result where m and n are mask_size **/
    Mat result = Mat::zeros(input_image.rows + (mask_size - 1) * 2, input_image.cols + (mask_size - 1) * 2, CV_8UC1);
    Mat result1 = Mat::zeros(result.rows, result.cols, CV_8UC1);
    int sum = 0;

    /** For loop for copying original values to new padded image **/
    for (int i = 0; i < input_image.rows; i++)
        for (int j = 0; j < input_image.cols; j++)
            result.at<uchar>(i + (mask_size - 1), j + (mask_size - 1)) = input_image.at<uchar>(i, j);

    GaussianBlur(result, result, Size(5, 5), 0, 0, BORDER_DEFAULT);

    /** For loop to implement the convolution **/
    for (int i = 0; i < result.rows - (mask_size - 1); i++)
        for (int j = 0; j < result.cols - (mask_size - 1); j++)
        {
            int counterX = 0, counterY = 0;
            sum = 0;
            for (int k = i; k < i + mask_size; k++)
            {
                for (int l = j; l < j + mask_size; l++)
                {
                    sum += result.at<uchar>(k, l) * mask[counterX][counterY];
                    counterY++;
                }
                counterY = 0;
                counterX++;
            }
            result1.at<uchar>(i + mask_size / 2, j + mask_size / 2) = sum / (mask_size * mask_size);
        }

    /** Truncating all the extra rows and columns **/
    result = Mat::zeros(result1.rows - (mask_size - 1) * 2, result1.cols - (mask_size - 1) * 2, CV_8UC1);
    for (int i = 0; i < result.rows; i++)
        for (int j = 0; j < result.cols; j++)
            result.at<uchar>(i, j) = result1.at<uchar>(i + (mask_size - 1), j + (mask_size - 1));

    imshow("Input", result);
    imwrite("output2.tif", result);
}
My input to the algorithm is: [input image]
My output is: [output image]
I have also tried applying a Gaussian blur before actually convolving the image, and the output I got is: [blurred output image]
The output which I am expecting is: [expected output image]
The guide I am using is: https://www.tutorialspoint.com/dip/sobel_operator.htm
Your convolution looks OK, although I only had a quick look.
Check your output type. It's unsigned char.
Now think about the values your output pixels may have if your kernel contains negative values, and whether it is a good idea to store them in a uchar directly.
If you store -1 in an unsigned char it wraps around and your output is 255. In case you're wondering where all that excess white stuff is coming from: those are actually small negative gradients.
The desired result looks like the absolute value of the Sobel output.
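If it helps, here is a minimal sketch of that fix using OpenCV's built-in functions (the filenames are placeholders of mine): compute the derivative into a signed 16-bit image so negative gradients survive, then take the absolute value and convert back to 8 bits.

#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
using namespace cv;

int main()
{
    Mat input = imread("input.tif", IMREAD_GRAYSCALE);
    Mat grad, grad_abs;
    // dx = 1, dy = 0, 3x3 kernel: the same Sobel mask as the hand-rolled
    // version above, but written into a signed 16-bit image
    Sobel(input, grad, CV_16S, 1, 0, 3);
    // |grad| saturated back to uchar: negative gradients become dark
    // instead of wrapping around to white
    convertScaleAbs(grad, grad_abs);
    imwrite("output_abs.tif", grad_abs);
    return 0;
}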
I am trying to implement an algorithm to compute histograms of images with more than 256 bins.
The main issue with computing a histogram in that case is that it is impossible to allocate more than 32 KB as a local array on the GPU.
All the algorithms I found for 8-bit-per-pixel images use a fixed-size local array:
the histogram is first accumulated in that array, then a barrier is issued, and finally the result is added into the output vector.
I am working with IR images whose dynamic range spans more than 32K bins,
so I cannot allocate a fixed-size local array on the GPU.
My algorithm instead uses an atomic add in order to build the output histogram directly in global memory.
I am interfacing with OpenCV, so to manage possible saturation my bins use floating point, single or double precision depending on the GPU's capabilities.
OpenCV doesn't support unsigned int, long, or unsigned long as matrix data types.
I get an error... I think this error is a kind of segmentation fault.
After several days I still have no idea what can be wrong.
Here is my code:
histogram.cl:
#pragma OPENCL EXTENSION cl_khr_fp64: enable
#pragma OPENCL EXTENSION cl_khr_int64_base_atomics: enable

// Atomic floating-point add implemented as a compare-and-swap loop:
// reinterpret the bits as an integer and retry until no other work-item
// has modified the value in between.
static void Atomic_Add_f64(__global double *val, double delta)
{
    union {
        double f;
        ulong i;
    } old;
    union {
        double f;
        ulong i;
    } new;
    do {
        old.f = *val;
        new.f = old.f + delta;
    } while (atom_cmpxchg((volatile __global ulong *)val, old.i, new.i) != old.i);
}

// 32-bit variant: the compare-exchange must use uint, not ulong,
// since the value is a 4-byte float.
static void Atomic_Add_f32(__global float *val, float delta)
{
    union {
        float f;
        uint i;
    } old;
    union {
        float f;
        uint i;
    } new;
    do {
        old.f = *val;
        new.f = old.f + delta;
    } while (atomic_cmpxchg((volatile __global uint *)val, old.i, new.i) != old.i);
}
__kernel void khist(
    __global const uchar* _src,
    const int src_steps,
    const int src_offset,
    const int rows,
    const int cols,
    __global uchar* _dst,
    const int dst_steps,
    const int dst_offset)
{
    const int gid = get_global_id(0);
    // printf("This message has been printed from the OpenCL kernel %d \n", gid);
    if (gid < rows)
    {
        __global const _Sty* src = (__global const _Sty*)_src;
        __global _Dty* dst = (__global _Dty*)_dst;
        const int src_step1 = src_steps / sizeof(_Sty);
        const int dst_step1 = dst_steps / sizeof(_Dty);
        src += mad24(gid, src_step1, src_offset);
        dst += mad24(gid, dst_step1, dst_offset);
        _Dty one = (_Dty)1;
        for (int c = 0; c < cols; c++)
        {
            const _Rty idx = (_Rty)(*(src + c + src_offset));
            ATOMIC_FUN(dst + idx + dst_offset, one);
        }
    }
}
The function Atomic_Add_f64 comes directly from here and there.
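For reference, here is the same compare-and-swap idiom written host-side in C++ with std::atomic; this is only an illustrative sketch of the retry loop, not part of the program:

#include <atomic>
#include <cstring>

// Add delta to a float shared between threads, mirroring Atomic_Add_f32:
// bit-cast the float to an integer, compute the new value, and retry the
// compare-exchange until no other thread changed the value in between.
void atomic_add_f32(std::atomic<unsigned int>* val, float delta)
{
    unsigned int oldBits = val->load();
    for (;;) {
        float f;
        std::memcpy(&f, &oldBits, sizeof f);   // int bits -> float
        f += delta;
        unsigned int newBits;
        std::memcpy(&newBits, &f, sizeof f);   // float -> int bits
        // on failure, oldBits is reloaded with the current value,
        // so the loop retries against what the other thread wrote
        if (val->compare_exchange_weak(oldBits, newBits))
            break;
    }
}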
main.cpp
#include <opencv2/core.hpp>
#include <opencv2/core/ocl.hpp>
#include <algorithm>
#include <chrono>
#include <cstdlib>
#include <ctime>
#include <fstream>
#include <iostream>
#include <iterator>
#include <sstream>

int main()
{
    cv::Mat_<unsigned short> a(480, 640);
    cv::RNG rng(std::time(nullptr));
    std::for_each(a.begin(), a.end(), [&](unsigned short& v){ v = rng.uniform(0, 100); });

    bool ret = false;
    cv::String file_content;
    {
        std::ifstream file_stream("../test/histogram.cl");
        std::ostringstream file_buf;
        file_buf << file_stream.rdbuf();
        file_content = file_buf.str();
    }

    int output_flag = cv::ocl::Device::getDefault().doubleFPConfig() == 0 ? CV_32F : CV_64F;
    cv::String atomic_fun = output_flag == CV_32F ? "Atomic_Add_f32" : "Atomic_Add_f64";

    cv::ocl::ProgramSource source(file_content);
    // std::cout << source.source() << std::endl;
    cv::ocl::Kernel k;
    cv::UMat src;
    cv::UMat dst = cv::UMat::zeros(1, 65536, output_flag);

    a.copyTo(src);

    atomic_fun = cv::format("-D _Sty=%s -D _Rty=%s -D _Dty=%s -D ATOMIC_FUN=%s",
                            cv::ocl::typeToStr(src.depth()),
                            cv::ocl::typeToStr(src.depth()), // this is to manage cases like a matrix of unsigned short stored as a matrix of float.
                            cv::ocl::typeToStr(output_flag),
                            atomic_fun.c_str());

    ret = k.create("khist", source, atomic_fun);
    std::cout << "check create : " << ret << std::endl;

    k.args(cv::ocl::KernelArg::ReadOnly(src), cv::ocl::KernelArg::WriteOnlyNoSize(dst));

    std::size_t sz = a.rows;
    ret = k.run(1, &sz, nullptr, false);
    std::cout << "check " << ret << std::endl;

    cv::Mat b;
    dst.copyTo(b);
    std::copy_n(b.ptr<double>(0), 101, std::ostream_iterator<double>(std::cout, " "));
    std::cout << std::endl;

    return EXIT_SUCCESS;
}
Hello, I managed to fix it.
I don't really know where the issue comes from,
but if I pass the output as a pointer rather than a matrix, it works.
The changes I made are these:
histogram.cl:
__kernel void khist(
    __global const uchar* _src,
    const int src_steps,
    const int src_offset,
    const int rows,
    const int cols,
    __global _Dty* _dst)
{
    const int gid = get_global_id(0);
    if (gid < rows)
    {
        __global const _Sty* src = (__global const _Sty*)_src;
        __global _Dty* dst = _dst;
        const int src_step1 = src_steps / sizeof(_Sty);
        src += mad24(gid, src_step1, src_offset);
        ulong one = 1;
        for (int c = 0; c < cols; c++)
        {
            const _Rty idx = (_Rty)(*(src + c + src_offset));
            ATOMIC_FUN(dst + idx, one);
        }
    }
}
main.cpp
k.args(cv::ocl::KernelArg::ReadOnly(src),cv::ocl::KernelArg::PtrWriteOnly(dst));
The rest of the code is the same in the two files.
For me it works fine.
If someone knows why it works when the output is declared as a pointer rather than a vector (a matrix of one row), I am interested. (One plausible culprit, looking back at the original kernel: with WriteOnlyNoSize the kernel also receives dst_steps and dst_offset, and dst += mad24(gid, dst_step1, dst_offset) advances each work-item's pointer by gid whole rows, which is out of bounds for a one-row histogram. The pointer version does no such offset arithmetic.)
Nevertheless, my issue is fixed :).
I am learning Thrust at the moment, and I have a question: how do I normalise a vector with Thrust?
I have code that works, but I want to know if this is the optimal method.
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/transform_reduce.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <cmath>

template <typename T>
struct square
{
    __host__ __device__
    T operator() (T x) const
    {
        return x * x;
    }
};

int main()
{
    thrust::device_vector<float> d_x(2);
    thrust::device_vector<float> d_y(2);
    thrust::device_vector<float> d_z(2);
    d_x[0] = 3;
    d_x[1] = 4;

    square<float> unary_op;
    thrust::plus<float> binary_op;
    float init = 0;

    // compute norm
    float norm = std::sqrt(thrust::transform_reduce(d_x.begin(), d_x.end(), unary_op, init, binary_op));

    thrust::fill(d_y.begin(), d_y.end(), 1 / norm);
    thrust::transform(d_x.begin(), d_x.end(), d_y.begin(), d_z.begin(), thrust::multiplies<float>());

    return 0;
}
This should be more efficient because it needs no storage or bandwidth for d_y or d_z:
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <cmath>

int main()
{
    thrust::device_vector<float> d_x(2);
    d_x[0] = 3;
    d_x[1] = 4;

    // the dot product of d_x with itself fuses the squaring and the reduction
    float norm = std::sqrt(thrust::inner_product(d_x.begin(), d_x.end(), d_x.begin(), 0.0f));

    // normalise in place
    using namespace thrust::placeholders;
    thrust::transform(d_x.begin(), d_x.end(), d_x.begin(), _1 / norm);

    return 0;
}
You'll want to make your problem size a few orders of magnitude larger, of course.
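As a concrete check with the two-element vector above: inner_product returns 3*3 + 4*4 = 25, so norm = 5, and the transform leaves d_x = {0.6, 0.8}, a unit vector.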
This question is specific to OpenCV:
The kmeans example given in the OpenCV documentation uses a 2-channel matrix, one channel for each dimension of the feature vector. But some of the other examples seem to say that it should be a one-channel matrix with features along the columns and one row for each sample. Which of these is right?
If I have a 5-dimensional feature vector, what should be the input matrix that I use?
This one:
cv::Mat inputSamples(numSamples, 1, CV_32FC(numFeatures))
or this one:
cv::Mat inputSamples(numSamples, numFeatures, CV_32F)
The correct answer is cv::Mat inputSamples(numSamples, numFeatures, CV_32F).
The OpenCV Documentation about kmeans says:
samples – Floating-point matrix of input samples, one row per sample
So it is not a floating-point vector of n-dimensional floats as in the other option. Which examples suggested such behaviour?
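For the 5-dimensional case from the question, that means (a sketch; the sample count 100 is arbitrary):

// one row per sample, one column per feature, single channel
int numSamples = 100, numFeatures = 5;
cv::Mat inputSamples(numSamples, numFeatures, CV_32F);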
Here is also a small example by me that shows how kmeans can be used. It clusters the pixels of an image and displays the result:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;
int main( int argc, char** argv )
{
Mat src = imread( argv[1], 1 );
Mat samples(src.rows * src.cols, 3, CV_32F);
for( int y = 0; y < src.rows; y++ )
for( int x = 0; x < src.cols; x++ )
for( int z = 0; z < 3; z++)
samples.at<float>(y + x*src.rows, z) = src.at<Vec3b>(y,x)[z];
int clusterCount = 15;
Mat labels;
int attempts = 5;
Mat centers;
kmeans(samples, clusterCount, labels, TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10000, 0.0001), attempts, KMEANS_PP_CENTERS, centers );
Mat new_image( src.size(), src.type() );
for( int y = 0; y < src.rows; y++ )
for( int x = 0; x < src.cols; x++ )
{
int cluster_idx = labels.at<int>(y + x*src.rows,0);
new_image.at<Vec3b>(y,x)[0] = centers.at<float>(cluster_idx, 0);
new_image.at<Vec3b>(y,x)[1] = centers.at<float>(cluster_idx, 1);
new_image.at<Vec3b>(y,x)[2] = centers.at<float>(cluster_idx, 2);
}
imshow( "clustered image", new_image );
waitKey( 0 );
}
As an alternative to reshaping the input matrix manually, you can use the OpenCV reshape function to achieve a similar result with less code. Here is my working implementation of reducing the color count with the K-Means method (in Java):
import java.util.ArrayList;
import java.util.List;

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.TermCriteria;
import org.opencv.highgui.Highgui;
import org.opencv.utils.Converters;

public class ColorMapKMeans {

    private final static int MAX_ITER = 10;
    private final static int CLUSTERS = 16;

    public static Mat colorMapKMeans(Mat img, int K, int maxIterations) {
        // unroll the 3 channels into columns: one row per pixel
        Mat m = img.reshape(1, img.rows() * img.cols());
        m.convertTo(m, CvType.CV_32F);

        Mat bestLabels = new Mat(m.rows(), 1, CvType.CV_8U);
        Mat centroids = new Mat(K, 1, CvType.CV_32F);
        Core.kmeans(m, K, bestLabels,
                new TermCriteria(TermCriteria.COUNT | TermCriteria.EPS, maxIterations, 1E-5),
                1, Core.KMEANS_RANDOM_CENTERS, centroids);

        List<Integer> idx = new ArrayList<>(m.rows());
        Converters.Mat_to_vector_int(bestLabels, idx);

        // replace every pixel row with the centroid of its cluster
        Mat imgMapped = new Mat(m.size(), m.type());
        for (int i = 0; i < idx.size(); i++) {
            Mat row = imgMapped.row(i);
            centroids.row(idx.get(i)).copyTo(row);
        }

        // roll the columns back into 3 channels, restore the row count
        return imgMapped.reshape(3, img.rows());
    }

    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Highgui.imwrite("result.png",
                colorMapKMeans(Highgui.imread(args[0], Highgui.CV_LOAD_IMAGE_COLOR),
                        CLUSTERS, MAX_ITER));
    }
}
OpenCV reads the image into a 2-dimensional, 3-channel matrix. The first call to reshape - img.reshape(1, img.rows() * img.cols()) - essentially unrolls the 3 channels into columns. In the resulting matrix, one row corresponds to one pixel of the input image, and the 3 columns correspond to the RGB components.
After the K-Means algorithm has finished its work and the color mapping has been applied, we call reshape again - imgMapped.reshape(3, img.rows()) - but now rolling the columns back into channels and reducing the row count to the original image's row number, thus getting back the original matrix format, but only with reduced colors.
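The same round-trip looks like this in C++, as a minimal sketch (the image size is arbitrary):

#include <opencv2/core.hpp>
using namespace cv;

int main()
{
    Mat img(480, 640, CV_8UC3);                  // 3-channel image
    // unroll the channels into columns: rows*cols rows, 3 single-channel columns
    Mat m = img.reshape(1, img.rows * img.cols); // 307200 x 3, CV_8UC1
    // ... cluster / remap the rows of m here ...
    // roll the columns back into 3 channels and restore the row count
    Mat back = m.reshape(3, img.rows);           // 480 x 640, CV_8UC3
    return 0;
}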
I'm trying to implement Particle Swarm Optimization on CUDA. I partially initialize the data arrays on the host, then allocate memory on the CUDA device and copy them there, and then try to proceed with the initialization.
The problem is, when I try to modify an array element like so:
__global__ void kernelInit(
    float* X,
    size_t pitch,
    int width,
    float X_high,
    float X_low
) {
    // Silly, but pretty reliable way to address array elements
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int r = tid / width;
    int c = tid % width;
    float* pElement = (float*)((char*)X + r * pitch) + c;
    *pElement = *pElement * (X_high - X_low) - X_low;
    //*pElement = (X_high - X_low) - X_low;
}
it corrupts the values and gives me 1.#INF00 as array elements. When I uncomment the last line *pElement = (X_high - X_low) - X_low; and comment out the previous one, it works as expected: I get values like 15.36 and so on.
I believe the problem is either with my memory allocation and copying, and/or with addressing specific array elements. I read the CUDA manual on both topics, but I can't spot the error: I still get a corrupt array if I do anything with an element of the array. For example, *pElement = *pElement * 2 gives unreasonably big results like 779616...00000000.00000 when the initial pElement is expected to be just a float in [0;1].
Here is the full source. Initialization of the arrays begins in main (at the bottom of the source); then the f1 function does the work for CUDA and launches the initialization kernel kernelInit:
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>

const unsigned f_n = 3;
const unsigned n = 2;
const unsigned p = 64;

typedef struct {
    unsigned k_max;
    float c1;
    float c2;
    unsigned p;
    float inertia_factor;
    float Ef;
    float X_low[f_n];
    float X_high[f_n];
    float X_min[n][f_n];
} params_t;

typedef void (*kernelWrapperType) (
    float *X,
    float *X_highVec,
    float *V,
    float *X_best,
    float *Y,
    float *Y_best,
    float *X_swarmBest,
    bool &termination,
    const float &inertia,
    const params_t *params,
    const unsigned &f
);

typedef float (*twoArgsFuncType) (
    float x1,
    float x2
);

__global__ void kernelInit(
    float* X,
    size_t pitch,
    int width,
    float X_high,
    float X_low
) {
    // Silly, but pretty reliable way to address array elements
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int r = tid / width;
    int c = tid % width;
    float* pElement = (float*)((char*)X + r * pitch) + c;
    *pElement = *pElement * (X_high - X_low) - X_low;
    //*pElement = (X_high - X_low) - X_low;
}

__device__ float kernelF1(
    float x1,
    float x2
) {
    float y = pow(x1, 2.f) + pow(x2, 2.f);
    return y;
}

void f1(
    float *X,
    float *X_highVec,
    float *V,
    float *X_best,
    float *Y,
    float *Y_best,
    float *X_swarmBest,
    bool &termination,
    const float &inertia,
    const params_t *params,
    const unsigned &f
) {
    float *X_d = NULL;
    float *Y_d = NULL;
    unsigned length = n * p;
    const cudaChannelFormatDesc desc = cudaCreateChannelDesc<float4>();
    size_t pitch;
    size_t dpitch;
    cudaError_t err;
    unsigned width = n;
    unsigned height = p;

    err = cudaMallocPitch (&X_d, &dpitch, width * sizeof(float), height);
    pitch = n * sizeof(float);
    err = cudaMemcpy2D(X_d, dpitch, X, pitch, width * sizeof(float), height, cudaMemcpyHostToDevice);

    err = cudaMalloc (&Y_d, sizeof(float) * p);
    err = cudaMemcpy (Y_d, Y, sizeof(float) * p, cudaMemcpyHostToDevice);

    dim3 threads; threads.x = 32;
    dim3 blocks; blocks.x = (length/threads.x) + 1;

    kernelInit<<<threads,blocks>>>(X_d, dpitch, width, params->X_high[f], params->X_low[f]);

    err = cudaMemcpy2D(X, pitch, X_d, dpitch, n*sizeof(float), p, cudaMemcpyDeviceToHost);
    err = cudaFree(X_d);
    err = cudaMemcpy(Y, Y_d, sizeof(float) * p, cudaMemcpyDeviceToHost);
    err = cudaFree(Y_d);
}

float F1(
    float x1,
    float x2
) {
    float y = pow(x1, 2.f) + pow(x2, 2.f);
    return y;
}

/*
 * Generates random float in [0.0; 1.0]
 */
float frand(){
    return (float)rand()/(float)RAND_MAX;
}

/*
 * This is the main routine which declares and initializes the integer vector, moves it to the device, launches the kernel,
 * brings the result vector back to host and dumps it on the console.
 */
int main() {
    const params_t params = {
        100,
        0.5,
        0.5,
        p,
        0.98,
        0.01,
        {-5.12, -2.048, -5.12},
        {5.12, 2.048, 5.12},
        {{0, 1, 0}, {0, 1, 0}}
    };
    float X[p][n];
    float X_highVec[n];
    float V[p][n];
    float X_best[p][n];
    float Y[p] = {0};
    float Y_best[p] = {0};
    float X_swarmBest[n];

    kernelWrapperType F_wrapper[f_n] = {&f1, &f1, &f1};
    twoArgsFuncType F[f_n] = {&F1, &F1, &F1};

    for (unsigned f = 0; f < f_n; f++) {
        printf("Optimizing function #%u\n", f);
        srand ( time(NULL) );
        for (unsigned i = 0; i < p; i++)
            for (unsigned j = 0; j < n; j++)
                X[i][j] = X_best[i][j] = frand();
        for (int i = 0; i < n; i++)
            X_highVec[i] = params.X_high[f];
        for (unsigned i = 0; i < p; i++)
            for (unsigned j = 0; j < n; j++)
                V[i][j] = frand();
        for (unsigned i = 0; i < p; i++)
            Y_best[i] = F[f](X[i][0], X[i][1]);
        for (unsigned i = 0; i < n; i++)
            X_swarmBest[i] = params.X_high[f];
        float y_swarmBest = F[f](X_highVec[0], X_highVec[1]);

        bool termination = false;
        float inertia = 1.;

        for (unsigned k = 0; k < params.k_max; k++) {
            F_wrapper[f]((float *)X, X_highVec, (float *)V, (float *)X_best, Y, Y_best, X_swarmBest, termination, inertia, &params, f);
        }

        for (unsigned i = 0; i < p; i++)
        {
            for (unsigned j = 0; j < n; j++)
            {
                printf("%f\t", X[i][j]);
            }
            printf("F = %f\n", Y[i]);
        }
        getchar();
    }
}
Update: I tried adding error handling like so
err = cudaMallocPitch (&X_d, &dpitch, width * sizeof(float), height);
if (err != cudaSuccess) {
    fprintf(stderr, cudaGetErrorString(err));
    exit(1);
}
after each API call, but it reported nothing and didn't exit (I still get all the results and the program runs to the end).
This is an unnecessarily complex piece of code for what should be a simple repro case, but this immediately jumps out:
const unsigned n = 2;
const unsigned p = 64;

unsigned length = n * p;

dim3 threads; threads.x = 32;
dim3 blocks; blocks.x = (length/threads.x) + 1;

kernelInit<<<threads,blocks>>>(X_d, dpitch, width, params->X_high[f], params->X_low[f]);
So you are firstly computing the incorrect number of blocks, and then reversing the order of the blocks-per-grid and threads-per-block arguments in the kernel launch. That may well lead to out-of-bounds memory access, either hosing something in GPU memory or causing an unspecified launch failure, which your lack of error handling might not be catching. There is a tool called cuda-memcheck which has shipped with the toolkit since about CUDA 3.0. If you run it, it will give you valgrind-style memory access violation reports. You should get into the habit of using it, if you are not already doing so.
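A minimal sketch of the corrected launch, using the same variables as your f1 function (the error-check pattern at the end is the standard one, not taken from your code):

unsigned length = n * p;                            // 2 * 64 = 128 elements
dim3 threads(32);                                   // threads per block
dim3 blocks((length + threads.x - 1) / threads.x);  // = 4, rounded up

// the grid dimension comes first, the block dimension second
kernelInit<<<blocks, threads>>>(X_d, dpitch, width, params->X_high[f], params->X_low[f]);

// kernel launches are asynchronous, so API-call checks alone miss them
err = cudaGetLastError();                // launch-time errors
if (err == cudaSuccess)
    err = cudaDeviceSynchronize();       // execution-time errors
if (err != cudaSuccess) {
    fprintf(stderr, "%s\n", cudaGetErrorString(err));
    exit(1);
}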
As for the infinite values, that is to be expected, isn't it? Your code starts with values in (0, 1), and then does
X[i] = X[i] * (5.12 - (-5.12)) - (-5.12)
100 times, which is the rough equivalent of multiplying by 10^100; this is then followed by
X[i] = X[i] * (2.048 - (-2.048)) - (-2.048)
100 times, which is the rough equivalent of multiplying by 4^100, finally followed by
X[i] = X[i] * (5.12 - (-5.12)) - (-5.12)
again. So your results should be of the order of 1E250, which is much larger than about 3.4E38, the rough upper limit of representable numbers in IEEE 754 single precision.
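As a rough check of where the overflow hits: log10(10.24) ≈ 1.01, so each iteration of the first scaling grows the magnitude by about one order of magnitude, and single precision (max ≈ 3.4×10^38) already overflows to inf after roughly 38 of the 100 iterations. The 1.#INF00 values appear long before the loop finishes.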