I am trying to implement an algorithm to compute histograms of images with more than 256 bins.
The main difficulty with histograms in that case is that it is impossible to allocate more than 32 KB as a local array on the GPU.
All the algorithms I found for 8-bits-per-pixel images use a fixed-size local array.
The histogram is first accumulated in that array, then a barrier is issued, and finally the partial result is added to the output vector.
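To make that classic pattern concrete, here is a minimal sketch (written in CUDA syntax for brevity; the names are mine, and it assumes 8-bit input with 256 bins):

__global__ void hist256(const unsigned char* src, unsigned int* hist, int n)
{
    __shared__ unsigned int local[256];              // fixed-size per-block histogram
    for (int b = threadIdx.x; b < 256; b += blockDim.x)
        local[b] = 0;
    __syncthreads();                                 // the barrier mentioned above

    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x)
        atomicAdd(&local[src[i]], 1u);
    __syncthreads();

    for (int b = threadIdx.x; b < 256; b += blockDim.x)
        atomicAdd(&hist[b], local[b]);               // add the partial result to the output vector
}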
I am working with IR images whose dynamic range spans more than 32K bins, so I cannot allocate a fixed-size array inside the GPU.
My algorithm therefore uses an atomic_add to build the output histogram directly in global memory.
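The same idea without any local array, again as an illustrative CUDA sketch (integer bins for simplicity; my real bins are floating point, as explained next):

// One global atomicAdd per pixel: works for any number of bins (here 65536
// for 16-bit input) at the cost of more global-memory contention.
__global__ void hist16u(const unsigned short* src, unsigned int* hist, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        atomicAdd(&hist[src[i]], 1u);
}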
I am interfacing with OpenCV, so, to handle the possible case of saturation, my bins use floating point: single or double precision, depending on what the GPU supports.
This is because OpenCV does not support unsigned int, long, or unsigned long as matrix types.
I get an error... I think this error is a kind of segmentation fault.
After several days I still have no idea what can be wrong.
Here is my code:
histogram.cl:
#pragma OPENCL EXTENSION cl_khr_fp64: enable
#pragma OPENCL EXTENSION cl_khr_int64_base_atomics: enable

static void Atomic_Add_f64(__global double *val, double delta)
{
    union {
        double f;
        ulong  i;
    } old, new;

    do {
        old.f = *val;
        new.f = old.f + delta;
    } while (atom_cmpxchg((volatile __global ulong *)val, old.i, new.i) != old.i);
}
static void Atomic_Add_f32(__global float *val, float delta)
{
    union {
        float f;
        uint  i;
    } old, new;

    do {
        old.f = *val;
        new.f = old.f + delta;
    } while (atomic_cmpxchg((volatile __global uint *)val, old.i, new.i) != old.i);
}
__kernel void khist(__global const uchar* _src,
                    const int src_steps,
                    const int src_offset,
                    const int rows,
                    const int cols,
                    __global uchar* _dst,
                    const int dst_steps,
                    const int dst_offset)
{
    const int gid = get_global_id(0);
    // printf("This message has been printed from the OpenCL kernel %d \n",gid);

    if(gid < rows)
    {
        __global const _Sty* src = (__global const _Sty*)_src;
        __global _Dty* dst = (__global _Dty*)_dst;

        const int src_step1 = src_steps/sizeof(_Sty);
        const int dst_step1 = dst_steps/sizeof(_Dty);

        src += mad24(gid, src_step1, src_offset);
        dst += mad24(gid, dst_step1, dst_offset);

        _Dty one = (_Dty)1;
        for(int c = 0; c < cols; c++)
        {
            const _Rty idx = (_Rty)(*(src + c + src_offset));
            ATOMIC_FUN(dst + idx + dst_offset, one);
        }
    }
}
The function Atomic_Add_f64 comes directly from here and there.
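For readers unfamiliar with this pattern, the same compare-and-swap loop can be written host-side with std::atomic; the following analogue is only an illustration I added, not part of the program:

#include <atomic>
#include <cstring>

// Emulated atomic float add: retry a 32-bit CAS on the float's bit pattern
// until no other thread has modified the value in between.
void atomic_add_f32(std::atomic<unsigned int>* val, float delta)
{
    unsigned int oldBits = val->load();
    for (;;) {
        float f;
        std::memcpy(&f, &oldBits, sizeof f);  // reinterpret the bits as float
        f += delta;
        unsigned int newBits;
        std::memcpy(&newBits, &f, sizeof f);
        // On failure, compare_exchange_weak reloads oldBits and we retry.
        if (val->compare_exchange_weak(oldBits, newBits))
            break;
    }
}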
main.cpp
#include <opencv2/core.hpp>
#include <opencv2/core/ocl.hpp>
#include <algorithm>
#include <chrono>
#include <ctime>
#include <fstream>
#include <iostream>
#include <iterator>
#include <sstream>

int main()
{
    cv::Mat_<unsigned short> a(480,640);
    cv::RNG rng(std::time(nullptr));

    std::for_each(a.begin(), a.end(), [&](unsigned short& v){ v = rng.uniform(0,100); });

    bool ret = false;
    cv::String file_content;
    {
        std::ifstream file_stream("../test/histogram.cl");
        std::ostringstream file_buf;
        file_buf<<file_stream.rdbuf();
        file_content = file_buf.str();
    }

    int output_flag = cv::ocl::Device::getDefault().doubleFPConfig() == 0 ? CV_32F : CV_64F;
    cv::String atomic_fun = output_flag == CV_32F ? "Atomic_Add_f32" : "Atomic_Add_f64";

    cv::ocl::ProgramSource source(file_content);
    // std::cout<<source.source()<<std::endl;
    cv::ocl::Kernel k;
    cv::UMat src;
    cv::UMat dst = cv::UMat::zeros(1,65536,output_flag);

    a.copyTo(src);

    atomic_fun = cv::format("-D _Sty=%s -D _Rty=%s -D _Dty=%s -D ATOMIC_FUN=%s",
                            cv::ocl::typeToStr(src.depth()),
                            cv::ocl::typeToStr(src.depth()), // handles cases like a matrix of unsigned short stored as a matrix of float.
                            cv::ocl::typeToStr(output_flag),
                            atomic_fun.c_str());

    ret = k.create("khist", source, atomic_fun);
    std::cout<<"check create : "<<ret<<std::endl;

    k.args(cv::ocl::KernelArg::ReadOnly(src), cv::ocl::KernelArg::WriteOnlyNoSize(dst));

    std::size_t sz = a.rows;
    ret = k.run(1, &sz, nullptr, false);
    std::cout<<"check "<<ret<<std::endl;

    cv::Mat b;
    dst.copyTo(b);

    if(output_flag == CV_64F)
        std::copy_n(b.ptr<double>(0), 101, std::ostream_iterator<double>(std::cout," "));
    else
        std::copy_n(b.ptr<float>(0), 101, std::ostream_iterator<float>(std::cout," "));
    std::cout<<std::endl;

    return EXIT_SUCCESS;
}
Hello, I managed to fix it.
I don't really know where the issue came from, but if I pass the output as a pointer rather than as a matrix, it works.
The changes I made are these:
histogram.cl:
__kernel void khist(__global const uchar* _src,
                    const int src_steps,
                    const int src_offset,
                    const int rows,
                    const int cols,
                    __global _Dty* _dst)
{
    const int gid = get_global_id(0);

    if(gid < rows)
    {
        __global const _Sty* src = (__global const _Sty*)_src;
        __global _Dty* dst = _dst;

        const int src_step1 = src_steps/sizeof(_Sty);
        src += mad24(gid, src_step1, src_offset);

        ulong one = 1;
        for(int c = 0; c < cols; c++)
        {
            const _Rty idx = (_Rty)(*(src + c + src_offset));
            ATOMIC_FUN(dst + idx, one);
        }
    }
}
main.cpp
k.args(cv::ocl::KernelArg::ReadOnly(src),cv::ocl::KernelArg::PtrWriteOnly(dst));
The rest of the code is the same in both files.
For me it works fine.
If someone knows why it works when the output is declared as a pointer rather than as a vector (a matrix of one row), I am interested.
Nevertheless, my issue is fixed :).
My main goal is to load frames from a video with OpenCV, copy each frame to NVIDIA GPU memory, resize it with a CUDA-based nearest-neighbour algorithm, then copy it back to the host side and visualise it with cv::imshow().
Unfortunately, I always get segmentation faults. There could be a problem with the number of bytes being copied, or with the data conversions.
Below, you can find the main parts of the source code, but here is the repo for the full project:
https://github.com/foxakarmi/imageResize
Main function:
#include <iostream>
#include "cuda_utils.h"
#include "yololayer.h"
#include <opencv2/highgui/highgui.hpp>

void *buffers[3];

int main() {
    cv::VideoCapture capture;
    cv::Mat frame;
    capture.open("/p.mp4");

    if (!capture.isOpened()) {
        std::cout << "can not open" << std::endl;
        return -1;
    }
    capture.read(frame);

    CUDA_CHECK(cudaMalloc(&buffers[0], frame.cols * frame.step[0]));
    CUDA_CHECK(cudaMalloc(&buffers[1], 3 * 640 * 640));
    buffers[2] = malloc(3 * 640 * 640);

    while (capture.read(frame)) {
        CUDA_CHECK(cudaMemcpy(buffers[0], frame.ptr(), frame.step[0] * frame.rows, cudaMemcpyHostToDevice))

        cudaNearestResize((uchar *) buffers[0], (uchar *) buffers[1], frame.cols, frame.rows, 640, 640);

        CUDA_CHECK(cudaMemcpy(buffers[2], buffers[1], 640 * 640 * 3, cudaMemcpyDeviceToHost))

        cv::Mat foo;
        foo.data = static_cast<uchar *>(buffers[2]);

        cv::imshow("img", foo);
        cv::waitKey(1);
    }

    capture.release();
    return 0;
}
The .cu file containing the kernel and a wrapper function:
#include <opencv2/core/hal/interface.h>
#include "yololayer.h"
#include "cuda_utils.h"

__global__ void kernelNearestNeighbourResize(uchar *src_img, uchar *dst_img,
    int src_w, int src_h, int dst_w, int dst_h) {

    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;
    int channel = 3;

    if (i < dst_h && j < dst_w) {
        int iIn = i * src_h / dst_h;
        int jIn = j * src_w / dst_h;

        dst_img[(i * dst_w + j) * channel + 0] = src_img[(iIn * src_w + jIn) * channel + 0];
        dst_img[(i * dst_w + j) * channel + 1] = src_img[(iIn * src_w + jIn) * channel + 1];
        dst_img[(i * dst_w + j) * channel + 2] = src_img[(iIn * src_w + jIn) * channel + 2];
    }
}

cudaError_t cudaNearestResize(uchar *src_img, uchar *dst_img, int src_w, int src_h, int dst_w, int dst_h) {
    if (!src_img || !dst_img)
        return cudaErrorInvalidDevicePointer;
    if (src_w == 0 || src_h == 0 || dst_w == 0 || dst_h == 0)
        return cudaErrorInvalidValue;

    kernelNearestNeighbourResize <<< 3600, 256 >>>(
        src_img, dst_img, src_w,
        src_h, dst_w, dst_h);

    return cudaGetLastError();
}
Below you can see a complete working solution.
There are 3 main issues in your code:
The setup for the CUDA grid is incorrect. See an example of how to set it in my code below (just an initial working version that you can further improve). See some general info here: The CUDA Programming Model.
Note: the grid setup can have a meaningful effect on the overall performance, and it is not trivial to optimize.
See more info here: How do I choose grid and block dimensions for CUDA kernels?
When copying the data to the device, you used frame.ptr() instead of frame.data.
You only set the data pointer for the output cv::Mat foo, without properly initializing it.
So the cv::Mat metadata (rows, cols etc.) was not set and cv::imshow could not show it properly.
In my code it is not required - see below.
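(If you ever do want to wrap an existing buffer, cv::Mat has a constructor that takes the metadata explicitly; an illustrative one-liner using the question's 640x640 BGR buffer:)

// Wrap buffers[2] with full metadata (640x640, 3 uchar channels);
// the Mat does not take ownership of the buffer.
cv::Mat foo(640, 640, CV_8UC3, buffers[2]);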
Note that your code skips the first frame. I kept this behavior. You could include the first frame by checking if dst_img was already initialized, and if not (since it's the first frame) - initialize it and the CUDA buffers.
Some more notes on the code below:
There's no need to allocate buffer[2] for the host output image.
Instead I initialized the cv::Mat with the proper size and used its allocated buffer.
I renamed the device buffers, and added cudaFree for them.
It is safer to pass the number of channels to the kernel, rather than making it assume it is 3.
I passed the step (AKA stride) of the images to the kernel. This will support the case where the images have padding (see about it here: stride and padding of an image).
Code for main:
#include <iostream>
#include <opencv2/highgui/highgui.hpp>
#include "cuda_runtime.h"
#include <assert.h>

#define CUDA_CHECK(x) { cudaError_t cudaStatus = x; assert(cudaStatus == cudaSuccess); }

cudaError_t cudaNearestResize(unsigned char *src_img, unsigned char *dst_img, int channel,
    int src_w, int src_h, int src_step, int dst_w, int dst_h, int dst_step);

int main()
{
    cv::VideoCapture capture;
    cv::Mat frame;
    capture.open("/p.mp4");
    if (!capture.isOpened())
    {
        std::cout << "can not open" << std::endl;
        return -1;
    }
    capture.read(frame);

    int src_w = frame.cols;
    int src_h = frame.rows;
    int src_step = (int)frame.step[0];
    int channels = frame.channels();
    int data_type = frame.type();
    assert((data_type & CV_MAT_DEPTH_MASK) == CV_8U);  // assert that it is a uchar image

    // Parameters you can change:
    int dst_w = 640;
    int dst_h = 640;

    cv::Mat dst_img(dst_h, dst_w, data_type);
    int dst_step = (int)dst_img.step[0];

    void * src_dev_buffer;
    void * dst_dev_buffer;
    CUDA_CHECK(cudaMalloc(&src_dev_buffer, src_h * src_step));
    CUDA_CHECK(cudaMalloc(&dst_dev_buffer, dst_h * dst_step));

    while (capture.read(frame))
    {
        // assert that the current frame has the same type and dimensions as the first one
        // (should be guaranteed by the video decoder):
        assert(frame.cols == src_w);
        assert(frame.rows == src_h);
        assert((int)frame.step[0] == src_step);
        assert(frame.type() == data_type);

        CUDA_CHECK(cudaMemcpy(src_dev_buffer, frame.data, src_h * src_step, cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaNearestResize((unsigned char *)src_dev_buffer, (unsigned char *)dst_dev_buffer, channels, src_w, src_h, src_step, dst_w, dst_h, dst_step));
        CUDA_CHECK(cudaMemcpy(dst_img.data, dst_dev_buffer, dst_h * dst_step, cudaMemcpyDeviceToHost));

        cv::imshow("dst_img", dst_img);
        cv::waitKey(1);
    }

    CUDA_CHECK(cudaFree(src_dev_buffer));
    CUDA_CHECK(cudaFree(dst_dev_buffer));
    capture.release();
    return 0;
}
Code for the CUDA kernel and the wrapping function:
#include "cuda_runtime.h"
__global__ void kernelNearestNeighbourResize(unsigned char *src_img, unsigned char *dst_img, int channels,
int src_w, int src_h, int src_step, int dst_w, int dst_h, int dst_step)
{
int i = blockDim.y * blockIdx.y + threadIdx.y;
int j = blockDim.x * blockIdx.x + threadIdx.x;
if (i < dst_h && j < dst_w)
{
int iIn = i * src_h / dst_h;
int jIn = j * src_w / dst_w;
int src_offset = i * dst_step + j * channels;
int dst_offset = iIn * src_step + jIn * channels;
for (int c = 0; c < channels; ++c)
{
dst_img[src_offset + c] = src_img[dst_offset + c];
}
}
}
cudaError_t cudaNearestResize(unsigned char *src_img, unsigned char *dst_img, int channels,
int src_w, int src_h, int src_step, int dst_w, int dst_h, int dst_step)
{
if (!src_img || !dst_img)
return cudaErrorInvalidDevicePointer;
if (src_w == 0 || src_h == 0 || dst_w == 0 || dst_h == 0)
return cudaErrorInvalidValue;
// The grid dimensions
dim3 dimBlock(32, 32);
dim3 dimGrid(dst_w / 32 + 1, dst_h / 32 + 1);
kernelNearestNeighbourResize << < dimGrid, dimBlock >> >(
src_img, dst_img, channels,
src_w, src_h, src_step, dst_w, dst_h, dst_step);
return cudaGetLastError();
}
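A small aside on the grid computation above: dst_w / 32 + 1 always covers the image, but it launches one extra column/row of blocks when the size is an exact multiple of 32 (as 640 is). The usual ceil-division idiom is exact:

// Exact number of 32x32 blocks needed to cover dst_w x dst_h.
dim3 dimBlock(32, 32);
dim3 dimGrid((dst_w + dimBlock.x - 1) / dimBlock.x,
             (dst_h + dimBlock.y - 1) / dimBlock.y);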
I have an array, already initialized, that I am trying to use in each thread of the kernel call (each thread uses a different part of the array, so there are no dependencies). I create the array, allocate memory on the device using cudaMalloc, and copy the array from host to device using cudaMemcpy.
I pass the pointer returned by cudaMalloc to the kernel call to be used by each thread.
int SIZE = 100;
int* data = new int[SIZE];
int* d_data = 0;
cutilSafeCall( cudaMalloc(&d_data, SIZE * sizeof(int)) );
for (int i = 0; i < SIZE; i++)
    data[i] = i;
cutilSafeCall( cudaMemcpy(d_data, data, SIZE * sizeof(int), cudaMemcpyHostToDevice) );
This code was taken from here.
For the kernel call:
kernel<<<blocks, threads>>> (results, d_data);
I keep track of the results from each thread by using the struct Result. The next code works without errors.
__global__ void mainKernel(Result res[], int* data){
    int x = data[0];
}
But when I assign that value to res:
__global__ void mainKernel(Result res[], int* data){
    int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
    int x = data[0];
    res[threadId].x = x;
}
An error is raised:
cudaSafeCall() Runtime API error in file , line 355 : an illegal memory access was encountered.
The same error appears with any operation involving the use of that pointer:
__global__ void mainKernel(Result res[], int* data){
    int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
    int x = data[0];
    if (x > 10)
        res[threadId].x = 5;
}
There is no problem with the definition of res. Assigning any other value to res[threadId].x does not give me any error.
This is the output of running cuda-memcheck:
========= Invalid __global__ read of size 4
========= at 0x00000150 in mainKernel(Result*, int*)
========= by thread (86,0,0) in block (49,0,0)
========= Address 0x13024c0000 is out of bounds
========= Saved host backtrace up to driver entry point at kernel launch time
========= Host Frame:/usr/lib/x86_64-linux-gnu/libcuda.so.1 (cuLaunchKernel + 0x2cd) [0x150d6d]
========= Host Frame:./out [0x2cc4b]
========= Host Frame:./out [0x46c23]
========= Host Frame:./out [0x3e37]
========= Host Frame:./out [0x3ca1]
========= Host Frame:./out [0x3cd6]
========= Host Frame:./out [0x39e9]
========= Host Frame:/lib/x86_64-linux-gnu/libc.so.6 (__libc_start_main + 0xf5) [0x21ec5]
========= Host Frame:./out [0x31b9]
EDIT:
This is an example of the full code:
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <iostream>
#include <assert.h>

typedef struct
{
    int x,y,z;
} Result;

__global__ void mainKernel(Result pResults[], int* dataimage)
{
    int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
    int xVal = dataimage[0];
    if (xVal > 10)
        pResults[threadId].x = 5;
}

int main (int argc, char** argv)
{
    int NUM_THREADS = 5*5;
    int SIZE = 100;
    int* data = new int[SIZE];
    int* d_data = 0;

    cutilSafeCall( cudaMalloc(&d_data, SIZE * sizeof(int)) );
    for (int i = 0; i < SIZE; i++)
        data[i] = i;
    cutilSafeCall( cudaMemcpy(d_data, data, SIZE * sizeof(int), cudaMemcpyHostToDevice) );

    unsigned int GPU_ID = 1; // not actually :-)
    // unsigned int GPU_ID = cutGetMaxGflopsDeviceId();
    cudaSetDevice(GPU_ID);

    Result * results_GPU = 0;
    cutilSafeCall( cudaMalloc( &results_GPU, NUM_THREADS * sizeof(Result)) );

    Result * results_CPU = 0;
    cutilSafeCall( cudaMallocHost( &results_CPU, NUM_THREADS * sizeof(Result)) );

    mainKernel<<<5,5>>> ( results_GPU, d_data );
    cudaThreadSynchronize();

    cutilSafeCall( cudaMemcpy(results_CPU, results_GPU, NUM_THREADS * sizeof(Result), cudaMemcpyDeviceToHost) );

    cutilSafeCall(cudaFree(results_GPU));
    cutilSafeCall(cudaFreeHost(results_CPU));
    cudaThreadExit();
} // ()
Your problem lies in this sequence of calls:
cutilSafeCall( cudaMalloc(&d_data, SIZE * sizeof(int)) );
for (int i = 0; i < SIZE; i++)
    data[i] = i;
cutilSafeCall( cudaMemcpy(d_data, data, SIZE * sizeof(int), cudaMemcpyHostToDevice) );

unsigned int GPU_ID = 1;
cudaSetDevice(GPU_ID);

Result * results_GPU = 0;
cutilSafeCall( cudaMalloc( &results_GPU, NUM_THREADS * sizeof(Result)) );

Result * results_CPU = 0;
cutilSafeCall( cudaMallocHost( &results_CPU, NUM_THREADS * sizeof(Result)) );

mainKernel<<<5,5>>> ( results_GPU, d_data );
What is effectively happening is that you are allocating d_data on one GPU and running your kernel on a different one, so d_data is not valid on the GPU you are launching the kernel on.
In detail, because you call cudaMalloc for d_data before cudaSetDevice, you are allocating d_data on the default device, and then explicitly allocating results_GPU and running the kernel on device 1. Clearly device 1 and the default device are not the same GPU (enumeration of devices usually starts at 0 in the runtime API).
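As a side note, you can verify at runtime which device owns an allocation with cudaPointerGetAttributes; a small illustrative helper (not part of the original code):

#include <cstdio>
#include <cuda_runtime.h>

// Prints the device that owns a device pointer (illustrative debugging helper).
void printPointerDevice(const void* p)
{
    cudaPointerAttributes attr;
    if (cudaPointerGetAttributes(&attr, p) == cudaSuccess)
        printf("pointer %p lives on device %d\n", p, attr.device);
}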
If you change the code like this:
unsigned int GPU_ID = 1;
cutilSafeCall(cudaSetDevice(GPU_ID));

cutilSafeCall( cudaMalloc(&d_data, SIZE * sizeof(int)) );
for (int i = 0; i < SIZE; i++)
    data[i] = i;
cutilSafeCall( cudaMemcpy(d_data, data, SIZE * sizeof(int), cudaMemcpyHostToDevice) );

Result * results_GPU = 0;
cutilSafeCall( cudaMalloc( &results_GPU, NUM_THREADS * sizeof(Result)) );

Result * results_CPU = 0;
cutilSafeCall( cudaMallocHost( &results_CPU, NUM_THREADS * sizeof(Result)) );

mainKernel<<<5,5>>> ( results_GPU, d_data );
i.e. select the non-default device before any allocations are made, and the problem should disappear. The reason this doesn't happen with your very simple kernel:
__global__ void mainKernel(Result res[], int* data){
    int x = data[0];
}
is simply that the CUDA compiler performs very aggressive optimisations by default. Because the result of the read of data[0] isn't actually used, the entire read can be optimised away, and you are left with an empty stub kernel which doesn't do anything. Only when the result of the load from memory is used in a memory write will the code not be optimised away during compilation. You can confirm this yourself by disassembling the code emitted by the compiler, if you are curious.
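For example, one illustrative way to inspect the emitted machine code with the CUDA toolchain (the file names here are mine):

$ nvcc -arch=sm_20 -cubin -o kernel.cubin kernel.cu
$ cuobjdump -sass kernel.cubin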
Note that there are ways to make this work on multi-GPU systems that support it, via peer-to-peer access, but that must be explicitly configured in your code for that facility to be used.
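For illustration only, a sketch of what that configuration looks like, assuming devices 0 and 1 are peer-capable:

// Allow device 1 to access allocations made on device 0 directly.
int canAccess = 0;
cudaDeviceCanAccessPeer(&canAccess, 1, 0);  // can device 1 access device 0?
if (canAccess) {
    cudaSetDevice(1);
    cudaDeviceEnablePeerAccess(0, 0);       // second argument (flags) must be 0
}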
I want to make the printf function print from right to left. This program converts the value of a number to binary, and I want it printed in the proper form: for example, if I convert 16, it comes out as 00001, but it should look like 10000. Does anyone know how to do that? Thanks in advance.
#include <stdio.h>
#include <stdlib.h>

int main()
{
    int x, rem;

    printf("please enter number: ");
    scanf("%d", &x);

    while (x != 0)
    {
        rem = x % 2;
        if (rem == 0)
        {
            printf("0");
        }
        else
        {
            printf("1");
        }
        x = x / 2;
        rem = 0;
    }
    return 0;
}
Here it is:
void print_binary(int x)
{
    int skip = 1;
    unsigned int mask = 1u << 31; /* highest bit of a 32-bit word */
    while (mask > 0) {
        if (x & mask) {
            skip = 0;
            printf("1");
        } else {
            if (!skip) printf("0");
        }
        mask >>= 1;
    }
    printf("\n");
}
This will print the binary number without leading zeroes.
If you'd rather have the result stored in a string, you can use:
#include <string.h>

void int_to_binary(int x, char * buff) // buff size must be >= 33 (32 digits + '\0')!
{
    buff[0] = '\0'; // start with an empty string so strcat appends from the beginning
    unsigned int mask = 1u << 31;
    for (; mask > 0; mask >>= 1)
    {
        strcat(buff, (x & mask) ? "1" : "0");
    }
}
To check both codes, use:
int main(int argc, char* argv[])
{
    int x;
    printf("please enter number: ");
    scanf("%d", &x);

    char bin[33];
    int_to_binary(x, bin);
    printf("%s\n", bin);

    print_binary(x);
}
What we do is use a mask, which in binary is a single "1" starting at the far left and moving one step right on each loop iteration. The "&" is a bit-wise operator (I'll let you google how it works). For example, with x = 16 nothing is printed until the mask reaches binary 10000; from there the output is 10000, with no leading zeroes. If you need more explanation, feel free to ask.
#include <stdio.h>
#include <stdlib.h>

int main()
{
    int binary[32]; /* enough digits for a 32-bit int */
    int q, i = 0;

    printf("Enter the decimal no\n");
    scanf("%d", &q);

    while (q > 0)
    {
        binary[i] = q % 2;
        i++;
        q = q / 2;
    }

    for (int j = i - 1; j >= 0; j--)
    {
        printf("%d", binary[j]);
    }
    return 0;
}
I used the Eigen library to convert several itk::Image images into matrices and do some dense linear algebra computations on them. Finally, I have the output as a matrix, but I need it in itk::Image form. Is there any way to do this?
const unsigned int numberOfPixels = importSize[0] * importSize[1];
float* array1 = inverseU.data();
float* localBuffer = new float[numberOfPixels];
std::memcpy(localBuffer, array1, numberOfPixels);
const bool importImageFilterWillOwnTheBuffer = true;
importFilter->SetImportPointer(localBuffer,numberOfPixels,importImageFilterWillOwnTheBuffer);
importFilter->Update();
inverseU is the Eigen matrix (float), and importSize is the size of this matrix. When I take importFilter->GetOutput() and write the result to a file, the image I get is not correct.
This is the matrix inverseU: https://drive.google.com/file/d/0B3L9EtRhN11QME16SGtfSDJzSWs/view?usp=sharing . It is supposed to give a retinal fundus image in image form; I got the matrix after doing deblurring.
Take a look at ITK's ImportImageFilter. In particular, it may be used to build an itk::Image starting from a C-style array (example).
Someone recently asked how to convert a CImg image to an ITK image. My answer might be a starting point...
A way to get the array out of an Eigen matrix A may be found here:
double* array = A.data();
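One caveat worth adding here, as an aside: Eigen matrices are column-major by default, while an itk::Image buffer is read in x-fastest order, so depending on how you map rows and columns to the image axes the result can come out transposed. If that happens, one option is a row-major matrix type; a minimal illustrative sketch:

#include <Eigen/Dense>

// Row-major float matrix, so that .data() walks the elements row by row.
using RowMajorMatrixXf =
    Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;

int main()
{
    RowMajorMatrixXf mat(42, 90);
    mat.setZero();
    const float* buf = mat.data(); // row 0 first, then row 1, ...
    (void)buf;
    return 0;
}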
EDIT: here is a piece of code to turn a matrix of float into a PNG image saved with ITK. First, the matrix is converted to an itk::Image of float. Then this image is rescaled and cast to an image of unsigned char, using the RescaleIntensityImageFilter as explained here. Finally, the image is saved in PNG format.
#include <iostream>
#include <itkImage.h>
using namespace itk;
using namespace std;

#include <Eigen/Dense>
using Eigen::MatrixXf;

#include <itkImportImageFilter.h>
#include <itkImageFileWriter.h>
#include "itkRescaleIntensityImageFilter.h"

void eigen_To_ITK (MatrixXf mat)
{
    const unsigned int Dimension = 2;
    typedef itk::Image<unsigned char, Dimension> UCharImageType;
    typedef itk::Image< float, Dimension > FloatImageType;
    typedef itk::ImportImageFilter< float, Dimension > ImportFilterType;
    ImportFilterType::Pointer importFilter = ImportFilterType::New();

    typedef itk::RescaleIntensityImageFilter< FloatImageType, UCharImageType > RescaleFilterType;
    RescaleFilterType::Pointer rescaleFilter = RescaleFilterType::New();

    typedef itk::ImageFileWriter< UCharImageType > WriterType;
    WriterType::Pointer writer = WriterType::New();

    FloatImageType::SizeType imsize;
    imsize[0] = mat.rows();
    imsize[1] = mat.cols();

    ImportFilterType::IndexType start;
    start.Fill( 0 );

    ImportFilterType::RegionType region;
    region.SetIndex( start );
    region.SetSize( imsize );
    importFilter->SetRegion( region );

    const itk::SpacePrecisionType origin[ Dimension ] = { 0.0, 0.0 };
    importFilter->SetOrigin( origin );

    const itk::SpacePrecisionType spacing[ Dimension ] = { 1.0, 1.0 };
    importFilter->SetSpacing( spacing );

    const unsigned int numberOfPixels = imsize[0] * imsize[1];
    const bool importImageFilterWillOwnTheBuffer = true;
    float * localBuffer = new float[ numberOfPixels ];
    float * it = localBuffer;
    memcpy(it, mat.data(), numberOfPixels * sizeof(float));
    importFilter->SetImportPointer( localBuffer, numberOfPixels, importImageFilterWillOwnTheBuffer );

    rescaleFilter->SetInput(importFilter->GetOutput());
    rescaleFilter->SetOutputMinimum(0);
    rescaleFilter->SetOutputMaximum(255);

    writer->SetFileName( "output.png" );
    writer->SetInput(rescaleFilter->GetOutput() );
    writer->Update();
}

int main()
{
    const int rows = 42;
    const int cols = 90;

    MatrixXf mat1(rows, cols);
    mat1.topLeftCorner(rows/2, cols/2) = MatrixXf::Zero(rows/2, cols/2);
    mat1.topRightCorner(rows/2, cols/2) = MatrixXf::Identity(rows/2, cols/2);
    mat1.bottomLeftCorner(rows/2, cols/2) = -MatrixXf::Identity(rows/2, cols/2);
    mat1.bottomRightCorner(rows/2, cols/2) = MatrixXf::Zero(rows/2, cols/2);
    mat1 += 0.1 * MatrixXf::Random(rows, cols);

    eigen_To_ITK(mat1);

    cout << "running fine" << endl;
    return 0;
}
The program is built using CMake. Here is the CMakeLists.txt:
cmake_minimum_required(VERSION 2.8 FATAL_ERROR)
project(ItkTest)
find_package(ITK REQUIRED)
include(${ITK_USE_FILE})
# to include eigen. This path may need to be changed
include_directories(/usr/local/include/eigen3)
add_executable(MyTest main.cpp)
target_link_libraries(MyTest ${ITK_LIBRARIES})
I have a CUDA kernel which takes an edge image and processes it to create a smaller, 1D array of the edge pixels. Now here is the strange behaviour: every time I run the kernel and calculate the number of edge pixels in "d_nlist" (see the code near the printf), I get a greater pixel count than in the previous run, even when I use the same image and stop the program completely and re-run. Therefore, each run takes longer, until eventually it throws an uncaught exception.
My question is: how can I stop this from happening, so that I can get consistent results each time I run the kernel?
My device is a GeForce 620.
Constants:
THREADS_X = 32
THREADS_Y = 4
PIXELS_PER_THREAD = 4
MAX_QUEUE_LENGTH = THREADS_X * THREADS_Y * PIXELS_PER_THREAD
IMG_WIDTH = 256
IMG_HEIGHT = 256
IMG_SIZE = IMG_WIDTH * IMG_HEIGHT
BLOCKS_X = IMG_WIDTH / (THREADS_X * PIXELS_PER_THREAD)
BLOCKS_Y = IMG_HEIGHT / THREADS_Y
The kernel is as follows:
__global__ void convert2DEdgeImageTo1DArray( unsigned char const * const image,
    unsigned int* const list, int* const glob_index ) {

    unsigned int const x = blockIdx.x * THREADS_X*PIXELS_PER_THREAD + threadIdx.x;
    unsigned int const y = blockIdx.y * THREADS_Y + threadIdx.y;

    volatile int qindex = -1;
    volatile __shared__ int sh_qindex[THREADS_Y];
    volatile __shared__ int sh_qstart[THREADS_Y];
    sh_qindex[threadIdx.y] = -1;

    // Start by making an array
    volatile __shared__ unsigned int sh_queue[MAX_QUEUE_LENGTH];

    // Fill the queue
    for(int i=0; i<PIXELS_PER_THREAD; i++)
    {
        int const xx = i*THREADS_X + x;

        // Read one image pixel from global memory
        unsigned char const pixel = image[y*IMG_WIDTH + xx];
        unsigned int const queue_val = (y << 16) + xx;

        if(pixel)
        {
            do {
                qindex++;
                sh_qindex[threadIdx.y] = qindex;
                sh_queue[threadIdx.y*THREADS_X*PIXELS_PER_THREAD + qindex] = queue_val;
            } while (sh_queue[threadIdx.y*THREADS_X*PIXELS_PER_THREAD + qindex] != queue_val);
        }

        // Reload index from smem (last thread to write to smem will have updated it)
        qindex = sh_qindex[threadIdx.y];
    }

    // Let thread 0 reserve the space required in the global list
    __syncthreads();
    if(threadIdx.x == 0 && threadIdx.y == 0)
    {
        // Find how many items are stored in each list
        int total_index = 0;
        #pragma unroll
        for(int i=0; i<THREADS_Y; i++)
        {
            sh_qstart[i] = total_index;
            total_index += (sh_qindex[i] + 1u);
        }

        // Calculate the offset in the global list
        unsigned int global_offset = atomicAdd(glob_index, total_index);
        #pragma unroll
        for(int i=0; i<THREADS_Y; i++)
        {
            sh_qstart[i] += global_offset;
        }
    }
    __syncthreads();

    // Copy local queues to global queue
    for(int i=0; i<=qindex; i+=THREADS_X)
    {
        if(i + threadIdx.x > qindex)
            break;

        unsigned int qvalue = sh_queue[threadIdx.y*THREADS_X*PIXELS_PER_THREAD + i + threadIdx.x];
        list[sh_qstart[threadIdx.y] + i + threadIdx.x] = qvalue;
    }
}
The following is the method which calls the kernel:
void call2DTo1DKernel(unsigned char const * const h_image)
{
    // Device side allocation
    unsigned char *d_image = NULL;
    unsigned int *d_list = NULL;
    int h_nlist, *d_nlist = NULL;
    cudaMalloc((void**)&d_image, sizeof(unsigned char)*IMG_SIZE);
    cudaMalloc((void**)&d_list, sizeof(unsigned int)*IMG_SIZE);
    cudaMalloc((void**)&d_nlist, sizeof(int));

    // Time measurement initialization
    cudaEvent_t start, stop, startio, stopio;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventCreate(&startio);
    cudaEventCreate(&stopio);

    // Start timer w/ io
    cudaEventRecord(startio,0);

    // Copy image data to device
    cudaMemcpy((void*)d_image, (void*)h_image, sizeof(unsigned char)*IMG_SIZE, cudaMemcpyHostToDevice);

    // Start timer
    cudaEventRecord(start,0);

    // Kernel call
    // Phase 1 : Convert 2D binary image to 1D pixel array
    dim3 dimBlock1(THREADS_X, THREADS_Y);
    dim3 dimGrid1(BLOCKS_X, BLOCKS_Y);
    convert2DEdgeImageTo1DArray<<<dimGrid1, dimBlock1>>>(d_image, d_list, d_nlist);

    // Stop timer
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);

    // Stop timer w/ io
    cudaEventRecord(stopio,0);
    cudaEventSynchronize(stopio);

    // Time measurement
    cudaEventElapsedTime(&et,start,stop);
    cudaEventElapsedTime(&etio,startio,stopio);

    // Time measurement deinitialization
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaEventDestroy(startio);
    cudaEventDestroy(stopio);

    // Get list size
    cudaMemcpy((void*)&h_nlist, (void*)d_nlist, sizeof(int), cudaMemcpyDeviceToHost);

    // Report on console
    printf("%d pixels processed...\n", h_nlist);

    // Device side dealloc
    cudaFree(d_image);
    cudaFree(d_space);
    cudaFree(d_list);
    cudaFree(d_nlist);
}
Thank you very much in advance for your help, everyone.
As a preamble, let me suggest some troubleshooting steps that are useful:
instrument your code with proper cuda error checking
run your code with cuda-memcheck e.g. cuda-memcheck ./myapp
If you do the above steps, you'll find that your kernel is failing, and that the failures have to do with global writes of size 4. That will focus your attention on the last segment of your kernel, beginning with the comment // Copy local queues to global queue
Regarding your code, then, you have at least 2 problems:
The addressing/indexing in the final segment of your kernel, where you are writing the individual queues out to global memory, is messed up. I'm not going to try to debug this for you.
You are not initializing your d_nlist variable to zero. Therefore, when you do an atomic add to it, you are adding your values to a junk value, which will tend to increase as you repeat the process.
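As an aside, a compact way to do that initialization (equivalent to the explicit host-to-device copy of a zeroed h_nlist used in the code below) is cudaMemset:

// Zero the device-side counter before each kernel launch.
cudaMemset(d_nlist, 0, sizeof(int));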
Here's some code with those problems removed (I did not try to sort out your queue copy code) and error checking added. It produces repeatable results for me:
$ cat t216.cu
#include <stdio.h>
#include <stdlib.h>

#define THREADS_X 32
#define THREADS_Y 4
#define PIXELS_PER_THREAD 4
#define MAX_QUEUE_LENGTH (THREADS_X*THREADS_Y*PIXELS_PER_THREAD)
#define IMG_WIDTH 256
#define IMG_HEIGHT 256
#define IMG_SIZE (IMG_WIDTH*IMG_HEIGHT)
#define BLOCKS_X (IMG_WIDTH/(THREADS_X*PIXELS_PER_THREAD))
#define BLOCKS_Y (IMG_HEIGHT/THREADS_Y)

#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)

__global__ void convert2DEdgeImageTo1DArray( unsigned char const * const image,
    unsigned int* const list, int* const glob_index ) {

    unsigned int const x = blockIdx.x * THREADS_X*PIXELS_PER_THREAD + threadIdx.x;
    unsigned int const y = blockIdx.y * THREADS_Y + threadIdx.y;

    volatile int qindex = -1;
    volatile __shared__ int sh_qindex[THREADS_Y];
    volatile __shared__ int sh_qstart[THREADS_Y];
    sh_qindex[threadIdx.y] = -1;

    // Start by making an array
    volatile __shared__ unsigned int sh_queue[MAX_QUEUE_LENGTH];

    // Fill the queue
    for(int i=0; i<PIXELS_PER_THREAD; i++)
    {
        int const xx = i*THREADS_X + x;

        // Read one image pixel from global memory
        unsigned char const pixel = image[y*IMG_WIDTH + xx];
        unsigned int const queue_val = (y << 16) + xx;

        if(pixel)
        {
            do {
                qindex++;
                sh_qindex[threadIdx.y] = qindex;
                sh_queue[threadIdx.y*THREADS_X*PIXELS_PER_THREAD + qindex] = queue_val;
            } while (sh_queue[threadIdx.y*THREADS_X*PIXELS_PER_THREAD + qindex] != queue_val);
        }

        // Reload index from smem (last thread to write to smem will have updated it)
        qindex = sh_qindex[threadIdx.y];
    }

    // Let thread 0 reserve the space required in the global list
    __syncthreads();
    if(threadIdx.x == 0 && threadIdx.y == 0)
    {
        // Find how many items are stored in each list
        int total_index = 0;
        #pragma unroll
        for(int i=0; i<THREADS_Y; i++)
        {
            sh_qstart[i] = total_index;
            total_index += (sh_qindex[i] + 1u);
        }

        // Calculate the offset in the global list
        unsigned int global_offset = atomicAdd(glob_index, total_index);
        #pragma unroll
        for(int i=0; i<THREADS_Y; i++)
        {
            sh_qstart[i] += global_offset;
        }
    }
    __syncthreads();

    // Copy local queues to global queue
    /*
    for(int i=0; i<=qindex; i+=THREADS_X)
    {
        if(i + threadIdx.x > qindex)
            break;

        unsigned int qvalue = sh_queue[threadIdx.y*THREADS_X*PIXELS_PER_THREAD + i + threadIdx.x];
        list[sh_qstart[threadIdx.y] + i + threadIdx.x] = qvalue;
    }
    */
}

void call2DTo1DKernel(unsigned char const * const h_image)
{
    // Device side allocation
    unsigned char *d_image = NULL;
    unsigned int *d_list = NULL;
    int h_nlist=0, *d_nlist = NULL;
    cudaMalloc((void**)&d_image, sizeof(unsigned char)*IMG_SIZE);
    cudaMalloc((void**)&d_list, sizeof(unsigned int)*IMG_SIZE);
    cudaMalloc((void**)&d_nlist, sizeof(int));
    cudaCheckErrors("cudamalloc fail");

    // Time measurement initialization
    cudaEvent_t start, stop, startio, stopio;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventCreate(&startio);
    cudaEventCreate(&stopio);
    float et, etio;

    // Start timer w/ io
    cudaEventRecord(startio,0);
    cudaMemcpy(d_nlist, &h_nlist, sizeof(int), cudaMemcpyHostToDevice);

    // Copy image data to device
    cudaMemcpy((void*)d_image, (void*)h_image, sizeof(unsigned char)*IMG_SIZE, cudaMemcpyHostToDevice);
    cudaCheckErrors("cudamemcpy 1");

    // Start timer
    cudaEventRecord(start,0);

    // Kernel call
    // Phase 1 : Convert 2D binary image to 1D pixel array
    dim3 dimBlock1(THREADS_X, THREADS_Y);
    dim3 dimGrid1(BLOCKS_X, BLOCKS_Y);
    convert2DEdgeImageTo1DArray<<<dimGrid1, dimBlock1>>>(d_image, d_list, d_nlist);
    cudaDeviceSynchronize();
    cudaCheckErrors("kernel fail");

    // Stop timer
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);

    // Stop timer w/ io
    cudaEventRecord(stopio,0);
    cudaEventSynchronize(stopio);

    // Time measurement
    cudaEventElapsedTime(&et,start,stop);
    cudaEventElapsedTime(&etio,startio,stopio);

    // Time measurement deinitialization
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaEventDestroy(startio);
    cudaEventDestroy(stopio);

    // Get list size
    cudaMemcpy((void*)&h_nlist, (void*)d_nlist, sizeof(int), cudaMemcpyDeviceToHost);
    cudaCheckErrors("cudaMemcpy 2");

    // Report on console
    printf("%d pixels processed...\n", h_nlist);

    // Device side dealloc
    cudaFree(d_image);
    // cudaFree(d_space);
    cudaFree(d_list);
    cudaFree(d_nlist);
}

int main(){
    unsigned char *image;

    image = (unsigned char *)malloc(IMG_SIZE * sizeof(unsigned char));
    if (image == 0) {printf("malloc fail\n"); return 0;}

    for (int i = 0; i < IMG_SIZE; i++)
        image[i] = rand()%2;

    call2DTo1DKernel(image);
    call2DTo1DKernel(image);
    call2DTo1DKernel(image);
    call2DTo1DKernel(image);
    call2DTo1DKernel(image);
    cudaCheckErrors("some error");
    return 0;
}
$ nvcc -arch=sm_20 -O3 -o t216 t216.cu
$ ./t216
32617 pixels processed...
32617 pixels processed...
32617 pixels processed...
32617 pixels processed...
32617 pixels processed...
$ ./t216
32617 pixels processed...
32617 pixels processed...
32617 pixels processed...
32617 pixels processed...
32617 pixels processed...
$