I'm a beginner in OpenCL, and I've been trying to write matrix multiplication code.
It runs, but it gives garbage values as the output for the C array, and I'm unable to find the error.
Any help will be much appreciated.
Here are the host code and the kernel code.
#include <CL/cl.h>
#include <iostream>
#include <cstdio>
#include <fstream>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
using namespace std;
#define SUCCESS 0
#define FAILURE 1
// Reads the contents of a file into a string
int convertToString(const char *filename, std::string &s)
{
size_t size;
char *str;
std::fstream f(filename, (std::fstream::in | std::fstream::binary));
if (f.is_open())
{
size_t fileSize;
f.seekg(0, std::fstream::end);
size = fileSize = (size_t)f.tellg();
f.seekg(0, std::fstream::beg);
str = new char[size + 1];
if (!str)
{
f.close();
return 0;
}
f.read(str, fileSize);
f.close();
str[size] = '\0';
s = str;
delete[] str;
return 0;
}
cout << "Error: failed to open file\n:" << filename << endl;
return FAILURE;
}
int main()
{
cl_uint status;
cl_int *error;
int A[9] = {1, 1, 1, 1, 1, 1, 1, 1, 1};
int B[9] = {2, 2, 2, 2, 2, 2, 2, 2, 2};
int C[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
// Setting up platforms
cl_platform_id platform = NULL;
cl_uint numPlatforms = 0;
// Get the number of platforms
status = clGetPlatformIDs(0, NULL, &numPlatforms);
if (status != CL_SUCCESS)
{
cout << "\nUnable to query platforms";
return 0;
}
// Get the platform
if (numPlatforms > 0)
{
cl_platform_id *platforms =
(cl_platform_id *)malloc(numPlatforms * sizeof(cl_platform_id));
status = clGetPlatformIDs(numPlatforms, platforms, NULL);
platform = platforms[0];
free(platforms);
}
cl_uint numDevices = 0;
cl_device_id *devices = NULL;
status =
clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 0, devices, &numDevices);
if (numDevices == 0)
{
cout << "No GPU device available! Choosing CPU.\n";
status = clGetDeviceIDs(platform, CL_DEVICE_TYPE_CPU, 0, devices,
&numDevices);
devices = (cl_device_id *)malloc(numDevices * sizeof(cl_device_id));
status = clGetDeviceIDs(platform, CL_DEVICE_TYPE_CPU, numDevices,
devices, NULL);
}
else
{
devices = (cl_device_id *)malloc(numDevices * sizeof(cl_device_id));
status = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, numDevices,
devices, NULL);
if (status == 0)
{
cout << "Device error!";
return 0;
}
}
// Creating contexts
cl_context context =
clCreateContext(NULL, 1, devices, NULL, NULL, (cl_int *)status);
if (status != CL_SUCCESS)
{
cout << status;
}
// Creating command queues
cl_command_queue command =
clCreateCommandQueue(context, devices[0], 0, NULL);
// if(error!=CL_SUCCESS)
//{
// cout<<error;
//}
// Creating buffers
cl_mem bufferA = clCreateBuffer(context, CL_MEM_READ_ONLY,
3 * 3 * sizeof(int), NULL, NULL);
cl_mem bufferB = clCreateBuffer(context, CL_MEM_READ_ONLY,
3 * 3 * sizeof(int), NULL, NULL);
cl_mem bufferC = clCreateBuffer(context, CL_MEM_WRITE_ONLY,
3 * 3 * sizeof(int), NULL, NULL);
status = clEnqueueWriteBuffer(command, bufferA, CL_TRUE, 0, 9 * sizeof(int),
(void *)A, 0, NULL, NULL);
status = clEnqueueWriteBuffer(command, bufferB, CL_TRUE, 0, 9 * sizeof(int),
(void *)B, 0, NULL, NULL);
// status=clEnqueueReadBuffer(command,bufferA,CL_TRUE,0,9*sizeof(int),(void*)C,0,NULL,NULL);
const char *filename = "kernel.cl";
string sourceStr;
status = convertToString(filename, sourceStr);
const char *source = sourceStr.c_str();
size_t sourceSize[] = {strlen(source)};
cl_program program =
clCreateProgramWithSource(context, 1, &source, sourceSize, NULL);
status = clBuildProgram(program, numDevices, 0, NULL, NULL, NULL);
cl_kernel myKernel = clCreateKernel(program, "multiply", NULL);
// Setting kernel arguments
clSetKernelArg(myKernel, 0, sizeof(cl_mem), &bufferC);
clSetKernelArg(myKernel, 1, sizeof(cl_mem), &bufferA);
clSetKernelArg(myKernel, 2, sizeof(cl_mem), &bufferB);
size_t localws[2] = {9, 9};
size_t globalws[2] = {3, 3};
status = clEnqueueNDRangeKernel(command, myKernel, 2, NULL, globalws,
localws, 0, NULL, NULL);
status = clEnqueueReadBuffer(command, bufferC, CL_TRUE, 0, 9 * sizeof(int),
(void *)C, 0, NULL, NULL);
for (int i = 0; i < 9; i++) cout << C[i] << " ";
status = clReleaseKernel(myKernel); // Release kernel.
status = clReleaseProgram(program); // Release program object.
status = clReleaseMemObject(bufferA); // Release mem object.
status = clReleaseMemObject(bufferB);
status = clReleaseMemObject(bufferC);
status = clReleaseCommandQueue(command); // Release Command queue.
status = clReleaseContext(context); // Release context.
}
Kernel code:
__kernel void multiply(_global int outputC, _global int inputA,
_global int inputB)
{
int row = get_global_id(0);
int col = get_global_id(1);
int sum = 0;
for (int i = 0; i < 3; i++)
sum += inputA[row * 3 + 1] * inputB[i * 3 + col];
outputC[row + 3 + col] = sum;
}
As already pointed out by @Marco13, the kernel suffers from quite a few issues.
When running this kernel through a tool like clcc you can see that there are a number of compilation errors to begin with:
> clcc matmul.cl
"/tmp/OCLu7FyFF.cl", line 1: error: identifier "_global" is undefined
__kernel void multiply(_global int outputC, _global int inputA,
^
"/tmp/OCLu7FyFF.cl", line 1: error: invalid combination of type specifiers
__kernel void multiply(_global int outputC, _global int inputA,
^
"/tmp/OCLu7FyFF.cl", line 1: error: identifier "_global" is undefined
__kernel void multiply(_global int outputC, _global int inputA,
^
"/tmp/OCLu7FyFF.cl", line 1: error: invalid combination of type specifiers
__kernel void multiply(_global int outputC, _global int inputA,
^
"/tmp/OCLu7FyFF.cl", line 2: error: identifier "_global" is undefined
_global int inputB)
^
"/tmp/OCLu7FyFF.cl", line 2: error: invalid combination of type specifiers
_global int inputB)
^
6 errors detected in the compilation of "/tmp/OCLu7FyFF.cl".
A tool like clcc is very useful for catching errors early on. Most vendors also have their own version of a standalone kernel compiler/checker: e.g. Intel has its Kernel Builder, AMD's CodeXL contains a static kernel analyzer. Another option is to retrieve kernel compilation errors right from your host code, by calling clGetProgramBuildInfo to retrieve the compiler output, after clBuildProgram returned CL_BUILD_PROGRAM_FAILURE.
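For example, a minimal sketch of that pattern, reusing the program and devices variables from the host code above:
if (clBuildProgram(program, numDevices, devices, NULL, NULL, NULL)
    == CL_BUILD_PROGRAM_FAILURE)
{
    size_t logSize = 0;
    // Query the size of the build log first, then fetch its contents.
    clGetProgramBuildInfo(program, devices[0], CL_PROGRAM_BUILD_LOG,
                          0, NULL, &logSize);
    std::string log(logSize, '\0');
    clGetProgramBuildInfo(program, devices[0], CL_PROGRAM_BUILD_LOG,
                          logSize, &log[0], NULL);
    std::cout << "Build log:\n" << log << std::endl;
}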
Once these compilation errors are fixed, it looks like your kernel is still not doing what you expect: as noted, the inputs and outputs should be pointers, since you are passing buffers to the kernel. Also, the indexing of your input and output arrays is incorrect: in the for-loop, inputA[row * 3 + 1] should be inputA[row * 3 + i] (i instead of 1). When saving the result to outputC, I would expect outputC[row * 3 + col] (row * 3 instead of row + 3).
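Putting those fixes together (pointer parameters with __global, the loop index, and the output index), the corrected kernel should look roughly like this:
__kernel void multiply(__global int *outputC, __global int *inputA,
                       __global int *inputB)
{
    int row = get_global_id(0);
    int col = get_global_id(1);
    int sum = 0;
    // Dot product of row 'row' of A and column 'col' of B (3x3 matrices).
    for (int i = 0; i < 3; i++)
        sum += inputA[row * 3 + i] * inputB[i * 3 + col];
    outputC[row * 3 + col] = sum;
}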
I haven't looked in detail at the host code, but I would at least make sure, especially when just starting out with OpenCL, to always check every return code and error. This will save you a lot of time and frustration.
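For instance, a small helper macro along these lines (a sketch; CHECK_CL is a made-up name, and it assumes <iostream> and <cstdlib> are included) keeps the checks terse:
#define CHECK_CL(err, msg)                                      \
    do {                                                        \
        if ((err) != CL_SUCCESS) {                              \
            std::cerr << (msg) << " failed with error code "    \
                      << (err) << std::endl;                    \
            std::exit(EXIT_FAILURE);                            \
        }                                                       \
    } while (0)
// Usage, e.g.:
// status = clEnqueueWriteBuffer(command, bufferA, CL_TRUE, 0,
//                               9 * sizeof(int), A, 0, NULL, NULL);
// CHECK_CL(status, "clEnqueueWriteBuffer(A)");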
Finally, if you want a quick jump-start to learning OpenCL with a hands-on approach, I would strongly recommend going through the open source Hands-on OpenCL training by Simon McIntosh-Smith and Tom Deakin. It doesn't take very long, is quite pragmatic and provides lots of useful insights. Optimizing matrix multiplication is one of the use cases that is shown step-by-step.
I am trying to use the OpenPose example in OpenCV with a Caffe model and opencv/dnn.hpp.
The tutorial I have been following: https://www.learnopencv.com/deep-learning-based-human-pose-estimation-using-opencv-cpp-python/
As the tutorial says, we require two files for the network:
1 - prototxt - https://github.com/spmallick/learnopencv/blob/master/OpenPose/pose/coco/pose_deploy_linevec.prototxt
2 - Caffe model - posefs1.perception.cs.cmu.edu/OpenPose/models/pose/coco/pose_iter_440000.caffemodel
The ROS node I wrote following the tutorial:
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/dnn/dnn.hpp>
#include <sensor_msgs/image_encodings.h>
#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <iostream>
using namespace std;
using namespace cv;
using namespace cv::dnn;
static const std::string OPENCV_WINDOW = "Image window";
#define COCO
#ifdef COCO
const int POSE_PAIRS[17][2] =
{
{1,2}, {1,5}, {2,3},
{3,4}, {5,6}, {6,7},
{1,8}, {8,9}, {9,10},
{1,11}, {11,12}, {12,13},
{1,0},{0,14},
{14,16}, {0,15}, {15,17}
};
static const std::string protoFile = "pose/coco/pose_deploy_linevec.prototxt";
static const std::string weightsFile = "pose/coco/pose_iter_440000.caffemodel";
int nPoints = 18;
#endif
class ImageConverter
{
ros::NodeHandle nh_;
image_transport::ImageTransport it_;
image_transport::Subscriber image_sub_;
public:
ImageConverter()
: it_(nh_)
{
image_sub_ = it_.subscribe("/zed/rgb/image_raw_color", 1, &ImageConverter::imageCb, this);
}
~ImageConverter()
{
cv::destroyWindow(OPENCV_WINDOW);
}
void imageCb(const sensor_msgs::ImageConstPtr& msg)
{
cv_bridge::CvImagePtr cv_ptr;
try
{
cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}
if (cv_ptr->image.rows > 60 && cv_ptr->image.cols > 60)
detect_people(cv_ptr->image);
cv::waitKey(3);
}
void detect_people(cv::Mat msg)
{
int inWidth = msg.cols;
int inHeight = msg.rows;
float thresh = 0.1;
cv::Mat frame;
msg.copyTo(frame);
cv::Mat frameCopy = frame.clone();
int frameWidth = frame.cols;
int frameHeight = frame.rows;
cv::dnn::Net net = cv::dnn::readNetFromCaffe("pose_deploy_linevec.prototxt" ,"pose_iter_440000.caffemodel");
cv::Mat inpBlob = blobFromImage(frame, 1.0/255, cv::Size(inWidth, inHeight), cv::Scalar(0, 0, 0), false, false);
net.setInput(inpBlob);
cv::Mat output = net.forward();
int H = output.size[2];
int W = output.size[3];
std::vector<cv::Point> points(nPoints);
for (int n=0; n < nPoints; n++)
{
// Probability map of corresponding body's part.
cv::Mat probMap(H, W, CV_32F, output.ptr(0,n));
cv::Point2f p(-1,-1);
cv::Point maxLoc;
double prob;
cv::minMaxLoc(probMap, 0, &prob, 0, &maxLoc);
if (prob > thresh)
{
p = maxLoc;
p.x *= (float)frameWidth / W ;
p.y *= (float)frameHeight / H ;
cv::circle(frameCopy, cv::Point((int)p.x, (int)p.y), 8, Scalar(0,255,255), -1);
cv::putText(frameCopy, cv::format("%d", n), cv::Point((int)p.x, (int)p.y), cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(0, 0, 255), 2);
}
points[n] = p;
}
int nPairs = sizeof(POSE_PAIRS)/sizeof(POSE_PAIRS[0]);
for (int n = 0; n < nPairs; n++)
{
// lookup 2 connected body/hand parts
Point2f partA = points[POSE_PAIRS[n][0]];
Point2f partB = points[POSE_PAIRS[n][1]];
if (partA.x<=0 || partA.y<=0 || partB.x<=0 || partB.y<=0)
continue;
cv::line(frame, partA, partB, cv::Scalar(0,255,255), 8);
cv::circle(frame, partA, 8, cv::Scalar(0,0,255), -1);
cv::circle(frame, partB, 8, cv::Scalar(0,0,255), -1);
}
cv::imshow("Output-Skeleton", frame);
}
};
int main(int argc, char** argv)
{
ros::init(argc, argv, "image_converter");
ros::NodeHandle nh_;
ros::Publisher pub;
ImageConverter ic;
ros::spin();
return 0;
}
The code compiles without any errors, but when I run the node it gives the following error message:
OpenCV Error: Unspecified error (FAILED: fs.is_open(). Can't open "pose_deploy_linevec.prototxt") in ReadProtoFromTextFile, file /tmp/binarydeb/ros-kinetic-opencv3-3.3.1/modules/dnn/src/caffe/caffe_io.cpp, line 1119
terminate called after throwing an instance of 'cv::Exception'
what(): /tmp/binarydeb/ros-kinetic-opencv3-3.3.1/modules/dnn/src/caffe/caffe_io.cpp:1119: error: (-2) FAILED: fs.is_open(). Can't open "pose_deploy_linevec.prototxt" in function ReadProtoFromTextFile
Aborted (core dumped)
Please help me solve this issue.
This issue probably affects Windows users only.
Solve it by:
Using an absolute path when referencing the prototxt file.
Adding the file extension too. For example:
"pose/coco/pose_deploy_linevec.prototxt.txt"
I spent 3 hours debugging this myself. Hope it helps someone else.
You are selecting the wrong file path.
Just replace this line:
static const std::string protoFile = "pose/coco/pose_deploy_linevec.prototxt";
with the path of the prototxt file on your machine, like this:
static const std::string protoFile = "C:/Users/lenovo/Desktop/learnopencv-master/OpenPose/pose/coco/pose_deploy_linevec.prototxt";
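A related gotcha on Linux: ROS nodes launched via roslaunch usually run with their working directory set to ~/.ros, so relative paths rarely resolve the way you expect. A quick sanity check before loading the network can confirm the files are actually reachable (a sketch; fileExists is a made-up helper, and the paths must match your setup):
#include <fstream>

// Returns true if the file can be opened for reading.
static bool fileExists(const std::string &path)
{
    std::ifstream f(path.c_str());
    return f.good();
}

// In detect_people(), before creating the network:
// if (!fileExists(protoFile) || !fileExists(weightsFile)) {
//     ROS_ERROR("Cannot open model files: %s / %s",
//               protoFile.c_str(), weightsFile.c_str());
//     return;
// }
// cv::dnn::Net net = cv::dnn::readNetFromCaffe(protoFile, weightsFile);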
I would like to automate as much as possible the compilation and linking of my code projects that use OpenCL on OS X. I know how to do it for plain C++, but I am running into problems with OpenCL. This is the code I am using as an example:
main.cpp:
#include <stdio.h>
#include <stdlib.h>
#ifdef __APPLE__ //Mac OSX has a different name for the header file
#include <OpenCL/opencl.h>
#else
#include <CL/cl.h>
#endif
#define MEM_SIZE (128)//suppose we have a vector with 128 elements
#define MAX_SOURCE_SIZE (0x100000)
int main()
{
//In general Intel CPU and NV/AMD's GPU are in different platforms
//But in Mac OSX, all the OpenCL devices are in the platform "Apple"
cl_platform_id platform_id = NULL;
cl_device_id device_id = NULL;
cl_context context = NULL;
cl_command_queue command_queue = NULL; //"stream" in CUDA
cl_mem memobj = NULL;//device memory
cl_program program = NULL; //cl_prgram is a program executable created from the source or binary
cl_kernel kernel = NULL; //kernel function
cl_uint ret_num_devices;
cl_uint ret_num_platforms;
cl_int ret; //accepts return values for APIs
float mem[MEM_SIZE]; //alloc memory on host(CPU) ram
//OpenCL source can be placed in the source code as text strings or read from another file.
FILE *fp;
const char fileName[] = "./kernel.cl";
size_t source_size;
char *source_str;
cl_int i;
// read the kernel file into ram
fp = fopen(fileName, "r");
if (!fp) {
fprintf(stderr, "Failed to load kernel.\n");
exit(1);
}
source_str = (char *)malloc(MAX_SOURCE_SIZE);
source_size = fread( source_str, 1, MAX_SOURCE_SIZE, fp );
fclose( fp );
//initialize the mem with 1,2,3...,n
for( i = 0; i < MEM_SIZE; i++ ) {
mem[i] = i;
}
//get the device info
ret = clGetPlatformIDs(1, &platform_id, &ret_num_platforms);
ret = clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_DEFAULT, 1, &device_id, &ret_num_devices);
//create context on the specified device
context = clCreateContext( NULL, 1, &device_id, NULL, NULL, &ret);
//create the command_queue (stream)
command_queue = clCreateCommandQueue(context, device_id, 0, &ret);
//alloc mem on the device with the read/write flag
memobj = clCreateBuffer(context, CL_MEM_READ_WRITE, MEM_SIZE * sizeof(float), NULL, &ret);
//copy the memory from host to device, CL_TRUE means blocking write/read
ret = clEnqueueWriteBuffer(command_queue, memobj, CL_TRUE, 0, MEM_SIZE * sizeof(float), mem, 0, NULL, NULL);
//create a program object for a context
//load the source code specified by the text strings into the program object
program = clCreateProgramWithSource(context, 1, (const char **)&source_str, (const size_t *)&source_size, &ret);
//build (compiles and links) a program executable from the program source or binary
ret = clBuildProgram(program, 1, &device_id, NULL, NULL, NULL);
//create a kernel object with specified name
kernel = clCreateKernel(program, "vecAdd", &ret);
//set the argument value for a specific argument of a kernel
ret = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&memobj);
//define the global size and local size (grid size and block size in CUDA)
size_t global_work_size[3] = {MEM_SIZE, 0, 0};
size_t local_work_size[3] = {MEM_SIZE, 0, 0};
//Enqueue a command to execute a kernel on a device ("1" indicates 1-dim work)
ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL, global_work_size, local_work_size, 0, NULL, NULL);
//copy memory from device to host
ret = clEnqueueReadBuffer(command_queue, memobj, CL_TRUE, 0, MEM_SIZE * sizeof(float), mem, 0, NULL, NULL);
//print out the result
for(i=0; i<MEM_SIZE; i++) {
printf("mem[%d] : %.2f\n", i, mem[i]);
}
//clFlush only guarantees that all queued commands to command_queue get issued to the appropriate device
//There is no guarantee that they will be complete after clFlush returns
ret = clFlush(command_queue);
//clFinish blocks until all previously queued OpenCL commands in command_queue are issued to the associated device and have completed.
ret = clFinish(command_queue);
ret = clReleaseKernel(kernel);
ret = clReleaseProgram(program);
ret = clReleaseMemObject(memobj);//free memory on device
ret = clReleaseCommandQueue(command_queue);
ret = clReleaseContext(context);
free(source_str);//free memory on host
return 0;
}
kernel.cl:
__kernel void vecAdd(__global float* a)
{
int gid = get_global_id(0);// in CUDA = blockIdx.x * blockDim.x + threadIdx.x
a[gid] += a[gid];
}
and this is my CMakelists.txt so far:
#Minimal OpenCL CMakeLists.txt by StreamHPC
cmake_minimum_required (VERSION 3.1)
project(GreatProject)
# Handle OpenCL
find_package(OpenCL REQUIRED)
include_directories(${OpenCL_INCLUDE_DIRS})
link_directories(${OpenCL_LIBRARY})
add_executable (main main.cpp)
target_include_directories (main PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
target_link_libraries (main ${OpenCL_LIBRARY})
Apparently it compiles, but when I run the executable I get the error:
Failed to load kernel.
I compiled the code successfully by hand following this answer, but my project will have various kernels and various C++ files and headers, so I would like to use CMake to automate the compilation of the project.
How should I modify my CMakeLists.txt script?
NOTE:
I guess that the file kernel.cl is not being compiled; I don't know the proper way to guarantee a CMakeLists.txt that always compiles all the *.cl files in the project directory in addition to all the *.cpp files. It would be even better if it were also possible to link against MKL.
On Mac, OpenCL is provided as a framework; you need to do the following to link against it:
cmake_minimum_required (VERSION 2.6)
project (montecarlo_cl)
find_package(OpenCL REQUIRED)
include_directories( ${OPENCL_INCLUDE_DIR})
set (montecarlo_cl_src montecarlo_ocl.c)
add_executable (montecarlo_cl ${montecarlo_cl_src})
target_link_libraries(montecarlo_cl "-framework OpenCL" )
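Note that the "Failed to load kernel." message comes from the fopen of ./kernel.cl at runtime, not from compilation: OpenCL kernels are compiled at runtime by clBuildProgram, so CMake never compiles *.cl files; it only has to make them visible to the executable. One way to copy every *.cl file next to the binary (a sketch, assuming the kernels live in the top-level source directory):
# Copy all *.cl files from the source directory into the build
# directory so the executable can find them at runtime.
file(GLOB KERNEL_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.cl)
foreach(kernel_file ${KERNEL_FILES})
    configure_file(${kernel_file} ${CMAKE_CURRENT_BINARY_DIR} COPYONLY)
endforeach()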
I ran into a problem which is driving me crazy.
I need to simulate dynamic memory allocation in an OpenCL kernel. For that purpose, I have the following malloc function defined in a *.cl file:
__global void* malloc(size_t size, __global byte *heap, __global uint *next)
{
uint index = atomic_add(next, size);
return heap+index;
}
In the host program, I dynamically allocate a large array of type cl_uchar for this virtual heap, as follows:
int MAX_NUM_OF_HEADERS_PROCESSED_IN_PARALLEL = 1000;
cl_uchar* heap = new cl_uchar[1000000];
cl_uint *next = new cl_uint;
*next = 0;
cl_uint * test_result =
new cl_uint[MAX_NUM_OF_HEADERS_PROCESSED_IN_PARALLEL];
cl_mem memory[3]= { 0, 0, 0};
cl_int error;
memory[0] = clCreateBuffer(GPU_context,
CL_MEM_READ_WRITE, sizeof(cl_uchar) * MAX_HEAP_SIZE, NULL,
NULL);
memory[1] = clCreateBuffer(GPU_context, CL_MEM_READ_WRITE, sizeof(cl_uint), NULL,
&error);
memory[2] = clCreateBuffer(GPU_context, CL_MEM_READ_WRITE,
sizeof(cl_uint) * MAX_NUM_OF_HEADERS_PROCESSED_IN_PARALLEL, NULL,
&error);
clEnqueueWriteBuffer(command_queue, memory[0], CL_TRUE, 0,
sizeof(cl_uchar) * MAX_HEAP_SIZE, heap, 0, NULL, NULL);
clEnqueueWriteBuffer(command_queue, memory[1], CL_TRUE, 0, sizeof(cl_uint),
next, 0, NULL, NULL);
error = 0;
error |= clSetKernelArg(kernel, 0, sizeof(cl_mem), &memory[0]);
error |= clSetKernelArg(kernel, 1, sizeof(cl_mem), &memory[1]);
error |= clSetKernelArg(kernel, 2, sizeof(cl_mem), &memory[2]);
size_t globalWorkSize[1] = { MAX_NUM_OF_HEADERS_PROCESSED_IN_PARALLEL };
size_t localWorkSize[1] = { 1 };
error = 0;
error = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL,
globalWorkSize, localWorkSize, 0, NULL, NULL);
I also have the following kernel:
__kernel void packet_routing2(__global byte* heap_, __global uint* next, __global uint* test_result){
int gid = get_global_id(0);
__global uint*xx[100];
for ( int i = 0 ; i < 100; i ++)
{
xx[i] = (__global uint*) malloc(sizeof(uint),heap_,next);
*xx[i] = i*gid;
test_result[gid] = *(xx[0]);
}
}
I encountered the following error when I run the program:
" %27 = load i32 addrspace(1)* %26, align 4, !tbaa !17
Illegal pointer which is not from a valid memory space.
Aborting..."
Could you please help me fix this issue? I also found out that if xx has only 10 elements, instead of 100, the code works well!
Edit: Simplest solution: add a padding value to 'size' before the malloc so all struct types (that are smaller than the maximum padding) receive the necessary alignment.
0=struct footprint in memory
*=heap
_=padding
***000_____*****0000____****0_______****00000___*****0000000_*******00______***
|
v
save this unused padded memory space in its thread to use later.
It is important that the first/starting address satisfies the maximum alignment requirement. If there is a struct 256 bytes long, its start address should be a multiple of 256.
struct size    malloc size    minimum 'next' value (address, not offset)
1-4            4              multiple of 4
5-8            8              multiple of 8
9-16           16             multiple of 16
17-32          32             32*k
33-64          64             64*k
If there is a 64-byte struct, even an int now needs a 64-byte malloc size. Maybe you can save those values locally per thread to reuse the remaining unused areas later.
This way it doesn't give alignment errors, and it probably works faster for the allocations that don't need padding.
Also, float3 natively needs 16-byte alignment.
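A sketch of what the padded allocator could look like, assuming 16 bytes is the largest alignment you need (uchar replaces the non-standard byte type):
__global void* malloc(size_t size, __global uchar *heap, __global uint *next)
{
    // Round the requested size up to the next multiple of 16 so that
    // every allocation starts on a 16-byte boundary. The heap buffer
    // itself must also start at a 16-byte-aligned address.
    uint padded = ((uint)size + 15u) & ~15u;
    uint index = atomic_add(next, padded);
    return heap + index;
}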
I have a big problem (on Linux):
I create a buffer with defined data, then an OpenCL kernel takes this data and puts it into an image2d_t. When running on an AMD C-50 (Fusion CPU/GPU) the program works as desired, but on my GeForce 9500 GT the kernel computes the correct result only rarely. Sometimes the result is correct, but very often it is incorrect. Sometimes it depends on very strange changes, like removing an unused variable declaration or adding a newline. I noticed that disabling optimization increases the probability of failure. I have the most recent display drivers on both systems.
Here is my reduced code:
#include <CL/cl.h>
#include <string>
#include <iostream>
#include <sstream>
#include <cmath>
void checkOpenCLErr(cl_int err, std::string name){
const char* errorString[] = {
"CL_SUCCESS",
"CL_DEVICE_NOT_FOUND",
"CL_DEVICE_NOT_AVAILABLE",
"CL_COMPILER_NOT_AVAILABLE",
"CL_MEM_OBJECT_ALLOCATION_FAILURE",
"CL_OUT_OF_RESOURCES",
"CL_OUT_OF_HOST_MEMORY",
"CL_PROFILING_INFO_NOT_AVAILABLE",
"CL_MEM_COPY_OVERLAP",
"CL_IMAGE_FORMAT_MISMATCH",
"CL_IMAGE_FORMAT_NOT_SUPPORTED",
"CL_BUILD_PROGRAM_FAILURE",
"CL_MAP_FAILURE",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"CL_INVALID_VALUE",
"CL_INVALID_DEVICE_TYPE",
"CL_INVALID_PLATFORM",
"CL_INVALID_DEVICE",
"CL_INVALID_CONTEXT",
"CL_INVALID_QUEUE_PROPERTIES",
"CL_INVALID_COMMAND_QUEUE",
"CL_INVALID_HOST_PTR",
"CL_INVALID_MEM_OBJECT",
"CL_INVALID_IMAGE_FORMAT_DESCRIPTOR",
"CL_INVALID_IMAGE_SIZE",
"CL_INVALID_SAMPLER",
"CL_INVALID_BINARY",
"CL_INVALID_BUILD_OPTIONS",
"CL_INVALID_PROGRAM",
"CL_INVALID_PROGRAM_EXECUTABLE",
"CL_INVALID_KERNEL_NAME",
"CL_INVALID_KERNEL_DEFINITION",
"CL_INVALID_KERNEL",
"CL_INVALID_ARG_INDEX",
"CL_INVALID_ARG_VALUE",
"CL_INVALID_ARG_SIZE",
"CL_INVALID_KERNEL_ARGS",
"CL_INVALID_WORK_DIMENSION",
"CL_INVALID_WORK_GROUP_SIZE",
"CL_INVALID_WORK_ITEM_SIZE",
"CL_INVALID_GLOBAL_OFFSET",
"CL_INVALID_EVENT_WAIT_LIST",
"CL_INVALID_EVENT",
"CL_INVALID_OPERATION",
"CL_INVALID_GL_OBJECT",
"CL_INVALID_BUFFER_SIZE",
"CL_INVALID_MIP_LEVEL",
"CL_INVALID_GLOBAL_WORK_SIZE",
};
if (err != CL_SUCCESS) {
std::stringstream str;
str << errorString[-err] << " (" << err << ")";
throw std::string(name)+(str.str());
}
}
int main(){
try{
cl_context m_context;
cl_platform_id* m_platforms;
unsigned int m_numPlatforms;
cl_command_queue m_queue;
cl_device_id m_device;
cl_int error = 0; // Used to handle error codes
clGetPlatformIDs(0,NULL,&m_numPlatforms);
m_platforms = new cl_platform_id[m_numPlatforms];
error = clGetPlatformIDs(m_numPlatforms,m_platforms,&m_numPlatforms);
checkOpenCLErr(error, "getPlatformIDs");
// Device
error = clGetDeviceIDs(m_platforms[0], CL_DEVICE_TYPE_GPU, 1, &m_device, NULL);
checkOpenCLErr(error, "getDeviceIDs");
// Context
cl_context_properties properties[] =
{ CL_CONTEXT_PLATFORM, (cl_context_properties)(m_platforms[0]), 0};
m_context = clCreateContextFromType(properties, CL_DEVICE_TYPE_GPU, NULL, NULL, NULL);
// m_private->m_context = clCreateContext(properties, 1, &m_private->m_device, NULL, NULL, &error);
checkOpenCLErr(error, "Create context");
// Command-queue
m_queue = clCreateCommandQueue(m_context, m_device, 0, &error);
checkOpenCLErr(error, "Create command queue");
//Build program and kernel
const char* source = "#pragma OPENCL EXTENSION cl_khr_byte_addressable_store : enable\n"
"\n"
"__kernel void bufToImage(__global unsigned char* in, __write_only image2d_t out, const unsigned int offset_x, const unsigned int image_width , const unsigned int maxval ){\n"
"\tint i = get_global_id(0);\n"
"\tint j = get_global_id(1);\n"
"\tint width = get_global_size(0);\n"
"\tint height = get_global_size(1);\n"
"\n"
"\tint pos = j*image_width*3+(offset_x+i)*3;\n"
"\tif( maxval < 256 ){\n"
"\t\tfloat4 c = (float4)(in[pos],in[pos+1],in[pos+2],1.0f);\n"
"\t\tc.x /= maxval;\n"
"\t\tc.y /= maxval;\n"
"\t\tc.z /= maxval;\n"
"\t\twrite_imagef(out, (int2)(i,j), c);\n"
"\t}else{\n"
"\t\tfloat4 c = (float4)(255.0f*in[2*pos]+in[2*pos+1],255.0f*in[2*pos+2]+in[2*pos+3],255.0f*in[2*pos+4]+in[2*pos+5],1.0f);\n"
"\t\tc.x /= maxval;\n"
"\t\tc.y /= maxval;\n"
"\t\tc.z /= maxval;\n"
"\t\twrite_imagef(out, (int2)(i,j), c);\n"
"\t}\n"
"}\n"
"\n"
"__constant sampler_t imageSampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST;\n"
"\n"
"__kernel void imageToBuf(__read_only image2d_t in, __global unsigned char* out, const unsigned int offset_x, const unsigned int image_width ){\n"
"\tint i = get_global_id(0);\n"
"\tint j = get_global_id(1);\n"
"\tint pos = j*image_width*3+(offset_x+i)*3;\n"
"\tfloat4 c = read_imagef(in, imageSampler, (int2)(i,j));\n"
"\tif( c.x <= 1.0f && c.y <= 1.0f && c.z <= 1.0f ){\n"
"\t\tout[pos] = c.x*255.0f;\n"
"\t\tout[pos+1] = c.y*255.0f;\n"
"\t\tout[pos+2] = c.z*255.0f;\n"
"\t}else{\n"
"\t\tout[pos] = 200.0f;\n"
"\t\tout[pos+1] = 0.0f;\n"
"\t\tout[pos+2] = 255.0f;\n"
"\t}\n"
"}\n";
cl_int err;
cl_program prog = clCreateProgramWithSource(m_context,1,&source,NULL,&err);
if( -err != CL_SUCCESS ) throw std::string("clCreateProgramWithSources");
err = clBuildProgram(prog,0,NULL,"-cl-opt-disable",NULL,NULL);
if( -err != CL_SUCCESS ) throw std::string("clBuildProgram(fromSources)");
cl_kernel kernel = clCreateKernel(prog,"bufToImage",&err);
checkOpenCLErr(err,"CreateKernel");
cl_uint imageWidth = 80;
cl_uint imageHeight = 90;
//Initialize datas
cl_uint maxVal = 255;
cl_uint offsetX = 0;
int size = imageWidth*imageHeight*3;
int resSize = imageWidth*imageHeight*4;
cl_uchar* data = new cl_uchar[size];
cl_float* expectedData = new cl_float[resSize];
for( int i = 0,j=0; i < size; i++,j++ ){
data[i] = (cl_uchar)i;
expectedData[j] = (cl_float)((unsigned char)i)/255.0f;
if ( i%3 == 2 ){
j++;
expectedData[j] = 1.0f;
}
}
cl_mem inBuffer = clCreateBuffer(m_context,CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR,size*sizeof(cl_uchar),data,&err);
checkOpenCLErr(err, "clCreateBuffer()");
clFinish(m_queue);
cl_image_format imgFormat;
imgFormat.image_channel_order = CL_RGBA;
imgFormat.image_channel_data_type = CL_FLOAT;
cl_mem outImg = clCreateImage2D( m_context, CL_MEM_READ_WRITE, &imgFormat, imageWidth, imageHeight, 0, NULL, &err );
checkOpenCLErr(err,"get2DImage()");
clFinish(m_queue);
size_t kernelRegion[]={imageWidth,imageHeight};
size_t kernelWorkgroup[]={1,1};
//Fill kernel with data
clSetKernelArg(kernel,0,sizeof(cl_mem),&inBuffer);
clSetKernelArg(kernel,1,sizeof(cl_mem),&outImg);
clSetKernelArg(kernel,2,sizeof(cl_uint),&offsetX);
clSetKernelArg(kernel,3,sizeof(cl_uint),&imageWidth);
clSetKernelArg(kernel,4,sizeof(cl_uint),&maxVal);
//Run kernel
err = clEnqueueNDRangeKernel(m_queue,kernel,2,NULL,kernelRegion,kernelWorkgroup,0,NULL,NULL);
checkOpenCLErr(err,"RunKernel");
clFinish(m_queue);
//Check resulting data for validty
cl_float* computedData = new cl_float[resSize];
size_t region[]={imageWidth,imageHeight,1};
const size_t offset[] = {0,0,0};
err = clEnqueueReadImage(m_queue,outImg,CL_TRUE,offset,region,0,0,computedData,0,NULL,NULL);
checkOpenCLErr(err, "readDataFromImage()");
clFinish(m_queue);
for( int i = 0; i < resSize; i++ ){
if( fabs(expectedData[i]-computedData[i])>0.1 ){
std::cout << "Expected: \n";
for( int j = 0; j < resSize; j++ ){
std::cout << expectedData[j] << " ";
}
std::cout << "\nComputed: \n";
std::cout << "\n";
for( int j = 0; j < resSize; j++ ){
std::cout << computedData[j] << " ";
}
std::cout << "\n";
throw std::string("Error, computed and expected data are not the same!\n");
}
}
}catch(std::string& e){
std::cout << "\nCaught an exception: " << e << "\n";
return 1;
}
std::cout << "Works fine\n";
return 0;
}
I also uploaded the source code for you to make it easier to test it:
http://www.file-upload.net/download-3524302/strangeOpenCLError.cpp.html
Can you please tell me if I've done anything wrong?
Is there a mistake in the code, or is this a bug in my driver?
Best regards,
Alex
Edit: changed the program (both here and the linked one) a little bit to make it more likely to get a mismatch.
I found the bug, and it is an annoying one:
When working under Linux and linking the OpenCL program with the most recent "OpenCV" library (yes, the computer vision lib), the binary parts of the kernels, which get compiled and cached in ~/.nv, are damaged.
Can you please install the current OpenCV library and execute the following commands:
Generating the bad kernel, which sometimes leads to bad behaviour:
rm -R ~/.nv && g++ strangeOpenCLError.cpp -lOpenCL -lopencv_gpu -o strangeOpenCLError && ./strangeOpenCLError && ls -la ~/.nv/ComputeCache/*/*
Generating the good kernel, which performs as desired:
rm -R ~/.nv && g++ strangeOpenCLError.cpp -lOpenCL -o strangeOpenCLError && ./strangeOpenCLError && ls -la ~/.nv/ComputeCache/*/*
On my system, when using -lopencv_gpu or -lopencv_core, I get a kernel object in ~/.nv with a slightly different size due to slightly different binary parts. These smaller kernels computed bad results on my systems.
The problem is that the bug does not always appear, sometimes only when working on buffers which are big enough. So the more reliable measurement is the different kernel-cache size. I edited the program in my question; now it is more likely to produce the bad result.
Best regards,
Alex
PS: I also created a bug report at NVidia and it is in progress. They could reproduce the bug on their system.
To turn off the Nvidia compiler cache, set the environment variable CUDA_CACHE_DISABLE=1. That may help to avoid the problem in the future.
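For example, when launching the test program from a shell:
export CUDA_CACHE_DISABLE=1
./strangeOpenCLError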
In line
m_context = clCreateContextFromType(properties, CL_DEVICE_TYPE_GPU, NULL, NULL, NULL);
you should pass &error as the last parameter to get a meaningful error code. Without it I got some silly error messages. (I needed to change the platform to get my GPU board.)
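That is, something like:
m_context = clCreateContextFromType(properties, CL_DEVICE_TYPE_GPU,
                                    NULL, NULL, &error);
checkOpenCLErr(error, "Create context");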
I cannot reproduce the error with my nVidia GeForce 8600 GTS; I get a "Works fine". I tried it more than 20 times without any issue.
I also cannot see any error, apart from the fact that your code is a little confusing: you should remove all commented-out code and introduce some blank lines to group the code a little bit.
Do you have the latest drivers? The behavior you describe sounds very familiar, like an uninitialized buffer or variable, but I do not see anything like that.
I have a problem with passing a structure to kernel local memory. Here is the kernel code:
typedef struct data {
unsigned long wId; // group_id
unsigned long iId[1]; // global_item_id
} DATA;
__kernel void tKernel(__global DATA *x, __local DATA tmp) {
int wd = get_work_dim();
// x dimension
int xGrId = get_group_id(0);
int xLId = get_local_id(0);
int xGlId = get_global_id(0);
x += xGrId;
x->wId = tmp.wId;
x->iId[xLId] = ++tmp.wId;
}
Here is the host code:
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#ifdef __APPLE__
#include <OpenCL/opencl.h>
#else
#include <CL/cl.h>
#endif
#define GLOBAL_ITEM_SIZE (1)
#define LOCAL_ITEM_SIZE (1)
#define MAX_SOURCE_SIZE (0x100000)
typedef struct data {
unsigned long wId;
unsigned long iId[LOCAL_ITEM_SIZE];
} DATA;
int main() {
cl_platform_id platform_id = NULL;
cl_device_id device_id = NULL;
cl_context context = NULL;
cl_command_queue commandQueue = NULL;
cl_mem cmPinnedBufOut = NULL;
DATA *cDataOut = NULL;
cl_program program = NULL;
cl_kernel kernel = NULL;
cl_uint ret_num_devices;
cl_uint ret_num_platforms;
cl_int ret;
size_t group_size = GLOBAL_ITEM_SIZE / LOCAL_ITEM_SIZE;
FILE *fp;
const char fileName[] = "./kernel.cl";
size_t source_size;
char *source_str;
/* Load kernel source file */
fp = fopen(fileName, "r");
if (!fp) {
fprintf(stderr, "Failed to load kernel.\n");
exit(EXIT_FAILURE);
}
source_str = (char *)malloc(MAX_SOURCE_SIZE);
source_size = fread(source_str, 1, MAX_SOURCE_SIZE, fp);
fclose(fp);
/* Create OpenCL Context */
context = clCreateContext( NULL, 1, &device_id, NULL, NULL, &ret);
/* Create command queue with measurment of preformance */
commandQueue = clCreateCommandQueue(context, device_id, 0, &ret);
/* Create memory object */
cmPinnedBufOut = clCreateBuffer(context, CL_MEM_WRITE_ONLY | CL_MEM_ALLOC_HOST_PTR, group_size * sizeof(DATA), NULL, &ret);
cDataOut = (DATA *)malloc(group_size * sizeof(DATA));
/* Create kernel program from source file */
program = clCreateProgramWithSource(context, 1, (const char **)&source_str, (const size_t *)&source_size, &ret);
assert(ret == CL_SUCCESS);
ret = clBuildProgram(program, 1, &device_id, NULL, NULL, NULL);
if (ret != CL_SUCCESS) {
printf("\nFail to build the program\n");
char buffer[10240];
clGetProgramBuildInfo(program, device_id, CL_PROGRAM_BUILD_LOG, sizeof(buffer), buffer, NULL);
printf("%s\n", buffer);
exit(EXIT_FAILURE);
}
/* Create data parallel OpenCL kernel */
kernel = clCreateKernel(program, "tKernel", &ret);
assert(ret == CL_SUCCESS);
/* Set OpenCL kernel arguments */
ret = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&cmPinnedBufOut);
assert(ret == CL_SUCCESS);
DATA tmp;
tmp.wId = 66;
ret = clSetKernelArg(kernel, 1, sizeof(DATA), &tmp);
assert(ret == CL_SUCCESS);
size_t global_item_size = GLOBAL_ITEM_SIZE;
size_t local_item_size = LOCAL_ITEM_SIZE;
/* Execute OpenCL kernel as data parallel */
ret = clEnqueueNDRangeKernel(commandQueue, kernel, 1, NULL, &global_item_size, &local_item_size, 0, NULL, NULL);
if (ret == CL_INVALID_WORK_GROUP_SIZE) {
printf("Invalid work group size: error when compute group size: %lu/%lu", global_item_size, local_item_size);
exit(EXIT_FAILURE);
}
assert(ret == CL_SUCCESS);
/* Transfer result to host */
ret = clEnqueueReadBuffer(commandQueue, cmPinnedBufOut, CL_TRUE, 0, group_size * sizeof(DATA), cDataOut, 0, NULL, NULL);
assert(ret == CL_SUCCESS);
/* Display Results */
for (int i = 0; i < group_size; i++) {
printf("%d: -> group_id %lu ~> work_item_ids: ", i, cDataOut[i].wId);
for (int j = 0; j < LOCAL_ITEM_SIZE; j++)
printf("%2lu, ", cDataOut[i].iId[j]);
printf("\n");
}
printf("\n");
/* Finalization */
ret = clFlush(commandQueue);
ret = clFinish(commandQueue); // blockink function, wait until all queue cmd are finished
ret = clReleaseKernel(kernel);
ret = clReleaseProgram(program);
ret = clReleaseCommandQueue(commandQueue);
ret = clReleaseContext(context);
free(source_str);
return 0;
}
So I expected as the result: 0: -> group_id 66 ~> work_item_ids: 67,
But I get: 0: -> group_id 0 ~> work_item_ids: 1,
From this I conclude that the tmp structure holding the number 66 was not read correctly by the kernel. I tried the same thing with a plain integer and it works perfectly.
So my question is: am I doing something wrong, is there no way to copy a data structure from host to device local memory, or is there another way of doing this?
clSetKernelArg for __local arguments only specifies the size; the pointer must be NULL. See OpenCL spec 5.7.2. There is no way to initialize local memory from the host.
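In host code that means reserving the local memory like this (a sketch based on the code above; only the size is passed):
/* Reserve sizeof(DATA) bytes of __local memory for argument 1.
   The argument value must be NULL for __local parameters. */
ret = clSetKernelArg(kernel, 1, sizeof(DATA), NULL);
If you want the kernel to receive the initialized struct itself, one option is to pass it by value instead: declare the parameter as a plain struct (no __local, no pointer), e.g. __kernel void tKernel(__global DATA *x, DATA tmp), and keep the original clSetKernelArg(kernel, 1, sizeof(DATA), &tmp) call; each work-item then gets its own private copy.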