Converting cv::Mat to MTLTexture - opencv

An intermediate step of my current project requires converting OpenCV's cv::Mat to MTLTexture, Metal's texture container. I need to store the floats in the Mat as floats in the texture; my project cannot afford the loss of precision.
This is my attempt at such a conversion.
- (id<MTLTexture>)texForMat:(cv::Mat)image context:(MBEContext *)context
{
    id<MTLTexture> texture;
    int width = image.cols;
    int height = image.rows;
    Float32 *rawData = (Float32 *)calloc(height * width * 4, sizeof(float));
    int bytesPerPixel = 4;
    int bytesPerRow = bytesPerPixel * width;
    float r, g, b, a;
    for (int i = 0; i < height; i++)
    {
        Float32 *imageData = (Float32 *)(image.data + image.step * i);
        for (int j = 0; j < width; j++)
        {
            r = (Float32)(imageData[4 * j]);
            g = (Float32)(imageData[4 * j + 1]);
            b = (Float32)(imageData[4 * j + 2]);
            a = (Float32)(imageData[4 * j + 3]);
            rawData[image.step * (i) + (4 * j)] = r;
            rawData[image.step * (i) + (4 * j + 1)] = g;
            rawData[image.step * (i) + (4 * j + 2)] = b;
            rawData[image.step * (i) + (4 * j + 3)] = a;
        }
    }
    MTLTextureDescriptor *textureDescriptor =
        [MTLTextureDescriptor texture2DDescriptorWithPixelFormat:MTLPixelFormatRGBA16Float
                                                           width:width
                                                          height:height
                                                       mipmapped:NO];
    texture = [context.device newTextureWithDescriptor:textureDescriptor];
    MTLRegion region = MTLRegionMake2D(0, 0, width, height);
    [texture replaceRegion:region mipmapLevel:0 withBytes:rawData bytesPerRow:bytesPerRow];
    free(rawData);
    return texture;
}
But it doesn't seem to be working. It reads zeroes every time from the Mat and throws EXC_BAD_ACCESS. I need the MTLTexture in MTLPixelFormatRGBA16Float to keep the precision.
Thanks for considering this issue.

One problem here is that you're loading up rawData with Float32s but your texture is RGBA16Float, so the data will be corrupted (a 16-bit half float is half the size of a Float32). This shouldn't cause your crash, but it's an issue you'll have to deal with.
Also, as chappjc noted, you're using image.step when writing your data out, but that buffer should be contiguous and never have a step other than width * bytesPerPixel. image.step is measured in bytes, so using it to index a Float32 array both scatters the pixels and writes past the end of the allocation, which is a likely source of the EXC_BAD_ACCESS.
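A sketch with both issues fixed (assuming a CV_32FC4 Mat and the question's MBEContext; MTLPixelFormatRGBA32Float is used so the texture format matches the Float32 staging buffer, and each row is copied with a byte-based source pointer but a float-based destination index):

- (id<MTLTexture>)texForMat:(cv::Mat)image context:(MBEContext *)context
{
    int width = image.cols;
    int height = image.rows;
    // 4 floats per pixel, so one tightly packed row is width * 4 floats.
    NSUInteger bytesPerRow = width * 4 * sizeof(Float32);
    Float32 *rawData = (Float32 *)calloc((size_t)height * width * 4, sizeof(Float32));
    for (int i = 0; i < height; i++)
    {
        // image.step is in bytes: advance the byte pointer per source row...
        const Float32 *src = (const Float32 *)(image.data + image.step * i);
        // ...but index the contiguous staging buffer in floats.
        memcpy(rawData + (size_t)i * width * 4, src, width * 4 * sizeof(Float32));
    }
    MTLTextureDescriptor *desc =
        [MTLTextureDescriptor texture2DDescriptorWithPixelFormat:MTLPixelFormatRGBA32Float
                                                           width:width
                                                          height:height
                                                       mipmapped:NO];
    id<MTLTexture> texture = [context.device newTextureWithDescriptor:desc];
    [texture replaceRegion:MTLRegionMake2D(0, 0, width, height)
               mipmapLevel:0
                 withBytes:rawData
               bytesPerRow:bytesPerRow];
    free(rawData);
    return texture;
}

If RGBA16Float is a hard requirement, the staging buffer would instead have to be converted to half floats (e.g. with vImageConvert_PlanarFtoPlanar16F) before the replaceRegion call.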

Related

how to convert pixelBuffer from BGRA to YUV

I want to convert a pixelBuffer from BGRA to YUV (420v).
Using the conversion function below, most of the videos in my phone's photo album convert correctly, except for one video from a colleague: after conversion the pixels are garbage, even though the video itself plays back normally. Its metadata:
Video
ID : 1
Format : AVC
Format/Info : Advanced Video Codec
Format profile : Main#L3.1
Format settings : CABAC / 1 Ref Frames
Format settings, CABAC : Yes
Format settings, Reference frames : 1 frame
Format settings, GOP : M=1, N=15
Codec ID : avc1
Codec ID/Info : Advanced Video Coding
Duration : 6 s 623 ms
Source duration : 6 s 997 ms
Bit rate : 4 662 kb/s
Width : 884 pixels
Clean aperture width : 884 pixels
Height : 492 pixels
Clean aperture height : 492 pixels
Display aspect ratio : 16:9
Original display aspect ratio : 16:9
Frame rate mode : Variable
Frame rate : 57.742 FPS
Minimum frame rate : 20.000 FPS
Maximum frame rate : 100.000 FPS
Color space : YUV
Chroma subsampling : 4:2:0
Bit depth : 8 bits
Scan type : Progressive
Bits/(Pixel*Frame) : 0.186
Stream size : 3.67 MiB (94%)
Source stream size : 3.79 MiB (97%)
Title : Core Media Video
Encoded date : UTC 2021-10-29 09:54:03
Tagged date : UTC 2021-10-29 09:54:03
Color range : Limited
Color primaries : Display P3
Transfer characteristics : BT.709
Matrix coefficients : BT.709
Codec configuration box : avcC
This is my function; I do not know what is wrong.
CFDictionaryRef CreateCFDictionary(CFTypeRef* keys, CFTypeRef* values, size_t size) {
    return CFDictionaryCreate(kCFAllocatorDefault,
                              keys,
                              values,
                              size,
                              &kCFTypeDictionaryKeyCallBacks,
                              &kCFTypeDictionaryValueCallBacks);
}

static void bt709_rgb2yuv8bit_TV(uint8_t R, uint8_t G, uint8_t B, uint8_t &Y, uint8_t &U, uint8_t &V)
{
    Y =  0.183 * R + 0.614 * G + 0.062 * B + 16;
    U = -0.101 * R - 0.339 * G + 0.439 * B + 128;
    V =  0.439 * R - 0.399 * G - 0.040 * B + 128;
}

CVPixelBufferRef RGB2YCbCr8Bit(CVPixelBufferRef pixelBuffer)
{
    CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(pixelBuffer);
    int w = (int) CVPixelBufferGetWidth(pixelBuffer);
    int h = (int) CVPixelBufferGetHeight(pixelBuffer);
    // int stride = (int) CVPixelBufferGetBytesPerRow(pixelBuffer) / 4;
    OSType pixelFormat = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
    CVPixelBufferRef pixelBufferCopy = NULL;
    const size_t attributes_size = 1;
    CFTypeRef keys[attributes_size] = {
        kCVPixelBufferIOSurfacePropertiesKey,
    };
    CFDictionaryRef io_surface_value = CreateCFDictionary(nullptr, nullptr, 0);
    CFTypeRef values[attributes_size] = {io_surface_value};
    CFDictionaryRef attributes = CreateCFDictionary(keys, values, attributes_size);
    CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault,
                                          w,
                                          h,
                                          pixelFormat,
                                          attributes,
                                          &pixelBufferCopy);
    if (status != kCVReturnSuccess) {
        std::cout << "YUVBufferCopyWithPixelBuffer :: failed" << std::endl;
        return nullptr;
    }
    if (attributes) {
        CFRelease(attributes);
        attributes = nullptr;
    }
    CVPixelBufferLockBaseAddress(pixelBufferCopy, 0);
    size_t y_stride = CVPixelBufferGetBytesPerRowOfPlane(pixelBufferCopy, 0);
    size_t uv_stride = CVPixelBufferGetBytesPerRowOfPlane(pixelBufferCopy, 1);
    int plane_h1 = (int) CVPixelBufferGetHeightOfPlane(pixelBufferCopy, 0);
    int plane_h2 = (int) CVPixelBufferGetHeightOfPlane(pixelBufferCopy, 1);
    uint8_t *y = (uint8_t *) CVPixelBufferGetBaseAddressOfPlane(pixelBufferCopy, 0);
    memset(y, 0x80, plane_h1 * y_stride);
    uint8_t *uv = (uint8_t *) CVPixelBufferGetBaseAddressOfPlane(pixelBufferCopy, 1);
    memset(uv, 0x80, plane_h2 * uv_stride);
    int y_bufferSize = w * h;
    int uv_bufferSize = w * h / 4;
    uint8_t *y_planeData = (uint8_t *) malloc(y_bufferSize * sizeof(uint8_t));
    uint8_t *u_planeData = (uint8_t *) malloc(uv_bufferSize * sizeof(uint8_t));
    uint8_t *v_planeData = (uint8_t *) malloc(uv_bufferSize * sizeof(uint8_t));
    int u_offset = 0;
    int v_offset = 0;
    uint8_t R, G, B;
    uint8_t Y, U, V;
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++) {
            int offset = i * w + j;
            B = baseAddress[offset * 4];
            G = baseAddress[offset * 4 + 1];
            R = baseAddress[offset * 4 + 2];
            bt709_rgb2yuv8bit_TV(R, G, B, Y, U, V);
            y_planeData[offset] = Y;
            // Subsampling: even columns of even rows keep U, even columns of odd rows keep V
            if (j % 2 == 0) {
                (i % 2 == 0) ? u_planeData[u_offset++] = U : v_planeData[v_offset++] = V;
            }
        }
    }
    for (int i = 0; i < plane_h1; i++) {
        memcpy(y + i * y_stride, y_planeData + i * w, w);
        if (i < plane_h2) {
            for (int j = 0; j < w; j += 2) {
                // NV12 and NV21 are both YUV420SP layouts: the Y plane is stored first,
                // then the U and V samples are stored interleaved rather than as separate planes.
                // NV12 is the iOS variant: Y plane first, then alternating U/V bytes.
                memcpy(uv + i * y_stride + j, u_planeData + i * w/2 + j/2, 1);
                memcpy(uv + i * y_stride + j + 1, v_planeData + i * w/2 + j/2, 1);
            }
        }
    }
    free(y_planeData);
    free(u_planeData);
    free(v_planeData);
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
    CVPixelBufferUnlockBaseAddress(pixelBufferCopy, 0);
    return pixelBufferCopy;
}
(Screenshots: the BGRA pixelBuffer looks normal; the converted YUV pixelBuffer is garbage.)
In the video metadata there is the line "Color space : YUV"; it looks like this video isn't BGRA.
When you calculate the source pixel offset you must use the stride (the length of an image row in bytes) instead of the width, because the distance between rows in an image may be bigger than width * pixel_size_in_bytes. I recommend checking this case on images with odd width.
int offset = i * stride + j;
You already have it, commented out, at the beginning of the function:
int stride = (int) CVPixelBufferGetBytesPerRow(pixelBuffer) / 4;
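With that, a sketch of the corrected inner loop (only the source index changes; the destination planes stay tightly packed):

// stride counts BGRA pixels per row, including any padding bytes
int stride = (int) CVPixelBufferGetBytesPerRow(pixelBuffer) / 4;
for (int i = 0; i < h; i++) {
    for (int j = 0; j < w; j++) {
        int srcOffset = i * stride + j;  // walk the padded source buffer
        int dstOffset = i * w + j;       // write the tightly packed Y plane
        B = baseAddress[srcOffset * 4];
        G = baseAddress[srcOffset * 4 + 1];
        R = baseAddress[srcOffset * 4 + 2];
        bt709_rgb2yuv8bit_TV(R, G, B, Y, U, V);
        y_planeData[dstOffset] = Y;
        if (j % 2 == 0) {
            (i % 2 == 0) ? u_planeData[u_offset++] = U : v_planeData[v_offset++] = V;
        }
    }
}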
It is better to use built-in functions for converting images. Here is an example from one of my projects:
vImage_CGImageFormat out_cg_format = CreateVImage_CGImageFormat( target_pixel_format );
CGColorSpaceRef color_space = CGColorSpaceCreateDeviceRGB();
vImageCVImageFormatRef in_cv_format = vImageCVImageFormat_Create(
    MSPixFmt_to_CVPixelFormatType(source_pixel_format),
    kvImage_ARGBToYpCbCrMatrix_ITU_R_601_4,
    kCVImageBufferChromaLocation_Center,
    color_space,
    0 );
CGColorSpaceRelease(color_space);
CGColorSpaceRelease(out_cg_format.colorSpace);

vImage_Error err = kvImageNoError;
vImageConverterRef converter = vImageConverter_CreateForCVToCGImageFormat(in_cv_format, &out_cg_format, NULL, kvImagePrintDiagnosticsToConsole, &err);

vImage_Buffer src_planes[4] = {{0}};
vImage_Buffer dst_planes[4] = {{0}};
unsigned long source_plane_count = vImageConverter_GetNumberOfSourceBuffers(converter);
for( unsigned int i = 0; i < source_plane_count; i++ )
{
    src_planes[i] = (vImage_Buffer){planes_in[i], pic_size.height, pic_size.width, strides_in[i]};
}
unsigned long target_plane_count = vImageConverter_GetNumberOfDestinationBuffers(converter);
for( unsigned int i = 0; i < target_plane_count; i++ )
{
    dst_planes[i] = (vImage_Buffer){planes_out[i], pic_size.height, pic_size.width, strides_out[i]};
}
err = vImageConvert_AnyToAny(converter, src_planes, dst_planes, NULL, kvImagePrintDiagnosticsToConsole);
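vImageConvert_AnyToAny takes the row stride of each vImage_Buffer into account and handles the video-range YCbCr math for you, which is exactly the kind of detail the hand-rolled loop above gets wrong.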

Separable gaussian blur - optimize vertical pass

I have implemented a separable Gaussian blur. The horizontal pass was relatively easy to optimize with SIMD processing. However, I am not sure how to optimize the vertical pass.
Accessing elements is not very cache friendly, and filling a SIMD lane would mean reading many different pixels. I was thinking about transposing the image, running the horizontal pass, and then transposing it back; however, I am not sure it will gain any improvement because of the two transpose operations.
I have quite large images (16k resolution) and the kernel size is 19, so the gain from vectorizing the vertical pass was only about 15%.
My vertical pass is as follows (it is inside a generic class templated on T, which can be uint8_t or float):
int yStart = kernelHalfSize;
int xStart = kernelHalfSize;
int yEnd = input.GetWidth() - kernelHalfSize;
int xEnd = input.GetHeigh() - kernelHalfSize;
const T * inData = input.GetData().data();
V * outData = output.GetData().data();
int kn = kernelHalfSize * 2 + 1;
int kn4 = kn - kn % 4;
for (int y = yStart; y < yEnd; y++)
{
    size_t yW = size_t(y) * output.GetWidth();
    size_t outX = size_t(xStart) + yW;
    size_t xEndSimd = xStart;
    int len = xEnd - xStart;
    len = len - len % 4;
    xEndSimd = xStart + len;
    for (int x = xStart; x < xEndSimd; x += 4)
    {
        size_t inYW = size_t(y) * input.GetWidth();
        size_t x0 = ((x + 0) - kernelHalfSize) + inYW;
        size_t x1 = x0 + 1;
        size_t x2 = x0 + 2;
        size_t x3 = x0 + 3;
        __m128 sumDot = _mm_setzero_ps();
        int i = 0;
        for (; i < kn4; i += 4)
        {
            __m128 kx = _mm_set_ps1(kernelDataX[i + 0]);
            __m128 ky = _mm_set_ps1(kernelDataX[i + 1]);
            __m128 kz = _mm_set_ps1(kernelDataX[i + 2]);
            __m128 kw = _mm_set_ps1(kernelDataX[i + 3]);
            __m128 dx, dy, dz, dw;
            if constexpr (std::is_same<T, uint8_t>::value)
            {
                //we need to convert uint8_t inputs to float
                __m128i u8_0 = _mm_loadu_si128((const __m128i*)(inData + x0));
                __m128i u8_1 = _mm_loadu_si128((const __m128i*)(inData + x1));
                __m128i u8_2 = _mm_loadu_si128((const __m128i*)(inData + x2));
                __m128i u8_3 = _mm_loadu_si128((const __m128i*)(inData + x3));
                __m128i u32_0 = _mm_unpacklo_epi16(
                    _mm_unpacklo_epi8(u8_0, _mm_setzero_si128()),
                    _mm_setzero_si128());
                __m128i u32_1 = _mm_unpacklo_epi16(
                    _mm_unpacklo_epi8(u8_1, _mm_setzero_si128()),
                    _mm_setzero_si128());
                __m128i u32_2 = _mm_unpacklo_epi16(
                    _mm_unpacklo_epi8(u8_2, _mm_setzero_si128()),
                    _mm_setzero_si128());
                __m128i u32_3 = _mm_unpacklo_epi16(
                    _mm_unpacklo_epi8(u8_3, _mm_setzero_si128()),
                    _mm_setzero_si128());
                dx = _mm_cvtepi32_ps(u32_0);
                dy = _mm_cvtepi32_ps(u32_1);
                dz = _mm_cvtepi32_ps(u32_2);
                dw = _mm_cvtepi32_ps(u32_3);
            }
            else
            {
                /*
                //load 8 consecutive values
                auto dd = _mm256_loadu_ps(inData + x0);
                //extract parts by shifting and casting to 4 values float
                dx = _mm256_castps256_ps128(dd);
                dy = _mm256_castps256_ps128(_mm256_permutevar8x32_ps(dd, _mm256_set_epi32(0, 0, 0, 0, 4, 3, 2, 1)));
                dz = _mm256_castps256_ps128(_mm256_permutevar8x32_ps(dd, _mm256_set_epi32(0, 0, 0, 0, 5, 4, 3, 2)));
                dw = _mm256_castps256_ps128(_mm256_permutevar8x32_ps(dd, _mm256_set_epi32(0, 0, 0, 0, 6, 5, 4, 3)));
                */
                dx = _mm_loadu_ps(inData + x0);
                dy = _mm_loadu_ps(inData + x1);
                dz = _mm_loadu_ps(inData + x2);
                dw = _mm_loadu_ps(inData + x3);
            }
            //calculate 4 dots at once
            //[dx, dy, dz, dw] <dot> [kx, ky, kz, kw]
            auto mx = _mm_mul_ps(dx, kx); //dx * kx
            auto my = _mm_fmadd_ps(dy, ky, mx); //mx + dy * ky
            auto mz = _mm_fmadd_ps(dz, kz, my); //my + dz * kz
            auto res = _mm_fmadd_ps(dw, kw, mz); //mz + dw * kw
            sumDot = _mm_add_ps(sumDot, res);
            x0 += 4;
            x1 += 4;
            x2 += 4;
            x3 += 4;
        }
        for (; i < kn; i++)
        {
            auto v = _mm_set_ps1(kernelDataX[i]);
            auto v2 = _mm_set_ps(
                *(inData + x3), *(inData + x2),
                *(inData + x1), *(inData + x0)
            );
            sumDot = _mm_add_ps(sumDot, _mm_mul_ps(v, v2));
            x0++;
            x1++;
            x2++;
            x3++;
        }
        sumDot = _mm_mul_ps(sumDot, _mm_set_ps1(weightX));
        if constexpr (std::is_same<V, uint8_t>::value)
        {
            __m128i asInt = _mm_cvtps_epi32(sumDot);
            asInt = _mm_packus_epi32(asInt, asInt);
            asInt = _mm_packus_epi16(asInt, asInt);
            uint32_t res = _mm_cvtsi128_si32(asInt);
            ((uint32_t *)(outData + outX))[0] = res;
            outX += 4;
        }
        else
        {
            float tmpRes[4];
            _mm_store_ps(tmpRes, sumDot);
            outData[outX + 0] = tmpRes[0];
            outData[outX + 1] = tmpRes[1];
            outData[outX + 2] = tmpRes[2];
            outData[outX + 3] = tmpRes[3];
            outX += 4;
        }
    }
    for (int x = xEndSimd; x < xEnd; x++)
    {
        int kn = kernelHalfSize * 2 + 1;
        const T * v = input.GetPixelStart(x - kernelHalfSize, y);
        float tmp = 0;
        for (int i = 0; i < kn; i++)
        {
            tmp += kernelDataX[i] * v[i];
        }
        tmp *= weightX;
        outData[outX] = ImageUtils::clamp_cast<V>(tmp);
        outX++;
    }
}
There’s a well-known trick for that.
In both passes, read the input sequentially and use SIMD to compute, but write the results into another buffer transposed, using scalar stores. Protip: SSE 4.1 has _mm_extract_ps for this; just don’t forget to cast your destination image pointer from float* to int*. Another thing about these stores: I would recommend _mm_stream_si32, because you want the cache space to be used by your input data. When you compute the second pass, you’ll be reading sequential memory addresses again, and the prefetcher hardware will deal with the latency.
This way both passes are identical; I usually call the same function twice, with different buffers. The two transposes caused by your two passes cancel each other out. Here’s an HLSL version, BTW.
There’s more. If your kernel size is only 19, it fits in 3 AVX registers. I think shuffle/permute/blend instructions are still faster than even L1 cache loads, i.e. it might be better to load the kernel into registers outside the loop.
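To make the transposed-store idea concrete, here is a minimal sketch of one such pass (a hypothetical free function, not the poster's class: a tightly packed float image, with borders and the non-multiple-of-4 kernel tail omitted for brevity):

#include <immintrin.h>  // SSE4.1 for _mm_extract_ps, SSE2 for _mm_stream_si32

// One separable-blur pass: reads src rows sequentially, convolves 4 pixels
// at a time, and writes the results transposed into dst (H x W).
// Calling it twice with swapped buffers blurs both axes; the transposes cancel.
void blurPassTransposed(const float* src, float* dst, int W, int H,
                        const float* kernel, int kernelSize)
{
    const int half = kernelSize / 2;
    for (int y = 0; y < H; y++)
    {
        const float* row = src + (size_t)y * W;
        for (int x = half; x + 3 < W - half; x += 4)
        {
            __m128 sum = _mm_setzero_ps();
            for (int i = 0; i < kernelSize; i++)
            {
                // lane L accumulates kernel[i] * row[x + L - half + i]
                __m128 k = _mm_set1_ps(kernel[i]);
                __m128 d = _mm_loadu_ps(row + x - half + i);
                sum = _mm_add_ps(sum, _mm_mul_ps(d, k));
            }
            // Scatter the 4 results down one column of the transposed image
            // with scalar streaming stores, so they bypass the cache and the
            // cache stays dedicated to the sequentially read input.
            // _mm_extract_ps returns the raw bits as int, hence the int* cast.
            int* out = (int*)(dst + (size_t)x * H + y);
            _mm_stream_si32(out,         _mm_extract_ps(sum, 0));
            _mm_stream_si32(out + H,     _mm_extract_ps(sum, 1));
            _mm_stream_si32(out + 2 * H, _mm_extract_ps(sum, 2));
            _mm_stream_si32(out + 3 * H, _mm_extract_ps(sum, 3));
        }
    }
}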

Converting a 24-bit PNG image to an array of GLubytes

I'd like to do the following:
Read RGB color values from a 24-bit PNG image
Average the RGB values and store them into an array of GLubytes.
I have provided my function that I was hoping would perform these two steps.
My function returns an array of GLubytes; however, all elements have a value of 0.
So I'm guessing I'm reading the image data incorrectly.
What am I doing wrong in reading the image (perhaps my format is incorrect)?
Here is my function:
+ (GLubyte *) LoadPhotoAveragedIndexPNG:(UIImage *)image numPixelComponents:(int)numComponents
{
    // Load an image and return byte array.
    CGImageRef textureImage = image.CGImage;
    if (textureImage == nil)
    {
        NSLog(@"LoadPhotoIndexPNG: Failed to load texture image");
        return nil;
    }
    NSInteger texWidth = CGImageGetWidth(textureImage);
    NSInteger texHeight = CGImageGetHeight(textureImage);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    GLubyte *indexedData = (GLubyte *)malloc(texWidth * texHeight);
    GLubyte *rawData = (GLubyte *)malloc(texWidth * texHeight * numComponents);
    CGContextRef textureContext = CGBitmapContextCreate(
        rawData,
        texWidth,
        texHeight,
        8,
        texWidth * numComponents,
        colorSpace,
        kCGImageAlphaPremultipliedLast);
    CGColorSpaceRelease(colorSpace);
    CGContextDrawImage(textureContext,
                       CGRectMake(0.0, 0.0, (float)texWidth, (float)texHeight),
                       textureImage);
    CGContextRelease(textureContext);
    int rawDataLength = texWidth * texHeight * numComponents;
    for (int i = 0, j = 0; i < rawDataLength; i += numComponents)
    {
        GLubyte b = rawData[i];
        GLubyte g = rawData[i + 1];
        GLubyte r = rawData[i + 2];
        indexedData[j++] = (r + g + b) / 3;
    }
    return indexedData;
}
Here is the test image I'm loading (RGB colorspace, in PNG format):
Do check with some logging whether the parameters b, g and r are producing normal values in the last for loop. Where you made a mistake is indexedData[j++] = (r + g + b) / 3;: those three parameters are 1 byte each, and you cannot sum them up like that. Use a larger integer type, typecast them, and typecast the result back for the array (you are most likely getting an overflow).
Apart from your original problem, there's a major problem here (maybe even related):
for (int i = 0, j = 0; i < rawDataLength; i += numComponents)
{
    GLubyte b = rawData[i];
    GLubyte g = rawData[i + 1];
    GLubyte r = rawData[i + 2];
    indexedData[j++] = (r + g + b) / 3;
}
Namely the expression
(r + g + b)
This expression will be performed with GLubyte-sized integer operations. If the sum of r + g + b is larger than what the type GLubyte can hold, it will overflow. Whenever you're processing data through intermediary variables (good style!), choose variable types large enough to hold the largest value you can encounter. Another method is casting the expression, like
indexedData[j++] = ((uint16_t)r + (uint16_t)g + (uint16_t)b) / 3;
but that's cumbersome to read. Also, if you're processing integers of a known size, use the types found in stdint.h; you know you're expecting 8 bits per channel. And you can use the comma operator in the for increment clause:
uint8_t *indexedData = (GLubyte *)malloc(texWidth * texHeight);
/* ... */
for (int i = 0, j = 0; i < rawDataLength; i += numComponents, j++)
{
    uint16_t b = rawData[i];
    uint16_t g = rawData[i + 1];
    uint16_t r = rawData[i + 2];
    indexedData[j] = (r + g + b) / 3;
}

JavaCV creating and drawing grayscale one channel histogram

I am new to this website; please let me know if I have made any mistake in my post.
I have some questions regarding calculating and drawing a histogram in JavaCV. Below is the code I have written, based on some information I searched for. I get this error:
OpenCV Error: One of arguments' values is out of range (index is out of range) in unknown function, file ..\..\..\src\opencv\modules\core\src\array.cpp, line 1691
private CvHistogram getHistogram(IplImage image) { //get histogram data, input has been converted to grayscale beforehand
    IplImage[] hsvImage1 = {image};
    //bins and value-range
    int numberOfBins = 256;
    float minRange = 0.0f;
    float maxRange = 255.0f;
    // Allocate histogram object
    int dims = 1;
    int[] sizes = new int[]{numberOfBins};
    int histType = CV_HIST_ARRAY;
    float[] minMax = new float[]{minRange, maxRange};
    float[][] ranges = new float[][]{minMax};
    CvHistogram hist = cvCreateHist(dims, sizes, histType, ranges, 1);
    cvCalcHist(hsvImage1, hist, 0, null);
    return hist;
}
private IplImage DrawHistogram(CvHistogram hist, IplImage image) { //draw histogram
    int scaleX = 1;
    int scaleY = 1;
    int i;
    float[] max_value = {0};
    int[] int_value = {0};
    cvGetMinMaxHistValue(hist, max_value, max_value, int_value, int_value); //get min and max value for histogram
    IplImage imgHist = cvCreateImage(cvSize(256, image.height()), IPL_DEPTH_8U, 1); //create image to store histogram
    cvZero(imgHist);
    CvPoint pts = new CvPoint(5);
    for (i = 0; i < 256; i++) { //draw the histogram
        float value = opencv_legacy.cvQueryHistValue_1D(hist, i);
        float nextValue = opencv_legacy.cvQueryHistValue_1D(hist, i + 1);
        pts.position(0).x(i * scaleX).y(image.height() * scaleY);
        pts.position(1).x(i * scaleX + scaleX).y(image.height() * scaleY);
        pts.position(2).x(i * scaleX + scaleX).y((int)((image.height() - nextValue * image.height() / max_value[0]) * scaleY));
        pts.position(3).x(i * scaleX).y((int)((image.height() - value * image.height() / max_value[0]) * scaleY));
        pts.position(4).x(i * scaleX).y(image.height() * scaleY);
        cvFillConvexPoly(imgHist, pts.position(0), 5, CvScalar.RED, CV_AA, 0);
    }
    return imgHist;
}
I have tried searching through a few links (provided at the bottom); however, each of them is in a different language, so I am not sure I have converted them to Java correctly. To be honest, there are a few things I doubt and would be glad to get advice on, such as:
float[] max_value = {0}; // I found this on the internet and it gets past the syntax error in cvGetMinMaxHistValue(), but I'm not sure whether it causes a logic error
pts.position(3).x(i * scaleX).y((int)((image.height() - value * image.height() / max_value[0]) * scaleY)); // I cast to int to downcast to the type pts recognises; also, max_value[0] starts at 0, and I wonder if that causes a logic error due to division by zero
Links used:
//use this
public CvHistogram getHistogram(IplImage image) { //get histogram data, input has been converted to grayscale beforehand
    IplImageArray hsvImage1 = splitChannels(image);
    //bins and value-range
    int numberOfBins = 256;
    float minRange = 0.0f;
    float maxRange = 255.0f;
    // Allocate histogram object
    int dims = 1;
    int[] sizes = new int[]{numberOfBins};
    int histType = CV_HIST_ARRAY;
    float[] minMax = new float[]{minRange, maxRange};
    float[][] ranges = new float[][]{minMax};
    CvHistogram hist = cvCreateHist(dims, sizes, histType, ranges, 1);
    cvCalcHist(hsvImage1, hist, 0, null);
    return hist;
}

private IplImageArray splitChannels(IplImage hsvImage) {
    CvSize size = hsvImage.cvSize();
    int depth = hsvImage.depth();
    IplImage channel0 = cvCreateImage(size, depth, 1);
    IplImage channel1 = cvCreateImage(size, depth, 1);
    IplImage channel2 = cvCreateImage(size, depth, 1);
    cvSplit(hsvImage, channel0, channel1, channel2, null);
    return new IplImageArray(channel0, channel1, channel2);
}
Your error is in this part:
for (i = 0; i < 256; i++) { //draw the histogram
    float value = opencv_legacy.cvQueryHistValue_1D(hist, i);
    float nextValue = opencv_legacy.cvQueryHistValue_1D(hist, i + 1);
You use i + 1, which goes out of range on the last bin; run your for loop only up to 255 to correct it.
I hope I helped you. GL
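A sketch of the corrected bound (so the i + 1 lookup never asks for bin 256):

// stop one bin early: the body reads both bin i and bin i + 1
for (i = 0; i < 255; i++) {
    float value = opencv_legacy.cvQueryHistValue_1D(hist, i);
    float nextValue = opencv_legacy.cvQueryHistValue_1D(hist, i + 1);
    // ... rest of the drawing code unchanged
}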

Why do operations with an array corrupt the values?

I'm trying to implement Particle Swarm Optimization on CUDA. I partially initialize the data arrays on the host, then allocate memory on CUDA and copy them there, and then try to proceed with the initialization.
The problem is, when I try to modify an array element like so
__global__ void kernelInit(
    float* X,
    size_t pitch,
    int width,
    float X_high,
    float X_low
) {
    // Silly, but pretty reliable way to address array elements
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int r = tid / width;
    int c = tid % width;
    float* pElement = (float*)((char*)X + r * pitch) + c;
    *pElement = *pElement * (X_high - X_low) - X_low;
    //*pElement = (X_high - X_low) - X_low;
}
It corrupts the values and gives me 1.#INF00 as the array element. When I uncomment the last line *pElement = (X_high - X_low) - X_low; and comment out the previous one, it works as expected: I get values like 15.36 and so on.
I believe the problem is either with my memory allocation and copying, and/or with addressing the specific array element. I read the CUDA manual on both topics, but I can't spot the error: I still get a corrupt array if I do anything with the array element. For example, *pElement = *pElement * 2 gives unreasonably big results like 779616...00000000.00000 when the initial pElement is expected to be just a float in [0;1].
Here is the full source. Initialization of the arrays begins in main (at the bottom of the source); then the f1 function does the work for CUDA and launches the initialization kernel kernelInit:
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>

const unsigned f_n = 3;
const unsigned n = 2;
const unsigned p = 64;

typedef struct {
    unsigned k_max;
    float c1;
    float c2;
    unsigned p;
    float inertia_factor;
    float Ef;
    float X_low[f_n];
    float X_high[f_n];
    float X_min[n][f_n];
} params_t;

typedef void (*kernelWrapperType) (
    float *X,
    float *X_highVec,
    float *V,
    float *X_best,
    float *Y,
    float *Y_best,
    float *X_swarmBest,
    bool &termination,
    const float &inertia,
    const params_t *params,
    const unsigned &f
);

typedef float (*twoArgsFuncType) (
    float x1,
    float x2
);

__global__ void kernelInit(
    float* X,
    size_t pitch,
    int width,
    float X_high,
    float X_low
) {
    // Silly, but pretty reliable way to address array elements
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int r = tid / width;
    int c = tid % width;
    float* pElement = (float*)((char*)X + r * pitch) + c;
    *pElement = *pElement * (X_high - X_low) - X_low;
    //*pElement = (X_high - X_low) - X_low;
}

__device__ float kernelF1(
    float x1,
    float x2
) {
    float y = pow(x1, 2.f) + pow(x2, 2.f);
    return y;
}

void f1(
    float *X,
    float *X_highVec,
    float *V,
    float *X_best,
    float *Y,
    float *Y_best,
    float *X_swarmBest,
    bool &termination,
    const float &inertia,
    const params_t *params,
    const unsigned &f
) {
    float *X_d = NULL;
    float *Y_d = NULL;
    unsigned length = n * p;
    const cudaChannelFormatDesc desc = cudaCreateChannelDesc<float4>();
    size_t pitch;
    size_t dpitch;
    cudaError_t err;
    unsigned width = n;
    unsigned height = p;

    err = cudaMallocPitch (&X_d, &dpitch, width * sizeof(float), height);
    pitch = n * sizeof(float);
    err = cudaMemcpy2D(X_d, dpitch, X, pitch, width * sizeof(float), height, cudaMemcpyHostToDevice);
    err = cudaMalloc (&Y_d, sizeof(float) * p);
    err = cudaMemcpy (Y_d, Y, sizeof(float) * p, cudaMemcpyHostToDevice);

    dim3 threads; threads.x = 32;
    dim3 blocks; blocks.x = (length/threads.x) + 1;
    kernelInit<<<threads,blocks>>>(X_d, dpitch, width, params->X_high[f], params->X_low[f]);

    err = cudaMemcpy2D(X, pitch, X_d, dpitch, n*sizeof(float), p, cudaMemcpyDeviceToHost);
    err = cudaFree(X_d);
    err = cudaMemcpy(Y, Y_d, sizeof(float) * p, cudaMemcpyDeviceToHost);
    err = cudaFree(Y_d);
}

float F1(
    float x1,
    float x2
) {
    float y = pow(x1, 2.f) + pow(x2, 2.f);
    return y;
}

/*
 * Generates random float in [0.0; 1.0]
 */
float frand(){
    return (float)rand()/(float)RAND_MAX;
}

/*
 * This is the main routine which declares and initializes the integer vector, moves it to the device, launches kernel
 * brings the result vector back to host and dumps it on the console.
 */
int main() {
    const params_t params = {
        100,
        0.5,
        0.5,
        p,
        0.98,
        0.01,
        {-5.12, -2.048, -5.12},
        {5.12, 2.048, 5.12},
        {{0, 1, 0}, {0, 1, 0}}
    };
    float X[p][n];
    float X_highVec[n];
    float V[p][n];
    float X_best[p][n];
    float Y[p] = {0};
    float Y_best[p] = {0};
    float X_swarmBest[n];
    kernelWrapperType F_wrapper[f_n] = {&f1, &f1, &f1};
    twoArgsFuncType F[f_n] = {&F1, &F1, &F1};
    for (unsigned f = 0; f < f_n; f++) {
        printf("Optimizing function #%u\n", f);
        srand ( time(NULL) );
        for (unsigned i = 0; i < p; i++)
            for (unsigned j = 0; j < n; j++)
                X[i][j] = X_best[i][j] = frand();
        for (int i = 0; i < n; i++)
            X_highVec[i] = params.X_high[f];
        for (unsigned i = 0; i < p; i++)
            for (unsigned j = 0; j < n; j++)
                V[i][j] = frand();
        for (unsigned i = 0; i < p; i++)
            Y_best[i] = F[f](X[i][0], X[i][1]);
        for (unsigned i = 0; i < n; i++)
            X_swarmBest[i] = params.X_high[f];
        float y_swarmBest = F[f](X_highVec[0], X_highVec[1]);
        bool termination = false;
        float inertia = 1.;
        for (unsigned k = 0; k < params.k_max; k++) {
            F_wrapper[f]((float *)X, X_highVec, (float *)V, (float *)X_best, Y, Y_best, X_swarmBest, termination, inertia, &params, f);
        }
        for (unsigned i = 0; i < p; i++)
        {
            for (unsigned j = 0; j < n; j++)
            {
                printf("%f\t", X[i][j]);
            }
            printf("F = %f\n", Y[i]);
        }
        getchar();
    }
}
Update: I tried adding error handling like so
err = cudaMallocPitch (&X_d, &dpitch, width * sizeof(float), height);
if (err != cudaSuccess) {
    fprintf(stderr, cudaGetErrorString(err));
    exit(1);
}
after each API call, but it printed nothing and never exited (I still get all the results and the program runs to the end).
This is an unnecessarily complex piece of code for what should be a simple repro case, but this immediately jumps out:
const unsigned n = 2;
const unsigned p = 64;
unsigned length = n * p;
dim3 threads; threads.x = 32;
dim3 blocks; blocks.x = (length/threads.x) + 1;
kernelInit<<<threads,blocks>>>(X_d, dpitch, width, params->X_high[f], params->X_low[f]);
So you are firstly computing an incorrect number of blocks, and then reversing the order of the blocks-per-grid and threads-per-block arguments in the kernel launch. That may well lead to out-of-bounds memory access, either hosing something in GPU memory or causing an unspecified launch failure, which your lack of error handling might not be catching. There is a tool called cuda-memcheck which has shipped with the toolkit since about CUDA 3.0. If you run it, it will give you valgrind-style memory access violation reports. You should get into the habit of using it, if you are not already doing so.
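For example (the binary name is illustrative):
cuda-memcheck ./pso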
As for the infinite values, that is to be expected, isn't it? Your code starts with values in (0,1), and then does
X[i] = X[i] * (5.12 - -5.12) - -5.12
100 times, which is the rough equivalent of multiplying by 10^100; that is then followed by
X[i] = X[i] * (2.048 - -2.048) - -2.048
100 times, which is the rough equivalent of multiplying by 4^100, finally followed by
X[i] = X[i] * (5.12 - -5.12) - -5.12
again. So your results should be of the order of 1E250, which is much larger than 3.4E38, the rough upper limit of representable numbers in IEEE 754 single precision.
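For completeness, a minimal corrected launch might look like this (a sketch: it assumes kernelInit is also given the element count, so surplus threads in the rounded-up grid can early-out with something like if (tid >= count) return;):

unsigned length = n * p;
dim3 threads(32);
dim3 blocks((length + threads.x - 1) / threads.x);  // round up, no stray "+ 1"
kernelInit<<<blocks, threads>>>(X_d, dpitch, width, params->X_high[f], params->X_low[f]);
err = cudaGetLastError();            // launch errors surface here,
if (err != cudaSuccess) {            // not from the preceding API calls
    fprintf(stderr, "%s\n", cudaGetErrorString(err));
    exit(1);
}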
