Logic Error: Argument in message expression is an uninitialized value - iOS

I am getting this logic error when I analyze my code. It says "Argument in message expression is an uninitialized value".
Here is what I have:
// allocate symbol
int baseMatrixSize = compact ? 11 + layers * 4 : 14 + layers * 4; // not including alignment lines
int alignmentMap[baseMatrixSize];
int matrixSize;
if (compact) {
// no alignment marks in compact mode, alignmentMap is a no-op
matrixSize = baseMatrixSize;
for (int i = 0; i < baseMatrixSize; i++) {
alignmentMap[i] = i;
}
} else {
matrixSize = baseMatrixSize + 1 + 2 * ((baseMatrixSize / 2 - 1) / 15);
int origCenter = baseMatrixSize / 2;
int center = matrixSize / 2;
for (int i = 0; i < origCenter; i++) {
int newOffset = i + i / 15;
alignmentMap[origCenter - i - 1] = center - newOffset - 1;
alignmentMap[origCenter + i] = center + newOffset + 1;
}
}
ZXBitMatrix *matrix = [[ZXBitMatrix alloc] initWithDimension:matrixSize];
// draw data bits
for (int i = 0, rowOffset = 0; i < layers; i++) {
int rowSize = compact ? (layers - i) * 4 + 9 : (layers - i) * 4 + 12;
for (int j = 0; j < rowSize; j++) {
int columnOffset = j * 2;
for (int k = 0; k < 2; k++) {
if ([messageBits get:rowOffset + columnOffset + k]) {
[matrix setX:alignmentMap[i * 2 + k] y:alignmentMap[i * 2 + j]];
}
if ([messageBits get:rowOffset + rowSize * 2 + columnOffset + k]) {
[matrix setX:alignmentMap[i * 2 + j] y:alignmentMap[baseMatrixSize - 1 - i * 2 - k]];
}
if ([messageBits get:rowOffset + rowSize * 4 + columnOffset + k]) {
[matrix setX:alignmentMap[baseMatrixSize - 1 - i * 2 - k] y:alignmentMap[baseMatrixSize - 1 - i * 2 - j]];
}
if ([messageBits get:rowOffset + rowSize * 6 + columnOffset + k]) {
[matrix setX:alignmentMap[baseMatrixSize - 1 - i * 2 - j] y:alignmentMap[i * 2 + k]];
}
}
}
rowOffset += rowSize * 8;
}
I tried initializing matrixSize with 0, but it still gives me the error.
Could you please tell me why I am getting this error?

Related

CUDA tiled 2D Convolution in shared memory is slower than global memory

I implemented two convolutions, using constant memory for the mask.
One without tiling, reading from global memory:
__global__ void constGradientConvolution(uint8_t* inputImgData, uint8_t* gradientImgData, int w, int h) {
// Calculate the global thread positions
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Starting index for calculation
int start_r = row - SOBEL_OP_RADIUS;
int start_c = col - SOBEL_OP_RADIUS;
// Temp value for calculation
int temp = 0;
// Iterate over all the rows
for (int i = 0; i < SOBEL_OP_DIM; i++) {
// Go over each column
for (int j = 0; j < SOBEL_OP_DIM; j++) {
// Range check for rows
if ((start_r + i) >= 0 && (start_r + i) < h) {
// Range check for columns
if ((start_c + j) >= 0 && (start_c + j) < w) {
// Accumulate result
temp += inputImgData[(start_r + i) * w + (start_c + j)] *
constMask[i * SOBEL_OP_DIM + j];
}
}
}
}
// Write back the result
gradientImgData[row * w + col] = (uint8_t)abs(temp);
}
And one with tiling, loading into shared memory (credits to https://www.cstechera.com/2015/07/two-dimensional-2d-image-convolution-in-CUDA.html):
__global__ void tiledGradientConvolution(uint8_t* inputImgData, uint8_t* gradientImgData, int width, int height) {
__shared__ uint8_t N_ds[SharedDim_y][SharedDim_x];
// First batch loading
int dest = threadIdx.y * TILE_WIDTH + threadIdx.x,
destY = dest / SharedDim_x, destX = dest % SharedDim_x,
srcY = blockIdx.y * TILE_HEIGHT + destY - SOBEL_OP_RADIUS,
srcX = blockIdx.x * TILE_WIDTH + destX - SOBEL_OP_RADIUS,
src = (srcY * width + srcX);
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = inputImgData[src];
else
N_ds[destY][destX] = 0;
for (int iter = 1; iter <= (SharedDim_x * SharedDim_y) / (TILE_WIDTH * TILE_HEIGHT); iter++)
{
// other batch loading
dest = threadIdx.y * TILE_WIDTH + threadIdx.x + iter * (TILE_WIDTH * TILE_HEIGHT);
destY = dest / SharedDim_x, destX = dest % SharedDim_x;
srcY = blockIdx.y * TILE_HEIGHT + destY - SOBEL_OP_RADIUS;
srcX = blockIdx.x * TILE_WIDTH + destX - SOBEL_OP_RADIUS;
src = (srcY * width + srcX);
if (destY < SharedDim_y && destX < SharedDim_x)
{
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = inputImgData[src];
else
N_ds[destY][destX] = 0;
}
}
__syncthreads();
int temp = 0;
int y, x;
for (y = 0; y < SOBEL_OP_DIM; y++)
for (x = 0; x < SOBEL_OP_DIM; x++)
temp += N_ds[threadIdx.y + y][threadIdx.x + x] * constMask[y * SOBEL_OP_DIM + x];
y = blockIdx.y * TILE_HEIGHT + threadIdx.y;
x = blockIdx.x * TILE_WIDTH + threadIdx.x;
if (y < height && x < width) {
gradientImgData[y * width + x] = (uint8_t)abs(temp);
}
__syncthreads();
}
According to nvprof, the shared memory implementation is slower:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 53.27% 387.52us 2 193.76us 190.70us 196.82us [CUDA memcpy DtoH]
24.28% 176.62us 2 88.311us 608ns 176.01us [CUDA memcpy HtoD]
11.56% 84.102us 1 84.102us 84.102us 84.102us tiledGradientConvolution(unsigned char*, unsigned char*, int, int)
10.90% 79.270us 1 79.270us 79.270us 79.270us constGradientConvolution(unsigned char*, unsigned char*, int, int)
This is the kernel configuration:
#define SOBEL_OP_DIM 3
#define SOBEL_OP_RADIUS (SOBEL_OP_DIM / 2)
// tile dimension
#define TILE_WIDTH 16
#define TILE_HEIGHT 16
// Allocate masks in constant memory
__constant__ int constMask[SOBEL_OP_DIM * SOBEL_OP_DIM];
// Shared Memory Elements needed to be loaded as per Mask Size
#define SharedDim_x (TILE_WIDTH + SOBEL_OP_DIM - 1)
#define SharedDim_y (TILE_HEIGHT + SOBEL_OP_DIM - 1)
// in main code//
dim3 dimBlock(TILE_WIDTH, TILE_HEIGHT);
dim3 dimGrid((test.w + TILE_WIDTH - 1) / TILE_WIDTH, (test.h + TILE_HEIGHT - 1) / TILE_HEIGHT);
I expect shared memory to be faster, but I can't figure out what causes conflicts when loading from global memory.
Any help would be appreciated. Thank you in advance.

What are slices in OpenGL?

In the code below, why do we need slices, and what are they for?
//https://github.com/danginsburg/opengles-book-samples/blob/604a02cc84f9cc4369f7efe93d2a1d7f2cab2ba7/iPhone/Common/esUtil.h#L110
int esGenSphere(int numSlices, float radius, float **vertices,
float **texCoords, uint16_t **indices, int *numVertices_out) {
int numParallels = numSlices / 2;
int numVertices = (numParallels + 1) * (numSlices + 1);
int numIndices = numParallels * numSlices * 6;
float angleStep = (2.0f * ES_PI) / ((float) numSlices);
if (vertices != NULL) {
*vertices = malloc(sizeof(float) * 3 * numVertices);
}
if (texCoords != NULL) {
*texCoords = malloc(sizeof(float) * 2 * numVertices);
}
if (indices != NULL) {
*indices = malloc(sizeof(uint16_t) * numIndices);
}
for (int i = 0; i < numParallels + 1; i++) {
for (int j = 0; j < numSlices + 1; j++) {
int vertex = (i * (numSlices + 1) + j) * 3;
if (vertices) {
(*vertices)[vertex + 0] = radius * sinf(angleStep * (float)i) * sinf(angleStep * (float)j);
(*vertices)[vertex + 1] = radius * cosf(angleStep * (float)i);
(*vertices)[vertex + 2] = radius * sinf(angleStep * (float)i) * cosf(angleStep * (float)j);
}
if (texCoords) {
int texIndex = (i * (numSlices + 1) + j) * 2;
(*texCoords)[texIndex + 0] = (float)j / (float)numSlices;
(*texCoords)[texIndex + 1] = 1.0f - ((float)i / (float)numParallels);
}
}
}
// Generate the indices
if (indices != NULL) {
uint16_t *indexBuf = (*indices);
for (int i = 0; i < numParallels ; i++) {
for (int j = 0; j < numSlices; j++) {
*indexBuf++ = i * (numSlices + 1) + j;
*indexBuf++ = (i + 1) * (numSlices + 1) + j;
*indexBuf++ = (i + 1) * (numSlices + 1) + (j + 1);
*indexBuf++ = i * (numSlices + 1) + j;
*indexBuf++ = (i + 1) * (numSlices + 1) + (j + 1);
*indexBuf++ = i * (numSlices + 1) + (j + 1);
}
}
}
if (numVertices_out) {
*numVertices_out = numVertices;
}
return numIndices;
}
That code generates a sphere mesh that looks like this:
Source: https://commons.wikimedia.org/wiki/File:Sphere_wireframe_10deg_6r.svg CC BY 3.0
As you can see in the picture, there are horizontal parallel lines, and vertical lines which all meet at the poles. The horizontal lines are typically called parallels whereas the vertical ones are called meridians. The author of that code apparently didn't know this term, so they called it "slices" instead.
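For example (a hypothetical call, not from the original answer; esGenSphere is the function shown above), asking for 20 slices gives 20 meridians spaced 2*pi/20 = 18 degrees apart and numSlices / 2 = 10 parallels:
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical usage sketch of the esGenSphere() shown above. */
void buildUnitSphere(void)
{
    float *vertices = NULL;
    float *texCoords = NULL;
    uint16_t *indices = NULL;
    int numVertices = 0;

    int numIndices = esGenSphere(20, 1.0f, &vertices, &texCoords, &indices, &numVertices);

    /* numVertices == (10 + 1) * (20 + 1) == 231
       numIndices  == 10 * 20 * 6         == 1200  (two triangles per slice/parallel quad)
       The mesh can then be drawn with glDrawElements(GL_TRIANGLES, numIndices, ...). */

    free(vertices);
    free(texCoords);
    free(indices);
}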

Converting an ARGB 8888 image into YUV 420 SP in Android causes a greenish image

Hello, I am trying to convert an ARGB 8888 image into YUV 420 SP in Android, and I am getting a totally greenish and compressed image. Please help me check whether my code is doing this the correct way.
The code looks something like this:
Image(Context context) {
// This Constructor is used to initialize height and width of screen
screenHeight = 800;//m1.heightPixels;
screenWidth = 480;//m1.widthPixels;
bufferSize = 4 * screenHeight * screenWidth;
buffer = new byte[bufferSize];
newarrs =new byte[bufferSize];
log("constructor width:- " + screenWidth + " height:- " + screenHeight);
}
public void capture() {
// Take the Data from frame buffer and store in buffer
log("capture Screen");
BufferedInputStream bis = null;
try {
// log("in try");
bis = new BufferedInputStream(new FileInputStream("/data/fb0.raw"));
readSize = bis.read(buffer, 0, bufferSize);
bis.close();
}
catch (Exception e) {
// log("in catch");
e.printStackTrace();
}
encodeYUV420(buffer);
byte[] arr = resize1(buffer);
FileOutputStream fos;
try {
File f = Files.getImageFile();
fos = new FileOutputStream(f);
fos.write(arr);
fos.close();
} catch (Exception e) {
}
}
private byte[] resize1(byte[] buffer) {
final int RATIO = 4;
byte[][][] newBuff = new byte[screenWidth][screenHeight][4];
int pos1 = 0;
for (int i = 0; i < screenWidth; i++) {
for (int j = 0; j < screenHeight; j++) {
newBuff[i][j][0] = buffer[pos1++];
newBuff[i][j][1] = buffer[pos1++];
newBuff[i][j][2] = buffer[pos1++];
newBuff[i][j][3] = buffer[pos1++];
}
}
byte[] buffer1 = new byte[buffer.length*3 / (RATIO * RATIO)];
int pos2 = 0;
int i = 0, j = 0;
for (i = 0; i < screenWidth; i++) {
for (j = 0; j < screenHeight; j++) {
try {
if (i % RATIO == 0 && j % RATIO == 0) {
buffer1[pos2++] = newBuff[i][j][0];
buffer1[pos2++] = newBuff[i][j][1];
buffer1[pos2++] = newBuff[i][j][2];
buffer1[pos2++] = newBuff[i][j][3];
}
} catch (Exception e) {
log(" i " + i + " j " + j);
}
}
}
log(" valuesof i " + i + " j " + j);
if (pos2 == buffer.length / (RATIO * RATIO))
log("S size:- " + pos2);
else
log("F size:- " + pos2);
return buffer1;
}
private byte[] encodeYUV420(byte[] argb) {
byte[] yuv420sp = new byte[(screenHeight * screenWidth * 3) / 2];
final int frameSize = screenWidth * screenHeight;
int yIndex = 0;
int uIndex = frameSize;
int vIndex = frameSize + (frameSize / 4);
int R, G, B;
int Y, U, V;
int index = 0;
for (int j = 0; j < screenHeight; j++) {
for (int i = 0; i < screenWidth; i++) {
int pp = (j * screenWidth + i) * 4;
//a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
R = (argb[index] & 0xff0000) >> 16;
G = (argb[index] & 0xff00) >> 8;
B = (argb[index] & 0xff) >> 0;
Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && i % 2 == 0) {
yuv420sp[uIndex++] = (byte) ((U<0) ? 0 : ((U > 255) ? 255 : U));
yuv420sp[vIndex++] = (byte) ((V<0) ? 0 : ((V > 255) ? 255 : V));
}
}
}
return yuv420sp;
}
Update:
Screenshot illustrating the problem:
I think I have made some changes and it now looks somewhat like the original image, but it is not clear or legible. Can I get some ideas on how to make it look almost like the original image? Only encodeYUV420() changed below; the constructor, capture(), and resize1() are the same as above.
private byte[] encodeYUV420(byte[] argb) {
byte[] yuv420sp = new byte[(screenHeight * screenWidth * 3) / 2];
final int frameSize = screenWidth * screenHeight;
int yIndex = 0;
int uvIndex=frameSize;
int a, R, G, B, Y, U, V;
int index = 0;
for (int j = 0; j < screenHeight; j++) {
for (int i = 0; i < screenWidth; i++) {
int pp = (j * screenWidth + i) * 4;
R = argb[pp+ 0];
G = argb[pp + 1];
B = argb[pp + 2];
a = argb[pp + 3];
Y = ( ( 66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ( ( -38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ( ( 112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && i % 2 == 0) {
yuv420sp[uvIndex++] = (byte)((U<0) ? 0 : ((U > 255) ? 255 : U));
yuv420sp[uvIndex++] = (byte)((V<0) ? 0 : ((V > 255) ? 255 : V));
}
}
}
return yuv420sp;
}
You're not storing the YUV data correctly. According to this document, YUV420SP data is stored in two planes, one containing the Y data, and another containing the interleaved U and V data:
| Y_0 | Y_1 | Y_2 | Y_3 | Y_4 | ... | Y_w-2 | Y_w-1 | /* h rows */
| Y_w | Y_w+1 | Y_w+2 | ...
:
:
| U_0 | V_0 | U_2 | V_2 | U_4 | ... | U_w-2 | V_w-2 | /* h/2 rows */
| U_2w | V_2w | U_2w+2| ...
:
:
Your code seems to be storing the U and V data in separate planes:
int uIndex = frameSize;
int vIndex = frameSize + (frameSize / 4);
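A minimal sketch of the interleaved layout (written in C++ rather than the question's Java, and assuming an R,G,B,A byte order in the input, which you would adjust to the real framebuffer layout):
#include <algorithm>
#include <cstdint>
#include <vector>

// Sketch only: one Y plane followed by one interleaved chroma plane.
// The chroma order here matches the diagram above (U before V, i.e. NV12);
// Android's NV21 expects V first, so swap the two chroma writes for that format.
std::vector<uint8_t> argbToYuv420sp(const uint8_t *argb, int width, int height)
{
    std::vector<uint8_t> yuv(static_cast<size_t>(width) * height * 3 / 2);
    const int frameSize = width * height;
    int yIndex = 0;
    int uvIndex = frameSize;                        // chroma starts right after the Y plane

    auto clamp8 = [](int v) { return static_cast<uint8_t>(std::min(255, std::max(0, v))); };

    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++) {
            const int p = (j * width + i) * 4;      // assumed R,G,B,A byte order
            const int R = argb[p + 0], G = argb[p + 1], B = argb[p + 2];

            const int Y = (( 66 * R + 129 * G +  25 * B + 128) >> 8) + 16;
            const int U = ((-38 * R -  74 * G + 112 * B + 128) >> 8) + 128;
            const int V = ((112 * R -  94 * G -  18 * B + 128) >> 8) + 128;

            yuv[yIndex++] = clamp8(Y);
            if (j % 2 == 0 && i % 2 == 0) {         // one U/V pair per 2x2 pixel block
                yuv[uvIndex++] = clamp8(U);
                yuv[uvIndex++] = clamp8(V);
            }
        }
    }
    return yuv;
}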

How to perform skin tone matching

(face image)
(body image)
Hi, I am new to image processing and OpenCV C/C++. I am wondering whether it is possible to extract the skin tone from the first image (face) and then apply it to the second image (body).
In other words, the user uploads his face image and the program extracts the skin tone from that image and applies it to the body.
Thanks,
Aisha
This is a hard problem to solve, especially given the variation of colours depending on lighting and reflection. I have worked previously on finding skin in images, and generally the Cr (chroma red) component of the YCbCr colour space stands out prominently on skin. You might be able to exploit this information to find skin regions.
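For instance, here is a rough sketch using the modern cv::Mat API that builds a skin mask by gating the Y, Cr and Cb channels. The thresholds are only illustrative (they mirror the YCrCb rule quoted further down) and will need tuning for your lighting:
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Rough sketch: convert BGR to YCrCb and keep pixels whose luma/chroma fall inside
// a range commonly associated with skin. Treat the bounds as a starting point only.
cv::Mat skinMask(const cv::Mat &bgr)
{
    cv::Mat ycrcb;
    cv::cvtColor(bgr, ycrcb, cv::COLOR_BGR2YCrCb);  // channel order becomes Y, Cr, Cb

    cv::Mat mask;
    cv::inRange(ycrcb,
                cv::Scalar(80, 135, 85),            // Y > 80, Cr > 135, Cb > 85
                cv::Scalar(255, 180, 135),          // Cr < 180, Cb < 135
                mask);
    return mask;                                    // 255 where the pixel looks like skin
}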
Here are a couple of papers that attempt to use colour for locating human skin:
1. Interaction between hands and wearable cameras
2. Markerless inspection of augmented reality objects
For finding skin you can use one of these formulas:
1) With normalized RGB space:
for(int i = 0; i < m_image->height; ++i)
{
for(int j = 0; j < m_image->width; ++j)
{
if (m_image->nChannels == 3)
{
int valueR = (reinterpret_cast<uchar*>(m_image->imageData + i * m_image->widthStep))[j * 3 + 2];
int valueG = (reinterpret_cast<uchar*>(m_image->imageData + i * m_image->widthStep))[j * 3 + 1];
int valueB = (reinterpret_cast<uchar*>(m_image->imageData + i * m_image->widthStep))[j * 3];
float normR = static_cast<float>(valueR) / static_cast<float>(valueR + valueG + valueB);
float normG = static_cast<float>(valueG) / static_cast<float>(valueR + valueG + valueB);
float normB = static_cast<float>(valueB) / static_cast<float>(valueR + valueG + valueB);
if ((normB / normG < 1.249) &&
(( normR + normG + normB ) / ( 3 * normR ) > 0.696 ) &&
( 1/3.0 - normB/( normR + normG + normB ) > 0.014 ) &&
(normG/(3* (normR + normG + normB)) < 0.108 ))
{
//pixel is skin
}
}
}
}
2) in RGB space:
for(size_t i = 0; i < m_image->height; ++i)
{
for(size_t j = 0; j < m_image->width; ++j)
{
if (m_image->nChannels == 3)
{
int R = (reinterpret_cast<uchar*>(m_image->imageData + i * m_image->widthStep))[j * 3 + 2];
int G = (reinterpret_cast<uchar*>(m_image->imageData + i * m_image->widthStep))[j * 3 + 1];
int B = (reinterpret_cast<uchar*>(m_image->imageData + i * m_image->widthStep))[j * 3];
if (( R > 95) && ( G > 40 ) && ( B > 20 ) &&
(std::max(R, std::max( G, B) ) - std::min(R, std::min(G, B) ) > 15) &&
(std::abs(R - G) > 15) && (R > G) && (R > B))
{
//skin pixel
}
}
}
}
3) in YCrCb space:
for(size_t i = 0; i < m_image->height; ++i)
{
for(size_t j = 0; j < m_image->width; ++j)
{
if (m_image->nChannels == 3)
{
int Cr = (reinterpret_cast<uchar*>(m_image->imageData + i * m_image->widthStep))[j * 3 + 2];
int Cb = (reinterpret_cast<uchar*>(m_image->imageData + i * m_image->widthStep))[j * 3 + 1];
int Y = (reinterpret_cast<uchar*>(m_image->imageData + i * m_image->widthStep))[j * 3];
if (( Y > 80 ) && ( Cb > 85 ) && ( Cb < 135 ) &&
(Cr > 135) && (Cr < 180))
{
//skin pixel
}
}
}
}

Puzzled by histogram equalization in Qt

I'm new to image processing. When I tried the histogram equalization algorithm, I got an error which I can't explain, as the picture below shows.
I'm sorry that I can't upload a picture right now, so I'll use Picasa instead.
https://picasaweb.google.com/lh/photo/xoxhWR7waVp50uLh-Ko78C_rLSYVGhhn6l5Yer6Tngc?feat=directlink
The original picture is on the left.
My algorithm is to convert RGB to YCbCr, equalize Y, and leave Cb and Cr alone, then convert YCbCr back to RGB to show the picture after equalization.
Here's the conversion code.
void MainWindow::rGBToYCbCr(uchar *bmpArray,uchar *lumaArray,uchar *cBCrArray,int startPoint)
{
for(int i = 0,m = 0,n = 0; i < bmpWidth * bmpHeight; i++,m+=3,n+=2)
{
lumaArray[i] = (uchar)(0.299 * bmpArray[startPoint + m + 2] + 0.587 *bmpArray[startPoint + m + 1] + 0.115 * bmpArray[startPoint + m]);
cBCrArray[n] = (uchar)(-0.169 * bmpArray[startPoint + m + 2] - 0.331 * bmpArray[startPoint + m + 1] + 0.5 * bmpArray[startPoint + m]) + 128;//cb
cBCrArray[n+1] = (uchar)(0.5 * bmpArray[startPoint + m + 2] - 0.419 * bmpArray[startPoint + m + 1] - 0.081 * bmpArray[startPoint + m]) + 128;//cr
}
}
void MainWindow::yCbCrToRGB(uchar *lumaArray,uchar *targetArray,uchar *CbCrArray,int startPoint)
{
for(int i = 0,m = 0,n=0; i < 3 * bmpWidth * bmpHeight; i+=3,m++,n+=2)
{
targetArray[startPoint + i + 2] = lumaArray[m] + (uchar)(1.402 * (CbCrArray[n + 1] - 128));
targetArray[startPoint + i + 1] = lumaArray[m] - (uchar)(0.344 * (CbCrArray[n] - 128)) - (uchar)(0.714 * (CbCrArray[n + 1] - 128));
targetArray[startPoint + i] = lumaArray[m] + (uchar)(1.772 * (CbCrArray[n] - 128));
}
}
And here is the equalization algorithm.
void MainWindow::histogramEqulizeGrayScale(uchar *bmpArray,int startPoint)
{
int hisTimes[256]={0};
for(int i = 0; i < bmpWidth * bmpHeight; i++)
hisTimes[(int)bmpArray[startPoint + i]]++;
double pixmapProbability[256];
for(int i = 0; i < 256; i++)
{
int sum = 0;
for(int j = 0; j <= i; j++)
sum += hisTimes[j];
pixmapProbability[i] = (double)sum / (double)(bmpWidth * bmpHeight);
}
for(int i = 0; i < 256; i++)
hisTimes[i] = pixmapProbability[i] * 256;
for(int i = 0; i < bmpWidth * bmpHeight; i++)
bmpArray[startPoint + i] = hisTimes[bmpArray[startPoint + i]];
}
Why would the color become so terrible?
Could it be that your color values are overflowing?
You have to clamp the results of your calculations for each color to [0, 255].
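A minimal sketch of that fix against the conversion code above (the helper name clampToByte is made up for illustration): do the YCbCr-to-RGB arithmetic in int, then clamp before narrowing back to 8 bits.
#include <algorithm>

// Clamp an intermediate result to the valid 8-bit channel range.
static inline unsigned char clampToByte(int value)
{
    return static_cast<unsigned char>(std::min(255, std::max(0, value)));
}

// Applied to the red channel of yCbCrToRGB (the other channels are analogous):
//   int r = lumaArray[m] + static_cast<int>(1.402 * (CbCrArray[n + 1] - 128));
//   targetArray[startPoint + i + 2] = clampToByte(r);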
