In the code below, why do we need slices, and what are they for?
//https://github.com/danginsburg/opengles-book-samples/blob/604a02cc84f9cc4369f7efe93d2a1d7f2cab2ba7/iPhone/Common/esUtil.h#L110
int esGenSphere(int numSlices, float radius, float **vertices,
float **texCoords, uint16_t **indices, int *numVertices_out) {
int numParallels = numSlices / 2;
int numVertices = (numParallels + 1) * (numSlices + 1);
int numIndices = numParallels * numSlices * 6;
float angleStep = (2.0f * ES_PI) / ((float) numSlices);
if (vertices != NULL) {
*vertices = malloc(sizeof(float) * 3 * numVertices);
}
if (texCoords != NULL) {
*texCoords = malloc(sizeof(float) * 2 * numVertices);
}
if (indices != NULL) {
*indices = malloc(sizeof(uint16_t) * numIndices);
}
for (int i = 0; i < numParallels + 1; i++) {
for (int j = 0; j < numSlices + 1; j++) {
int vertex = (i * (numSlices + 1) + j) * 3;
if (vertices) {
(*vertices)[vertex + 0] = radius * sinf(angleStep * (float)i) * sinf(angleStep * (float)j);
(*vertices)[vertex + 1] = radius * cosf(angleStep * (float)i);
(*vertices)[vertex + 2] = radius * sinf(angleStep * (float)i) * cosf(angleStep * (float)j);
}
if (texCoords) {
int texIndex = (i * (numSlices + 1) + j) * 2;
(*texCoords)[texIndex + 0] = (float)j / (float)numSlices;
(*texCoords)[texIndex + 1] = 1.0f - ((float)i / (float)numParallels);
}
}
}
// Generate the indices
if (indices != NULL) {
uint16_t *indexBuf = (*indices);
for (int i = 0; i < numParallels ; i++) {
for (int j = 0; j < numSlices; j++) {
*indexBuf++ = i * (numSlices + 1) + j;
*indexBuf++ = (i + 1) * (numSlices + 1) + j;
*indexBuf++ = (i + 1) * (numSlices + 1) + (j + 1);
*indexBuf++ = i * (numSlices + 1) + j;
*indexBuf++ = (i + 1) * (numSlices + 1) + (j + 1);
*indexBuf++ = i * (numSlices + 1) + (j + 1);
}
}
}
if (numVertices_out) {
*numVertices_out = numVertices;
}
return numIndices;
}
That code generates a sphere mesh that looks like this:
Source: https://commons.wikimedia.org/wiki/File:Sphere_wireframe_10deg_6r.svg CC BY 3.0
As you can see in the picture, there are horizontal lines running parallel to each other, and vertical lines which all meet at the poles. The horizontal lines are typically called parallels, whereas the vertical ones are called meridians. The author of that code apparently didn't know this terminology, so they called the meridians "slices" instead. The numSlices parameter therefore controls the resolution of the mesh: it sets the number of meridians directly, and the number of parallels via numParallels = numSlices / 2.
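For illustration, here is a minimal sketch of how the function might be called (the surrounding names are mine, not from the original sample):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* prototype of the function shown above */
int esGenSphere(int numSlices, float radius, float **vertices,
float **texCoords, uint16_t **indices, int *numVertices_out);

int main(void) {
float *vertices = NULL, *texCoords = NULL;
uint16_t *indices = NULL;
int numVertices = 0;
/* 20 slices -> 20 meridians and numSlices / 2 = 10 parallels;
a larger numSlices gives a smoother sphere */
int numIndices = esGenSphere(20, 1.0f, &vertices, &texCoords,
&indices, &numVertices);
printf("%d vertices, %d indices\n", numVertices, numIndices);
/* the caller owns the malloc'd buffers */
free(vertices);
free(texCoords);
free(indices);
return 0;
}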
I implemented two convolutions using constant memory for the mask.
One without tiling, working directly in global memory:
__global__ void constGradientConvolution(uint8_t* inputImgData, uint8_t* gradientImgData, int w, int h) {
// Calculate the global thread positions
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Starting index for calculation
int start_r = row - SOBEL_OP_RADIUS;
int start_c = col - SOBEL_OP_RADIUS;
// Temp value for calculation
int temp = 0;
// Iterate over all the rows
for (int i = 0; i < SOBEL_OP_DIM; i++) {
// Go over each column
for (int j = 0; j < SOBEL_OP_DIM; j++) {
// Range check for rows
if ((start_r + i) >= 0 && (start_r + i) < h) {
// Range check for columns
if ((start_c + j) >= 0 && (start_c + j) < w) {
// Accumulate result
temp += inputImgData[(start_r + i) * w + (start_c + j)] *
constMask[i * SOBEL_OP_DIM + j];
}
}
}
}
// Write back the result
gradientImgData[row * w + col] = (uint8_t)abs(temp);
}
and one with tiling, loading into shared memory (credit to https://www.cstechera.com/2015/07/two-dimensional-2d-image-convolution-in-CUDA.html):
__global__ void tiledGradientConvolution(uint8_t* inputImgData, uint8_t* gradientImgData, int width, int height) {
__shared__ uint8_t N_ds[SharedDim_y][SharedDim_x];
// First batch loading
int dest = threadIdx.y * TILE_WIDTH + threadIdx.x,
destY = dest / SharedDim_x, destX = dest % SharedDim_x,
srcY = blockIdx.y * TILE_HEIGHT + destY - SOBEL_OP_RADIUS,
srcX = blockIdx.x * TILE_WIDTH + destX - SOBEL_OP_RADIUS,
src = (srcY * width + srcX);
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = inputImgData[src];
else
N_ds[destY][destX] = 0;
for (int iter = 1; iter <= (SharedDim_x * SharedDim_y) / (TILE_WIDTH * TILE_HEIGHT); iter++)
{
// other batch loading
dest = threadIdx.y * TILE_WIDTH + threadIdx.x + iter * (TILE_WIDTH * TILE_HEIGHT);
destY = dest / SharedDim_x, destX = dest % SharedDim_x;
srcY = blockIdx.y * TILE_HEIGHT + destY - SOBEL_OP_RADIUS;
srcX = blockIdx.x * TILE_WIDTH + destX - SOBEL_OP_RADIUS;
src = (srcY * width + srcX);
if (destY < SharedDim_y && destX < SharedDim_x)
{
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = inputImgData[src];
else
N_ds[destY][destX] = 0;
}
}
__syncthreads();
int temp = 0;
int y, x;
for (y = 0; y < SOBEL_OP_DIM; y++)
for (x = 0; x < SOBEL_OP_DIM; x++)
temp += N_ds[threadIdx.y + y][threadIdx.x + x] * constMask[y * SOBEL_OP_DIM + x];
y = blockIdx.y * TILE_HEIGHT + threadIdx.y;
x = blockIdx.x * TILE_WIDTH + threadIdx.x;
if (y < height && x < width) {
gradientImgData[y * width + x] = (uint8_t)abs(temp);
}
__syncthreads();
}
According to nvprof, the shared memory implementation is slower:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 53.27% 387.52us 2 193.76us 190.70us 196.82us [CUDA memcpy DtoH]
24.28% 176.62us 2 88.311us 608ns 176.01us [CUDA memcpy HtoD]
11.56% 84.102us 1 84.102us 84.102us 84.102us tiledGradientConvolution(unsigned char*, unsigned char*, int, int)
10.90% 79.270us 1 79.270us 79.270us 79.270us constGradientConvolution(unsigned char*, unsigned char*, int, int)
This is the kernel configuration:
#define SOBEL_OP_DIM 3
#define SOBEL_OP_RADIUS (SOBEL_OP_DIM / 2)
// tile dimension
#define TILE_WIDTH 16
#define TILE_HEIGHT 16
// Allocate masks in constant memory
__constant__ int constMask[SOBEL_OP_DIM * SOBEL_OP_DIM];
// Shared Memory Elements needed to be loaded as per Mask Size
#define SharedDim_x (TILE_WIDTH + SOBEL_OP_DIM - 1)
#define SharedDim_y (TILE_HEIGHT + SOBEL_OP_DIM - 1)
// in the main code:
dim3 dimBlock(TILE_WIDTH, TILE_HEIGHT);
dim3 dimGrid((test.w + TILE_WIDTH - 1) / TILE_WIDTH, (test.h + TILE_HEIGHT - 1) / TILE_HEIGHT);
I expected shared memory to be faster, but I can't figure out what causes conflicts when loading from global memory.
Any help would be appreciated. Thank you in advance.
I tried to create Fill and Eraser tools with GDI, but that approach is too slow for Windows Phone devices, and working with large photos this way is too hard on these devices.
I searched for an alternative image-processing solution and found Win2D and SharpDX, but I am not sure whether these APIs can help me build such tools.
This is the Fill tool in WinRTXamlToolkit:
public static void FloodFill(this WriteableBitmap target, int x, int y, int outlineColor, int fillColor, byte maxdiff)
{
var width = target.PixelWidth;
var height = target.PixelHeight;
var queue = new List<Pnt>();
using (var context = target.GetBitmapContext(ReadWriteMode.ReadWrite))
{
queue.Add(new Pnt { X = x, Y = y });
while (queue.Count > 0)
{
var p = queue[queue.Count - 1];
queue.RemoveAt(queue.Count - 1);
if (p.X == -1) continue;
if (p.X == width) continue;
if (p.Y == -1) continue;
if (p.Y == height) continue;
if (context.Pixels[width * p.Y + p.X] == outlineColor) continue;
if (context.Pixels[width * p.Y + p.X] == fillColor) continue;
if (context.Pixels[width * p.Y + p.X].MaxDiff(outlineColor) > maxdiff)
{
context.Pixels[width * p.Y + p.X] = fillColor;
}
else
{
continue;
}
context.Pixels[width * p.Y + p.X] = fillColor;
queue.Add(new Pnt { X = p.X, Y = p.Y - 1 });
queue.Add(new Pnt { X = p.X + 1, Y = p.Y });
queue.Add(new Pnt { X = p.X, Y = p.Y + 1 });
queue.Add(new Pnt { X = p.X - 1, Y = p.Y });
}
target.Invalidate();
}
}
and this is the Eraser tool in WriteableBitmapEx:
public static void FillEllipseCenteredTrsnceparent(this WriteableBitmap bmp, int xc, int yc, int xr, int yr, Color color)
{
using (BitmapContext context = bmp.GetBitmapContext())
{
Func<Color, int> toInt32 = c =>
{
var i = ((((c.A << 0x18) | (((c.R * c.A + 1) >> 8) << 0x10)) | (((c.G * c.A + 1) >> 8) << 8)) | ((c.B * c.A + 1) >> 8));
return i;
};
int[] pixels = context.Pixels;
int width = context.Width;
int height = context.Height;
if ((xr >= 1) && (yr >= 1))
{
int num3;
int num4;
int num5;
int num6;
int num7;
int num8;
int num9 = xr;
int num10 = 0;
int num11 = (xr * xr) << 1;
int num12 = (yr * yr) << 1;
int num13 = (yr * yr) * (1 - (xr << 1));
int num14 = xr * xr;
int num15 = 0;
int num16 = num12 * xr;
int num17 = 0;
//int sa = (color >> 0x18) & 0xff;
//int sr = (color >> 0x10) & 0xff;
//int sg = (color >> 8) & 0xff;
//int sb = color & 0xff;
//bool flag = !doAlphaBlend || (sa == 0xff);
while (num16 >= num17)
{
num5 = yc + num10;
num6 = yc - num10;
if (num5 < 0)
{
num5 = 0;
}
if (num5 >= height)
{
num5 = height - 1;
}
if (num6 < 0)
{
num6 = 0;
}
if (num6 >= height)
{
num6 = height - 1;
}
num3 = num5 * width;
num4 = num6 * width;
num8 = xc + num9;
num7 = xc - num9;
if (num8 < 0)
{
num8 = 0;
}
if (num8 >= width)
{
num8 = width - 1;
}
if (num7 < 0)
{
num7 = 0;
}
if (num7 >= width)
{
num7 = width - 1;
}
for (int i = num7; i <= num8; i++)
{
pixels[i + num3] = toInt32(color);
pixels[i + num4] = toInt32(color);
}
num10++;
num17 += num11;
num15 += num14;
num14 += num11;
if ((num13 + (num15 << 1)) > 0)
{
num9--;
num16 -= num12;
num15 += num13;
num13 += num12;
}
}
num9 = 0;
num10 = yr;
num5 = yc + num10;
num6 = yc - num10;
if (num5 < 0)
{
num5 = 0;
}
if (num5 >= height)
{
num5 = height - 1;
}
if (num6 < 0)
{
num6 = 0;
}
if (num6 >= height)
{
num6 = height - 1;
}
num3 = num5 * width;
num4 = num6 * width;
num13 = yr * yr;
num14 = (xr * xr) * (1 - (yr << 1));
num15 = 0;
num16 = 0;
num17 = num11 * yr;
while (num16 <= num17)
{
num8 = xc + num9;
num7 = xc - num9;
if (num8 < 0)
{
num8 = 0;
}
if (num8 >= width)
{
num8 = width - 1;
}
if (num7 < 0)
{
num7 = 0;
}
if (num7 >= width)
{
num7 = width - 1;
}
for (int j = num7; j <= num8; j++)
{
pixels[j + num3] = toInt32(color);
pixels[j + num4] = toInt32(color);
}
num9++;
num16 += num12;
num15 += num13;
num13 += num12;
if ((num14 + (num15 << 1)) > 0)
{
num10--;
num5 = yc + num10;
num6 = yc - num10;
if (num5 < 0)
{
num5 = 0;
}
if (num5 >= height)
{
num5 = height - 1;
}
if (num6 < 0)
{
num6 = 0;
}
if (num6 >= height)
{
num6 = height - 1;
}
num3 = num5 * width;
num4 = num6 * width;
num17 -= num11;
num15 += num14;
num14 += num11;
}
}
}
}
}
Is there a way to convert this code to Win2D or SharpDX?
I am getting a Logic error when I analyze my code. It says: "Argument in message expression is an uninitialized value".
Here is what I have:
// allocate symbol
int baseMatrixSize = compact ? 11 + layers * 4 : 14 + layers * 4; // not including alignment lines
int alignmentMap[baseMatrixSize];
int matrixSize;
if (compact) {
// no alignment marks in compact mode, alignmentMap is a no-op
matrixSize = baseMatrixSize;
for (int i = 0; i < baseMatrixSize; i++) {
alignmentMap[i] = i;
}
} else {
matrixSize = baseMatrixSize + 1 + 2 * ((baseMatrixSize / 2 - 1) / 15);
int origCenter = baseMatrixSize / 2;
int center = matrixSize / 2;
for (int i = 0; i < origCenter; i++) {
int newOffset = i + i / 15;
alignmentMap[origCenter - i - 1] = center - newOffset - 1;
alignmentMap[origCenter + i] = center + newOffset + 1;
}
}
ZXBitMatrix *matrix = [[ZXBitMatrix alloc] initWithDimension:matrixSize];
// draw data bits
for (int i = 0, rowOffset = 0; i < layers; i++) {
int rowSize = compact ? (layers - i) * 4 + 9 : (layers - i) * 4 + 12;
for (int j = 0; j < rowSize; j++) {
int columnOffset = j * 2;
for (int k = 0; k < 2; k++) {
if ([messageBits get:rowOffset + columnOffset + k]) {
[matrix setX:alignmentMap[i * 2 + k] y:alignmentMap[i * 2 + j]];
}
if ([messageBits get:rowOffset + rowSize * 2 + columnOffset + k]) {
[matrix setX:alignmentMap[i * 2 + j] y:alignmentMap[baseMatrixSize - 1 - i * 2 - k]];
}
if ([messageBits get:rowOffset + rowSize * 4 + columnOffset + k]) {
[matrix setX:alignmentMap[baseMatrixSize - 1 - i * 2 - k] y:alignmentMap[baseMatrixSize - 1 - i * 2 - j]];
}
if ([messageBits get:rowOffset + rowSize * 6 + columnOffset + k]) {
[matrix setX:alignmentMap[baseMatrixSize - 1 - i * 2 - j] y:alignmentMap[i * 2 + k]];
}
}
}
rowOffset += rowSize * 8;
}
I tried initializing matrixSize to 0, but it still gives me the error.
Could you please tell me why I am getting this error?
Hello, I am trying to convert an ARGB 8888 image into YUV 420 SP on Android, and I am getting a totally greenish and compressed image. Please check whether I am doing it the correct way in the code.
The code looks something like this:
Image(Context context) {
// This Constructor is used to initialize height and width of screen
screenHeight = 800;//m1.heightPixels;
screenWidth = 480;//m1.widthPixels;
bufferSize = 4 * screenHeight * screenWidth;
buffer = new byte[bufferSize];
newarrs =new byte[bufferSize];
log("constructor width:- " + screenWidth + " height:- " + screenHeight);
}
public void capture() {
// Take the Data from frame buffer and store in buffer
log("capture Screen");
BufferedInputStream bis = null;
try {
// log("in try");
bis = new BufferedInputStream(new FileInputStream("/data/fb0.raw"));
readSize = bis.read(buffer, 0, bufferSize);
bis.close();
}
catch (Exception e) {
// log("in catch");
e.printStackTrace();
}
encodeYUV420(buffer);
byte[] arr = resize1(buffer);
FileOutputStream fos;
try {
File f = Files.getImageFile();
fos = new FileOutputStream(f);
fos.write(arr);
fos.close();
} catch (Exception e) {
}
}
private byte[] resize1(byte[] buffer) {
final int RATIO = 4;
byte[][][] newBuff = new byte[screenWidth][screenHeight][4];
int pos1 = 0;
for (int i = 0; i < screenWidth; i++) {
for (int j = 0; j < screenHeight; j++) {
newBuff[i][j][0] = buffer[pos1++];
newBuff[i][j][1] = buffer[pos1++];
newBuff[i][j][2] = buffer[pos1++];
newBuff[i][j][3] = buffer[pos1++];
}
}
byte[] buffer1 = new byte[buffer.length*3 / (RATIO * RATIO)];
int pos2 = 0;
int i = 0, j = 0;
for (i = 0; i < screenWidth; i++) {
for (j = 0; j < screenHeight; j++) {
try {
if (i % RATIO == 0 && j % RATIO == 0) {
buffer1[pos2++] = newBuff[i][j][0];
buffer1[pos2++] = newBuff[i][j][1];
buffer1[pos2++] = newBuff[i][j][2];
buffer1[pos2++] = newBuff[i][j][3];
}
} catch (Exception e) {
log(" i " + i + " j " + j);
}
}
}
log(" valuesof i " + i + " j " + j);
if (pos2 == buffer.length / (RATIO * RATIO))
log("S size:- " + pos2);
else
log("F size:- " + pos2);
return buffer1;
}
private byte[] encodeYUV420(byte[] argb) {
byte[] yuv420sp = new byte[(screenHeight * screenWidth * 3) / 2];
final int frameSize = screenWidth * screenHeight;
int yIndex = 0;
int uIndex = frameSize;
int vIndex = frameSize + (frameSize / 4);
int R, G, B;
int Y, U, V;
int index = 0;
for (int j = 0; j < screenHeight; j++) {
for (int i = 0; i < screenWidth; i++) {
int pp = (j * screenWidth + i) * 4;
//a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
R = (argb[index] & 0xff0000) >> 16;
G = (argb[index] & 0xff00) >> 8;
B = (argb[index] & 0xff) >> 0;
Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && i % 2 == 0) {
yuv420sp[uIndex++] = (byte) ((U<0) ? 0 : ((U > 255) ? 255 : U));
yuv420sp[vIndex++] = (byte) ((V<0) ? 0 : ((V > 255) ? 255 : V));
}
}
}
return yuv420sp;
}
Update:
Screenshot illustrating the problem:
I think I have made some changes and it now looks somewhat like the original image, but it is not clear or legible. Can I get some ideas on how to make it almost match the original image?
(The Image constructor, capture(), and resize1() are unchanged from the code above; only encodeYUV420() changed:)
private byte[] encodeYUV420(byte[] argb) {
byte[] yuv420sp = new byte[(screenHeight * screenWidth * 3) / 2];
final int frameSize = screenWidth * screenHeight;
int yIndex = 0;
int uvIndex=frameSize;
int a, R, G, B, Y, U, V;
int index = 0;
for (int j = 0; j < screenHeight; j++) {
for (int i = 0; i < screenWidth; i++) {
int pp = (j * screenWidth + i) * 4;
R = argb[pp+ 0];
G = argb[pp + 1];
B = argb[pp + 2];
a = argb[pp + 3];
Y = ( ( 66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ( ( -38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ( ( 112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && i % 2 == 0) {
yuv420sp[uvIndex++] = (byte)((U<0) ? 0 : ((U > 255) ? 255 : U));
yuv420sp[uvIndex++] = (byte)((V<0) ? 0 : ((V > 255) ? 255 : V));
}
}
}
return yuv420sp;
}
You're not storing the YUV data correctly. According to this document, YUV420SP data is stored in two planes, one containing the Y data, and another containing the interleaved U and V data:
| Y_0 | Y_1 | Y_2 | Y_3 | Y_4 | ... | Y_w-2 | Y_w-1 | /* h rows */
| Y_w | Y_w+1 | Y_w+2 | ...
:
:
| U_0 | V_0 | U_2 | V_2 | U_4 | ... | U_w-2 | V_w-2 | /* h/2 rows */
| U_2w | V_2w | U_2w+2| ...
:
:
Your code seems to be storing the U and V data in separate planes:
int uIndex = frameSize;
int vIndex = frameSize + (frameSize / 4);
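As a minimal sketch of the intended layout (written in C for illustration; storeYuv420sp and the getY/getU/getV callbacks are hypothetical stand-ins for the RGB-to-YUV math you already have), the chroma samples go into one interleaved plane right after the luma plane:

#include <stdint.h>

/* Sketch: fill a YUV420SP buffer from per-pixel Y/U/V values.
yuv must hold width * height * 3 / 2 bytes. */
void storeYuv420sp(uint8_t *yuv, int width, int height,
int (*getY)(int x, int y),
int (*getU)(int x, int y),
int (*getV)(int x, int y)) {
int frameSize = width * height;
int yIndex = 0;
int uvIndex = frameSize; /* U and V share one interleaved plane */
for (int j = 0; j < height; j++) {
for (int i = 0; i < width; i++) {
yuv[yIndex++] = (uint8_t)getY(i, j);
if (j % 2 == 0 && i % 2 == 0) {
/* one chroma pair per 2x2 pixel block;
NV12 stores U first, NV21 stores V first */
yuv[uvIndex++] = (uint8_t)getU(i, j);
yuv[uvIndex++] = (uint8_t)getV(i, j);
}
}
}
}

Usage mirrors your loop: one Y value per pixel, and one U/V pair for every second pixel on every second row.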
I'm new to image processing. When I tried the histogram equalization algorithm, I got an error which I can't explain, as the picture below shows.
I'm sorry that I can't upload a picture right now; I'll use Picasa instead.
https://picasaweb.google.com/lh/photo/xoxhWR7waVp50uLh-Ko78C_rLSYVGhhn6l5Yer6Tngc?feat=directlink
The original picture is on the left.
My algorithm converts RGB to YCbCr and equalizes Y, leaving Cb and Cr alone, then converts YCbCr back to RGB to display the picture after equalization.
Here's the conversion code.
void MainWindow::rGBToYCbCr(uchar *bmpArray,uchar *lumaArray,uchar *cBCrArray,int startPoint)
{
for(int i = 0,m = 0,n = 0; i < bmpWidth * bmpHeight; i++,m+=3,n+=2)
{
lumaArray[i] = (uchar)(0.299 * bmpArray[startPoint + m + 2] + 0.587 *bmpArray[startPoint + m + 1] + 0.115 * bmpArray[startPoint + m]);
cBCrArray[n] = (uchar)(-0.169 * bmpArray[startPoint + m + 2] - 0.331 * bmpArray[startPoint + m + 1] + 0.5 * bmpArray[startPoint + m]) + 128;//cb
cBCrArray[n+1] = (uchar)(0.5 * bmpArray[startPoint + m + 2] - 0.419 * bmpArray[startPoint + m + 1] - 0.081 * bmpArray[startPoint + m]) + 128;//cr
}
}
void MainWindow::yCbCrToRGB(uchar *lumaArray,uchar *targetArray,uchar *CbCrArray,int startPoint)
{
for(int i = 0,m = 0,n=0; i < 3 * bmpWidth * bmpHeight; i+=3,m++,n+=2)
{
targetArray[startPoint + i + 2] = lumaArray[m] + (uchar)(1.402 * (CbCrArray[n + 1] - 128));
targetArray[startPoint + i + 1] = lumaArray[m] - (uchar)(0.344 * (CbCrArray[n] - 128)) - (uchar)(0.714 * (CbCrArray[n + 1] - 128));
targetArray[startPoint + i] = lumaArray[m] + (uchar)(1.772 * (CbCrArray[n] - 128));
}
}
And here is the equalization algorithm.
void MainWindow::histogramEqulizeGrayScale(uchar *bmpArray,int startPoint)
{
int hisTimes[256]={0};
for(int i = 0; i < bmpWidth * bmpHeight; i++)
hisTimes[(int)bmpArray[startPoint + i]]++;
double pixmapProbability[256];
for(int i = 0; i < 256; i++)
{
int sum = 0;
for(int j = 0; j <= i; j++)
sum += hisTimes[j];
pixmapProbability[i] = (double)sum / (double)(bmpWidth * bmpHeight);
}
for(int i = 0; i < 256; i++)
hisTimes[i] = pixmapProbability[i] * 256;
for(int i = 0; i < bmpWidth * bmpHeight; i++)
bmpArray[startPoint + i] = hisTimes[bmpArray[startPoint + i]];
}
Why would the color become so terrible?
Could it be that your color values are overflowing?
You have to clamp the result of your calculations for each color channel to [0, 255].
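For example, a small helper along these lines (a sketch; the saturate name is hypothetical) applied before each (uchar) cast in yCbCrToRGB:

/* Clamp an intermediate color value to the displayable range
before narrowing it to a byte. */
static unsigned char saturate(double v)
{
if (v < 0.0)
return 0;
if (v > 255.0)
return 255;
return (unsigned char)v;
}

/* e.g.:
targetArray[startPoint + i + 2] =
saturate(lumaArray[m] + 1.402 * (CbCrArray[n + 1] - 128)); */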