This code looks like it should work, but it doesn't:
- (IBAction)caculate:(UIButton *)sender {
int x , h;
double y , p;
x = [textDegree.text intValue ] ;
{
if ((x >= 60 ) && (x < 65)) {
y = 1;
} else if (( x >= 66) && (x < 70 )) {
y =1.50;
} else if (( x >= 70) && (x < 75 )) {
y = 2;
} else if (( x >= 75) && (x < 80 )) {
y = 2.50 ;
} else if (( x >= 80) && (x < 85 )) {
y = 3 ;
} else if (( x >= 85) && (x < 90 )) {
y = 3.50 ;
} else if (( x >= 90) && (x < 95 )) {
y = 3.75 ;
} else if (( x >= 95) && (x <= 100 )) {
y = 4 ;
} else
y = 0 ;
}
h = [textHour.text intValue];
p = y * h ;
point.text = [NSString stringWithFormat:@"%f", p];
}
When I run it in the simulator, execution stops on this line:
x = [textDegree.text intValue ] ;
with the message: Thread 1: breakpoint 7.1
I am trying to create a method that will link all Cell objects set up in a 2D array named CellGrid[,].
My question is: since most of the code in SetDirection() is so similar, it seems there should be a better way to achieve my goal.
(Side note: this is functional, but the execution feels "off".)
private void SetDirection()
{
int x = 0, y = 0;
for ( int i = 0 ; i < Size * (Size - 1);)//setting all UP pointers
{
if ( i == 0 ) { x = 0; y = 1;}//initial setup
for ( x = 0 ; x < Size ; x++ )
{
CellGrid[x,y].SetPointer(CellGrid[x,y-1] , Direction.Up );
i++;
}
y++;
}
for ( int i = 0 ; i < Size * (Size - 1);) //setting all DOWN pointers
{
if ( i == 0 ) { x = 0; y = 0;}//initial setup
for ( x = 0 ; x < Size ; x++ )
{
CellGrid[x,y].SetPointer(CellGrid[x,y+1], Direction.Down);
i++;
}
y++;
}
for ( int i = 0 ; i < Size * (Size - 1);)//setting all LEFT pointers
{
if ( i == 0 ) { x = 1; y = 0;}//initial setup
for ( y = 0 ; y < Size ; y++ )
{
CellGrid[x, y].SetPointer( CellGrid[x-1,y], Direction.Left);
i++;
}
x++;
}
for ( int i = 0 ; i < Size * (Size - 1);) //setting all RIGHT pointers
{
if ( i == 0 ) { x = 0; y = 0;}//initial setup
for ( y = 0 ; y < Size ; y++ )
{
CellGrid[x, y].SetPointer( CellGrid[x+1,y], Direction.Right);
i++;
}
x++;
}
}
public void SetPointer( Cell cellRef ,GridBuilder.Direction dir)
{
switch ( dir )
{
case GridBuilder.Direction.Up:
this.Up = cellRef;
break;
case GridBuilder.Direction.Down:
this.Down = cellRef;
break;
case GridBuilder.Direction.Left:
this.Left = cellRef;
break;
case GridBuilder.Direction.Right:
this.Right = cellRef;
break;
}
}
You can indeed use one set of loops to make the links in all four directions. This is based on two ideas:
When setting a link, immediately set the link in the opposite direction between the same two cells.
When setting a link, immediately set the link between the two cells that are located in the mirrored positions -- mirrored by the main diagonal (x <--> y).
private void SetDirection() {
for (int i = 1; i < Size; i++) {
for (int j = 0; j < Size; j++) {
CellGrid[i, j].SetPointer(CellGrid[i-1, j], Direction.Left);
CellGrid[i-1, j].SetPointer(CellGrid[i, j], Direction.Right);
CellGrid[j, i].SetPointer(CellGrid[j, i-1], Direction.Up);
CellGrid[j, i-1].SetPointer(CellGrid[j, i], Direction.Down);
}
}
}
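As a quick sanity check, here is a minimal sketch of how the links could be verified after SetDirection() runs. It assumes the square CellGrid, Size, and the Up/Down/Left/Right references shown above, and that unset neighbour references default to null; the VerifyLinks name and the use of Debug.Assert (System.Diagnostics) are illustrative, not part of the original code.
// Every cell should point at its neighbours; border cells keep null on the outside.
private void VerifyLinks()
{
    for (int x = 0; x < Size; x++)
    {
        for (int y = 0; y < Size; y++)
        {
            Cell c = CellGrid[x, y];
            Debug.Assert(c.Left  == (x > 0        ? CellGrid[x - 1, y] : null));
            Debug.Assert(c.Right == (x < Size - 1 ? CellGrid[x + 1, y] : null));
            Debug.Assert(c.Up    == (y > 0        ? CellGrid[x, y - 1] : null));
            Debug.Assert(c.Down  == (y < Size - 1 ? CellGrid[x, y + 1] : null));
        }
    }
}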
I tried to create Fill and Eraser tools with GDI, but that approach is too slow for Windows Phone devices, and working with large photos this way is too hard on those devices.
I searched for an alternative solution for image processing and found Win2D and SharpDX, but I'm not sure whether these APIs can help me create these tools.
This is the Fill tool in WinRT XAML Toolkit:
public static void FloodFill(this WriteableBitmap target, int x, int y, int outlineColor, int fillColor, byte maxdiff)
{
var width = target.PixelWidth;
var height = target.PixelHeight;
var queue = new List<Pnt>();
using (var context = target.GetBitmapContext(ReadWriteMode.ReadWrite))
{
queue.Add(new Pnt { X = x, Y = y });
while (queue.Count > 0)
{
var p = queue[queue.Count - 1];
queue.RemoveAt(queue.Count - 1);
if (p.X == -1) continue;
if (p.X == width) continue;
if (p.Y == -1) continue;
if (p.Y == height) continue;
if (context.Pixels[width * p.Y + p.X] == outlineColor) continue;
if (context.Pixels[width * p.Y + p.X] == fillColor) continue;
if (context.Pixels[width * p.Y + p.X].MaxDiff(outlineColor) > maxdiff)
{
context.Pixels[width * p.Y + p.X] = fillColor;
}
else
{
continue;
}
context.Pixels[width * p.Y + p.X] = fillColor;
queue.Add(new Pnt { X = p.X, Y = p.Y - 1 });
queue.Add(new Pnt { X = p.X + 1, Y = p.Y });
queue.Add(new Pnt { X = p.X, Y = p.Y + 1 });
queue.Add(new Pnt { X = p.X - 1, Y = p.Y });
}
target.Invalidate();
}
}
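For context, a call site might look like the following minimal sketch; bitmap, tapX and tapY are illustrative, and the colors are packed ARGB ints as the method above expects.
// Fill the area around the tapped point with opaque red, stopping at
// opaque black outlines, with a per-channel tolerance of 32.
int tapX = 120, tapY = 80;                // illustrative tap point
int outline = unchecked((int)0xFF000000); // opaque black
int fill    = unchecked((int)0xFFFF0000); // opaque red
bitmap.FloodFill(tapX, tapY, outline, fill, 32);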
And this is the Eraser tool in WriteableBitmapEx:
public static void FillEllipseCenteredTrsnceparent(this WriteableBitmap bmp, int xc, int yc, int xr, int yr, Color color)
{
using (BitmapContext context = bmp.GetBitmapContext())
{
Func<Color, int> toInt32 = c =>
{
var i = ((((c.A << 0x18) | (((c.R * c.A + 1) >> 8) << 0x10)) | (((c.G * c.A + 1) >> 8) << 8)) | ((c.B * c.A + 1) >> 8));
return i;
};
int[] pixels = context.Pixels;
int width = context.Width;
int height = context.Height;
if ((xr >= 1) && (yr >= 1))
{
int num3;
int num4;
int num5;
int num6;
int num7;
int num8;
int num9 = xr;
int num10 = 0;
int num11 = (xr * xr) << 1;
int num12 = (yr * yr) << 1;
int num13 = (yr * yr) * (1 - (xr << 1));
int num14 = xr * xr;
int num15 = 0;
int num16 = num12 * xr;
int num17 = 0;
//int sa = (color >> 0x18) & 0xff;
//int sr = (color >> 0x10) & 0xff;
//int sg = (color >> 8) & 0xff;
//int sb = color & 0xff;
//bool flag = !doAlphaBlend || (sa == 0xff);
while (num16 >= num17)
{
num5 = yc + num10;
num6 = yc - num10;
if (num5 < 0)
{
num5 = 0;
}
if (num5 >= height)
{
num5 = height - 1;
}
if (num6 < 0)
{
num6 = 0;
}
if (num6 >= height)
{
num6 = height - 1;
}
num3 = num5 * width;
num4 = num6 * width;
num8 = xc + num9;
num7 = xc - num9;
if (num8 < 0)
{
num8 = 0;
}
if (num8 >= width)
{
num8 = width - 1;
}
if (num7 < 0)
{
num7 = 0;
}
if (num7 >= width)
{
num7 = width - 1;
}
for (int i = num7; i <= num8; i++)
{
pixels[i + num3] = toInt32(color);
pixels[i + num4] = toInt32(color);
}
num10++;
num17 += num11;
num15 += num14;
num14 += num11;
if ((num13 + (num15 << 1)) > 0)
{
num9--;
num16 -= num12;
num15 += num13;
num13 += num12;
}
}
num9 = 0;
num10 = yr;
num5 = yc + num10;
num6 = yc - num10;
if (num5 < 0)
{
num5 = 0;
}
if (num5 >= height)
{
num5 = height - 1;
}
if (num6 < 0)
{
num6 = 0;
}
if (num6 >= height)
{
num6 = height - 1;
}
num3 = num5 * width;
num4 = num6 * width;
num13 = yr * yr;
num14 = (xr * xr) * (1 - (yr << 1));
num15 = 0;
num16 = 0;
num17 = num11 * yr;
while (num16 <= num17)
{
num8 = xc + num9;
num7 = xc - num9;
if (num8 < 0)
{
num8 = 0;
}
if (num8 >= width)
{
num8 = width - 1;
}
if (num7 < 0)
{
num7 = 0;
}
if (num7 >= width)
{
num7 = width - 1;
}
for (int j = num7; j <= num8; j++)
{
pixels[j + num3] = toInt32(color);
pixels[j + num4] = toInt32(color);
}
num9++;
num16 += num12;
num15 += num13;
num13 += num12;
if ((num14 + (num15 << 1)) > 0)
{
num10--;
num5 = yc + num10;
num6 = yc - num10;
if (num5 < 0)
{
num5 = 0;
}
if (num5 >= height)
{
num5 = height - 1;
}
if (num6 < 0)
{
num6 = 0;
}
if (num6 >= height)
{
num6 = height - 1;
}
num3 = num5 * width;
num4 = num6 * width;
num17 -= num11;
num15 += num14;
num14 += num11;
}
}
}
}
}
Is there a way to convert this code to Win2D or SharpDX?
I'm trying to implement the fast voxel traversal algorithm and calculate T and M according to this answer (T is tDelta, M is tMax). All is good if both components of the direction vector V are positive, but if at least one of them is negative, it works incorrectly.
The green point is the start, the red one is the end. Everything looks correct.
And now going from a larger position to a smaller one:
Traversal method:
private IEnumerable<Vector2> GetCrossedCells(Vector2 pPoint1, Vector2 pPoint2)
{
Vector2 V = pPoint2 - pPoint1; // direction & distance vector
if (V != Vector2.Zero)
{
Vector2 U = Vector2.Normalize(V); // direction unit vector
Vector2 S = new Vector2(Math.Sign(U.X), Math.Sign(U.Y)); // sign vector
Vector2 P = pPoint1; // position
Vector2 G = new Vector2((int) Math.Floor(P.X / CELL_SIZE), (int) Math.Floor(P.Y / CELL_SIZE)); // grid coord
Vector2 T = new Vector2(Math.Abs(CELL_SIZE / U.X), Math.Abs(CELL_SIZE / U.Y));
Vector2 M = new Vector2(
Single.IsInfinity(T.X) ? Single.PositiveInfinity : T.X * (1.0f - (P.X / CELL_SIZE) % 1),
Single.IsInfinity(T.Y) ? Single.PositiveInfinity : T.Y * (1.0f - (P.Y / CELL_SIZE) % 1));
Vector2 D = Vector2.Zero;
bool isCanMoveByX = S.X != 0;
bool isCanMoveByY = S.Y != 0;
while (isCanMoveByX || isCanMoveByY)
{
yield return G;
D = new Vector2(
S.X > 0 ? (float) (Math.Floor(P.X / CELL_SIZE) + 1) * CELL_SIZE - P.X :
S.X < 0 ? (float) (Math.Ceiling(P.X / CELL_SIZE) - 1) * CELL_SIZE - P.X :
0,
S.Y > 0 ? (float) (Math.Floor(P.Y / CELL_SIZE) + 1) * CELL_SIZE - P.Y :
S.Y < 0 ? (float) (Math.Ceiling(P.Y / CELL_SIZE) - 1) * CELL_SIZE - P.Y :
0);
if (Math.Abs(V.X) <= Math.Abs(D.X))
{
D.X = V.X;
isCanMoveByX = false;
}
if (Math.Abs(V.Y) <= Math.Abs(D.Y))
{
D.Y = V.Y;
isCanMoveByY = false;
}
if (M.X <= M.Y)
{
M.X += T.X;
G.X += S.X;
if (isCanMoveByY)
{
D.Y = U.Y / U.X * D.X; // U.X / U.Y = D.X / D.Y => U.X * D.Y = U.Y * D.X
}
}
else
{
M.Y += T.Y;
G.Y += S.Y;
if (isCanMoveByX)
{
D.X = U.X / U.Y * D.Y;
}
}
V -= D;
P += D;
}
}
}
In the debugger I can see that, for example, M.Y > M.X when it should be the opposite if S.X < 0 or S.Y < 0.
Can you tell me why my code works incorrectly for negative directions?
So, I solved it.
I made the code cleaner and the problem is gone.
private IEnumerable<Vector2> GetCrossedCells(Vector2 pPoint1, Vector2 pPoint2)
{
if (pPoint1 != pPoint2)
{
Vector2 V = (pPoint2 - pPoint1) / CELL_SIZE; // direction & distance vector
Vector2 U = Vector2.Normalize(V); // direction unit vector
Vector2 S = new Vector2(Math.Sign(U.X), Math.Sign(U.Y)); // sign vector
Vector2 P = pPoint1 / CELL_SIZE; // position in grid coord system
Vector2 G = new Vector2((int) Math.Floor(P.X), (int) Math.Floor(P.Y)); // grid coord
Vector2 T = new Vector2(Math.Abs(CELL_SIZE / U.X), Math.Abs(CELL_SIZE / U.Y));
Vector2 D = new Vector2(
S.X > 0 ? 1 - P.X % 1 : S.X < 0 ? P.X % 1 : 0,
S.Y > 0 ? 1 - P.Y % 1 : S.Y < 0 ? P.Y % 1 : 0);
Vector2 M = new Vector2(
Single.IsInfinity(T.X) || S.X == 0 ? Single.PositiveInfinity : T.X * D.X,
Single.IsInfinity(T.Y) || S.Y == 0 ? Single.PositiveInfinity : T.Y * D.Y);
bool isCanMoveByX = S.X != 0;
bool isCanMoveByY = S.Y != 0;
while (isCanMoveByX || isCanMoveByY)
{
yield return G;
D = new Vector2(
S.X > 0 ? (float) Math.Floor(P.X) + 1 - P.X :
S.X < 0 ? (float) Math.Ceiling(P.X) - 1 - P.X :
0,
S.Y > 0 ? (float) Math.Floor(P.Y) + 1 - P.Y :
S.Y < 0 ? (float) Math.Ceiling(P.Y) - 1 - P.Y :
0);
if (Math.Abs(V.X) <= Math.Abs(D.X))
{
D.X = V.X;
isCanMoveByX = false;
}
if (Math.Abs(V.Y) <= Math.Abs(D.Y))
{
D.Y = V.Y;
isCanMoveByY = false;
}
if (M.X <= M.Y)
{
M.X += T.X;
G.X += S.X;
if (isCanMoveByY)
{
D.Y = U.Y / U.X * D.X; // U.X / U.Y = D.X / D.Y => U.X * D.Y = U.Y * D.X
}
}
else
{
M.Y += T.Y;
G.Y += S.Y;
if (isCanMoveByX)
{
D.X = U.X / U.Y * D.Y;
}
}
V -= D;
P += D;
}
}
}
Update
I began by removing the redundant divisions by CELL_SIZE and then noticed a mistake in the M calculation.
The answer I linked to uses a Frac() function. I was calculating it as (1 - P % 1), but that is only the case for S > 0; it should be (P % 1) if S < 0, and infinity for S = 0.
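Put differently, that sign-aware fraction can be factored into a small helper; this is only a sketch, and the Frac name is illustrative rather than part of the original code. (Update 2 below covers an edge case where this form is not enough for D.)
// Fractional distance from P to the next cell boundary on one axis,
// depending on the sign of movement along that axis.
private static float Frac(float p, float sign)
{
    if (sign > 0) return 1.0f - p % 1;   // moving towards higher coordinates
    if (sign < 0) return p % 1;          // moving towards lower coordinates
    return Single.PositiveInfinity;      // no movement on this axis
}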
Update 2
Also, it should be
Vector2 D = new Vector2(
S.X > 0 ? (float) Math.Floor(P.X) + 1 - P.X :
S.X < 0 ? (float) Math.Ceiling(P.X) - 1 - P.X :
0,
S.Y > 0 ? (float) Math.Floor(P.Y) + 1 - P.Y :
S.Y < 0 ? (float) Math.Ceiling(P.Y) - 1 - P.Y :
0);
Instead of
Vector2 D = new Vector2(
S.X > 0 ? 1 - P.X % 1 : S.X < 0 ? P.X % 1 : 0,
S.Y > 0 ? 1 - P.Y % 1 : S.Y < 0 ? P.Y % 1 : 0);
Because M will be infinity when S < 0 and P has no fractional part.
Hello, I am trying to convert an ARGB 8888 image into YUV 420 SP on Android, and I am getting a totally greenish and compressed image. Please help me with the code and tell me whether I am doing it the correct way.
The code looks something like this:
Image(Context context) {
// This Constructor is used to initialize height and width of screen
screenHeight = 800;//m1.heightPixels;
screenWidth = 480;//m1.widthPixels;
bufferSize = 4 * screenHeight * screenWidth;
buffer = new byte[bufferSize];
newarrs =new byte[bufferSize];
log("constructor width:- " + screenWidth + " height:- " + screenHeight);
}
public void capture() {
// Take the Data from frame buffer and store in buffer
log("capture Screen");
BufferedInputStream bis = null;
try {
// log("in try");
bis = new BufferedInputStream(new FileInputStream("/data/fb0.raw"));
readSize = bis.read(buffer, 0, bufferSize);
bis.close();
}
catch (Exception e) {
// log("in catch");
e.printStackTrace();
}
encodeYUV420(buffer);
byte[] arr = resize1(buffer);
FileOutputStream fos;
try {
File f = Files.getImageFile();
fos = new FileOutputStream(f);
fos.write(arr);
fos.close();
} catch (Exception e) {
}
}
private byte[] resize1(byte[] buffer) {
final int RATIO = 4;
byte[][][] newBuff = new byte[screenWidth][screenHeight][4];
int pos1 = 0;
for (int i = 0; i < screenWidth; i++) {
for (int j = 0; j < screenHeight; j++) {
newBuff[i][j][0] = buffer[pos1++];
newBuff[i][j][1] = buffer[pos1++];
newBuff[i][j][2] = buffer[pos1++];
newBuff[i][j][3] = buffer[pos1++];
}
}
byte[] buffer1 = new byte[buffer.length*3 / (RATIO * RATIO)];
int pos2 = 0;
int i = 0, j = 0;
for (i = 0; i < screenWidth; i++) {
for (j = 0; j < screenHeight; j++) {
try {
if (i % RATIO == 0 && j % RATIO == 0) {
buffer1[pos2++] = newBuff[i][j][0];
buffer1[pos2++] = newBuff[i][j][1];
buffer1[pos2++] = newBuff[i][j][2];
buffer1[pos2++] = newBuff[i][j][3];
}
} catch (Exception e) {
log(" i " + i + " j " + j);
}
}
}
log(" valuesof i " + i + " j " + j);
if (pos2 == buffer.length / (RATIO * RATIO))
log("S size:- " + pos2);
else
log("F size:- " + pos2);
return buffer1;
}
private byte[] encodeYUV420(byte[] argb) {
byte[] yuv420sp = new byte[(screenHeight * screenWidth * 3) / 2];
final int frameSize = screenWidth * screenHeight;
int yIndex = 0;
int uIndex = frameSize;
int vIndex = frameSize + (frameSize / 4);
int R, G, B;
int Y, U, V;
int index = 0;
for (int j = 0; j < screenHeight; j++) {
for (int i = 0; i < screenWidth; i++) {
int pp = (j * screenWidth + i) * 4;
//a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
R = (argb[index] & 0xff0000) >> 16;
G = (argb[index] & 0xff00) >> 8;
B = (argb[index] & 0xff) >> 0;
Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && i % 2 == 0) {
yuv420sp[uIndex++] = (byte) ((U<0) ? 0 : ((U > 255) ? 255 : U));
yuv420sp[vIndex++] = (byte) ((V<0) ? 0 : ((V > 255) ? 255 : V));
}
}
}
return yuv420sp;
}
Update:
Screenshot illustrating the problem:
I think I have made some changes and it looks somewhat like the original image, but it is not clear or legible. Can I get some ideas on how to make it almost the same as the original image?
Image(Context context) {
// This Constructor is used to initialize height and width of screen
screenHeight = 800;//m1.heightPixels;
screenWidth = 480;//m1.widthPixels;
bufferSize = 4 * screenHeight * screenWidth;
buffer = new byte[bufferSize];
newarrs =new byte[bufferSize];
log("constructor width:- " + screenWidth + " height:- " + screenHeight);
}
public void capture() {
// Take the Data from frame buffer and store in buffer
log("capture Screen");
BufferedInputStream bis = null;
try {
// log("in try");
bis = new BufferedInputStream(new FileInputStream("/data/fb0.raw"));
readSize = bis.read(buffer, 0, bufferSize);
bis.close();
}
catch (Exception e) {
// log("in catch");
e.printStackTrace();
}
encodeYUV420(buffer);
byte[] arr = resize1(buffer);
FileOutputStream fos;
try {
File f = Files.getImageFile();
fos = new FileOutputStream(f);
fos.write(arr);
fos.close();
} catch (Exception e) {
}
}
private byte[] resize1(byte[] buffer) {
final int RATIO = 4;
byte[][][] newBuff = new byte[screenWidth][screenHeight][4];
int pos1 = 0;
for (int i = 0; i < screenWidth; i++) {
for (int j = 0; j < screenHeight; j++) {
newBuff[i][j][0] = buffer[pos1++];
newBuff[i][j][1] = buffer[pos1++];
newBuff[i][j][2] = buffer[pos1++];
newBuff[i][j][3] = buffer[pos1++];
}
}
byte[] buffer1 = new byte[buffer.length*3 / (RATIO * RATIO)];
int pos2 = 0;
int i = 0, j = 0;
for (i = 0; i < screenWidth; i++) {
for (j = 0; j < screenHeight; j++) {
try {
if (i % RATIO == 0 && j % RATIO == 0) {
buffer1[pos2++] = newBuff[i][j][0];
buffer1[pos2++] = newBuff[i][j][1];
buffer1[pos2++] = newBuff[i][j][2];
buffer1[pos2++] = newBuff[i][j][3];
}
} catch (Exception e) {
log(" i " + i + " j " + j);
}
}
}
log(" valuesof i " + i + " j " + j);
if (pos2 == buffer.length / (RATIO * RATIO))
log("S size:- " + pos2);
else
log("F size:- " + pos2);
return buffer1;
}
private byte[] encodeYUV420(byte[] argb) {
byte[] yuv420sp = new byte[(screenHeight * screenWidth * 3) / 2];
final int frameSize = screenWidth * screenHeight;
int yIndex = 0;
int uvIndex=frameSize;
int a, R, G, B, Y, U, V;
int index = 0;
for (int j = 0; j < height; j++) {
for (int i = 0; i < width; i++) {
int pp = (j * width + i) * 4;
R = argb[pp+ 0];
G = argb[pp + 1];
B = argb[pp + 2];
a = argb[pp + 3];
Y = ( ( 66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ( ( -38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ( ( 112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && i % 2 == 0) {
yuv420sp[uvIndex++] = (byte)((U<0) ? 0 : ((U > 255) ? 255 : U));
yuv420sp[uvIndex++] = (byte)((V<0) ? 0 : ((V > 255) ? 255 : V));
}
}
}
return yuv420sp;
}
You're not storing the YUV data correctly. According to this document, YUV420SP data is stored in two planes, one containing the Y data, and another containing the interleaved U and V data:
| Y_0 | Y_1 | Y_2 | Y_3 | Y_4 | ... | Y_w-2 | Y_w-1 | /* h rows */
| Y_w | Y_w+1 | Y_w+2 | ...
:
:
| U_0 | V_0 | U_2 | V_2 | U_4 | ... | U_w-2 | V_w-2 | /* h/2 rows */
| U_2w | V_2w | U_2w+2| ...
:
:
Your code seems to be storing the U and V data in separate planes:
int uIndex = frameSize;
int vIndex = frameSize + (frameSize / 4);
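A minimal, self-contained sketch of writing one pixel into a single interleaved U/V plane instead, in line with the layout above; the writePixel and clamp helpers are illustrative, and the chroma index is computed directly rather than with a running counter.
// yuv420sp holds width*height Y bytes followed by an interleaved U/V
// plane of width*height/2 bytes (U first, then V, per 2x2 block).
static void writePixel(byte[] yuv420sp, int width, int height,
                       int i, int j, int Y, int U, int V) {
    int frameSize = width * height;
    yuv420sp[j * width + i] = clamp(Y);
    if (j % 2 == 0 && i % 2 == 0) {
        int uvIndex = frameSize + (j / 2) * width + (i & ~1);
        yuv420sp[uvIndex]     = clamp(U);
        yuv420sp[uvIndex + 1] = clamp(V);
    }
}

static byte clamp(int c) {
    return (byte) Math.max(0, Math.min(255, c));
}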
What I was thinking of doing is converting ofColor to the L*a*b* color space and measuring the Euclidean distance, but I don't know how to do that in openFrameworks.
I'm not very experienced with C++, but I ported this snippet over:
//ported from http://cookbooks.adobe.com/post_Useful_color_equations__RGB_to_LAB_converter-14227.html
struct Color{
float R,G,B,X,Y,Z,L,a,b;
};
#define REF_X 95.047 // Observer= 2°, Illuminant= D65
#define REF_Y 100.000
#define REF_Z 108.883
Color rgb2xyz(int R,int G,int B){
float r = R / 255.0;
float g = G / 255.0;
float b = B / 255.0;
if (r > 0.04045){ r = pow((r + 0.055) / 1.055, 2.4); }
else { r = r / 12.92; }
if ( g > 0.04045){ g = pow((g + 0.055) / 1.055, 2.4); }
else { g = g / 12.92; }
if (b > 0.04045){ b = pow((b + 0.055) / 1.055, 2.4); }
else { b = b / 12.92; }
r = r * 100;
g = g * 100;
b = b * 100;
//Observer. = 2°, Illuminant = D65
Color xyz;
xyz.X = r * 0.4124 + g * 0.3576 + b * 0.1805;
xyz.Y = r * 0.2126 + g * 0.7152 + b * 0.0722;
xyz.Z = r * 0.0193 + g * 0.1192 + b * 0.9505;
return xyz;
}
Color xyz2lab(float X,float Y, float Z){
float x = X / REF_X;
float y = Y / REF_Y;
float z = Z / REF_Z;
if ( x > 0.008856 ) { x = pow( x , .3333333333f ); }
else { x = ( 7.787 * x ) + ( 16/116.0 ); }
if ( y > 0.008856 ) { y = pow( y , .3333333333f ); }
else { y = ( 7.787 * y ) + ( 16/116.0 ); }
if ( z > 0.008856 ) { z = pow( z , .3333333333f ); }
else { z = ( 7.787 * z ) + ( 16/116.0 ); }
Color lab;
lab.L = ( 116 * y ) - 16;
lab.a = 500 * ( x - y );
lab.b = 200 * ( y - z );
return lab;
}
Color lab2xyz(float l, float a, float b){
float y = (l + 16) / 116;
float x = a / 500 + y;
float z = y - b / 200;
if ( pow( y , 3 ) > 0.008856 ) { y = pow( y , 3 ); }
else { y = ( y - 16 / 116.0 ) / 7.787; }
if ( pow( x , 3 ) > 0.008856 ) { x = pow( x , 3 ); }
else { x = ( x - 16 / 116.0 ) / 7.787; }
if ( pow( z , 3 ) > 0.008856 ) { z = pow( z , 3 ); }
else { z = ( z - 16 / 116.0 ) / 7.787; }
Color xyz;
xyz.X = x * REF_X;
xyz.Y = y * REF_Y;
xyz.Z = z * REF_Z;
return xyz;
}
Color xyz2rgb(float X,float Y,float Z){
//X from 0 to 95.047 (Observer = 2°, Illuminant = D65)
//Y from 0 to 100.000
//Z from 0 to 108.883
X = ofClamp(X, 0, 95.047);
float x = X * .01;
float y = Y * .01;
float z = Z * .01;
float r = x * 3.2406 + y * -1.5372 + z * -0.4986;
float g = x * -0.9689 + y * 1.8758 + z * 0.0415;
float b = x * 0.0557 + y * -0.2040 + z * 1.0570;
if ( r > 0.0031308 ) { r = 1.055 * pow( r , ( 1 / 2.4f ) ) - 0.055; }
else { r = 12.92 * r; }
if ( g > 0.0031308 ) { g = 1.055 * pow( g , ( 1 / 2.4f ) ) - 0.055; }
else { g = 12.92 * g; }
if ( b > 0.0031308 ) { b = 1.055 * pow( b , ( 1 / 2.4f ) ) - 0.055; }
else { b = 12.92 * b; }
Color rgb;
rgb.R = round( r * 255 );
rgb.G = round( g * 255 );
rgb.B = round( b * 255 );
return rgb;
}
Color rgb2lab(int R,int G,int B){
Color xyz = rgb2xyz(R, G, B);
return xyz2lab(xyz.X, xyz.Y, xyz.Z);
}
Color lab2rgb(int L,int a,int b){
Color xyz = lab2xyz(L, a, b);
return xyz2rgb(xyz.X, xyz.Y, xyz.Z);
}
Measuring the distance is then as trivial as:
float distLab(Color c1,Color c2){
float dL = c1.L - c2.L;
float da = c1.a - c2.a;
float db = c1.b - c2.b;
return sqrt(dL*dL + da*da + db*db);
}
or ofVec3f(c1.L,c1.a,c1.b).distance(ofVec3f(c2.L,c2.a,c2.b));
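For example, comparing two ofColor values end to end might look like this minimal sketch (assuming the functions above are in scope; the colors are illustrative):
ofColor c1(255, 0, 0);   // red
ofColor c2(250, 10, 5);  // a very similar red
Color lab1 = rgb2lab(c1.r, c1.g, c1.b);
Color lab2 = rgb2lab(c2.r, c2.g, c2.b);
float d = distLab(lab1, lab2); // small value, since the colors are close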
Also see this answer for a basic openFrameworks example.