Right now I am converting an image from YCrCb to RGB using OpenCV:
cv2.cvtColor(arr, cv2.COLOR_YCR_CB2RGB)
Is there a function in Pillow / PIL to perform this same color conversion? At the very least I would like to perform the color conversion without needing OpenCV.
I tried the following:
def _rgb( xxx ):
y, cb, cr = xxx
r = y + 1.402 * ( cr - 128 )
g = y - .34414 * ( cb - 128 ) - .71414 * ( cr - 128 )
b = y + 1.772 * ( cb - 128 )
return r, g, b
np.apply_along_axis( _rgb, 2, arr.astype( np.float32 ) ).astype( np.uint8 )
and it is very slow and does not quite work.
Conversion per se
The YCrCb-colorspace to RGB-colorspace conversion states:
R = Y + 1.402 * ( Cr - 128 )
G = Y - 0.34414 * ( Cb - 128 ) - 0.71414 * ( Cr - 128 )
B = Y + 1.772 * ( Cb - 128 )
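For the Pillow part of the question: these are the full-range JPEG/ITU-R 601 coefficients, which Pillow's built-in YCbCr mode appears to use as well, so the whole conversion can be done without OpenCV. A minimal sketch (assuming arr is the uint8 YCrCb array from the question; note Pillow expects Y, Cb, Cr channel order, while OpenCV's YCrCb is Y, Cr, Cb):

import numpy as np
from PIL import Image

ycbcr = arr[:, :, [0, 2, 1]]  # swap Cr/Cb into Pillow's YCbCr order
rgb   = np.asarray( Image.fromarray( ycbcr, mode = 'YCbCr' ).convert( 'RGB' ) )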
Nota Bene 1:
The openCV sources document its conversion process as performed with different coefficients than the http://en.wikipedia.org/wiki/HSL_and_HSV article, based on ITU-R Recommendation BT.709, resp. BT.601:
R = Y + 1.403 * ( Cr - delta )
G = Y - 0.344 * ( Cb - delta ) - 0.714 * ( Cr - delta )
B = Y + 1.773 * ( Cb - delta )
where
delta = 128 # for 8-bit images CV_8U,
# 32768 # for 16-bit images CV_16U,
# 0.5 # for floating-point images CV_32F.
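A dtype-aware sketch of that convention in numpy (a hypothetical helper, not part of OpenCV; it just mirrors the table above):

import numpy as np

def deltaFor( dtype ):                 # delta per the OpenCV convention above
    if dtype == np.uint8:  return 128.0
    if dtype == np.uint16: return 32768.0
    return 0.5                         # floating-point images

def ycrcb2rgb( a ):
    y  = a[:,:,0].astype( np.float32 )
    cr = a[:,:,1].astype( np.float32 )
    cb = a[:,:,2].astype( np.float32 )
    d  = deltaFor( a.dtype )
    return np.dstack( ( y + 1.403 * ( cr - d ),
                        y - 0.344 * ( cb - d ) - 0.714 * ( cr - d ),
                        y + 1.773 * ( cb - d ) ) )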
Nota Bene 2: [ref. below]
Efficient implementation
Using vectorised mode, numpy can help here, with potential further acceleration from JIT compilation via numba:
import numpy as np
import numba
@numba.jit
def translateYCrCb2RGB( a3DMatrixOfUINT8_YCrCb ): # naive type-checking & no exception handling
    # work in float32: uint8 arithmetic would wrap around on the ( x - 128 ) terms
    Y_ = a3DMatrixOfUINT8_YCrCb[:,:,0].astype( np.float32 )
    Cr = a3DMatrixOfUINT8_YCrCb[:,:,1].astype( np.float32 )
    Cb = a3DMatrixOfUINT8_YCrCb[:,:,2].astype( np.float32 )
    a3DMatrixOfFLOAT32_RGB = np.empty( a3DMatrixOfUINT8_YCrCb.shape, dtype = np.float32 )
    a3DMatrixOfFLOAT32_RGB[:,:,0] = Y_ + 1.402   * ( Cr - 128 )
    a3DMatrixOfFLOAT32_RGB[:,:,1] = Y_ - 0.34414 * ( Cb - 128 ) - 0.71414 * ( Cr - 128 )
    a3DMatrixOfFLOAT32_RGB[:,:,2] = Y_ + 1.772   * ( Cb - 128 )
    # clip to the displayable range before casting back to uint8
    return np.clip( a3DMatrixOfFLOAT32_RGB, 0, 255 ).astype( np.uint8 )
Further acceleration tricks may help, at the cost of a larger memory footprint or destructive handling of the mutable original YCrCb matrix.
Pre-sliced approach
@numba.jit
def translateYCrCb2RGB( Y__slice, # YCrCb_ORIGINAL[:,:,0], # ... asView
                        Cr_slice, # YCrCb_ORIGINAL[:,:,1], # ... asView
                        Cb_slice  # YCrCb_ORIGINAL[:,:,2]  # ... asView
                        ):        # naive type-checking & no exception handling
    # promote the uint8 views before the ( x - 128 ) subtraction,
    # otherwise the unsigned arithmetic wraps around
    Y_ = Y__slice.astype( np.float32 )
    Cr = Cr_slice.astype( np.float32 )
    Cb = Cb_slice.astype( np.float32 )
    return np.dstack( ( Y_ + 1.402   * ( Cr - 128 ),
                        Y_ - 0.34414 * ( Cb - 128 ) - 0.71414 * ( Cr - 128 ),
                        Y_ + 1.772   * ( Cb - 128 )
                        ) # .dstack consumes aTUPLE
                      )
Conventions need not match (PIL uses RGB channel order, cv2 uses BGR)
def getCvFromPIL( PILpic ):
return np.array( PILpic.getdata(), # .getdata()
dtype = np.uint8 # .uint8 type-enforced
).reshape( ( PILpic.size[1], # .reshape x
PILpic.size[0], # y
3 # z-depth
) # aTUPLE
)[:,:,::-1] # RGB c-reverse -> to BGR as cv2 standard representation
2) From the openCV sources one may read about the implemented precision of the coefficients:
template<typename _Tp> struct YCrCb2RGB_f
{
typedef _Tp channel_type;
YCrCb2RGB_f(int _dstcn, int _blueIdx, const float* _coeffs)
: dstcn(_dstcn), blueIdx(_blueIdx)
{
static const float coeffs0[] = {1.403f, -0.714f, -0.344f, 1.773f};
memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 4*sizeof(coeffs[0]));
}
void operator()(const _Tp* src, _Tp* dst, int n) const
{
int dcn = dstcn, bidx = blueIdx;
const _Tp delta = ColorChannel<_Tp>::half(), alpha = ColorChannel<_Tp>::max();
float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3];
n *= 3;
for(int i = 0; i < n; i += 3, dst += dcn)
{
_Tp Y = src[i];
_Tp Cr = src[i+1];
_Tp Cb = src[i+2];
_Tp b = saturate_cast<_Tp>(Y + (Cb - delta)*C3);
_Tp g = saturate_cast<_Tp>(Y + (Cb - delta)*C2 + (Cr - delta)*C1);
_Tp r = saturate_cast<_Tp>(Y + (Cr - delta)*C0);
dst[bidx] = b; dst[1] = g; dst[bidx^2] = r;
if( dcn == 4 )
dst[3] = alpha;
}
}
int dstcn, blueIdx;
float coeffs[4];
};
Related
How do I get the XY value from ct?
E.g., for ct = 217 I want to get x = 0.3127569, y = 0.32908.
I'm able to convert an XY value into a ct value using the code below.
float R1 = [hue[0] floatValue];
float S1 = [hue[1] floatValue];
float result = ((R1-0.332)/(S1-0.1858));
NSString *ctString = [NSString stringWithFormat:@"%f", ((-449*result*result*result)+(3525*result*result)-(6823.3*result)+(5520.33))];
float micro2 = (float) (1 / [ctString floatValue] * 1000000);
NSString *ctValue = [NSString stringWithFormat:@"%f", micro2];
ctValue = [NSString stringWithFormat:@"%d", [ctValue intValue]];
if ([ctValue integerValue] < 153) {
ctValue = [NSString stringWithFormat:@"%d", 153];
}
Now I want the reverse, i.e. from ct to XY.
On Phillips Hue, 2000K maps to 500 and 6500K maps to 153, given in ct as color temperature, but it can be thought of as actually being Mired.
Mired means micro reciprocal degree (Wikipedia).
ct is possibly used because it is not 100% Mired. Quite sure Phillips uses a lookup table, as a lot of CIE algorithms do, because there are just 347 indexes in this range from 153 to 500.
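The Kelvin/Mired relationship itself is just a reciprocal; e.g. in Python:

def kelvin_to_mired( kelvin ):
    return 1000000.0 / kelvin          # Mired = micro reciprocal degree

def mired_to_kelvin( mired ):
    return 1000000.0 / mired

print( kelvin_to_mired( 6500 ) )       # ~153.85 -> Hue ct 153
print( kelvin_to_mired( 2000 ) )       # 500.0   -> Hue ct 500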
The following is not a solution; it's just the simple concept of a lookup table.
And as McCamy's CIE 1931 xy to CCT formula (found here) suggests, it is possible to use a lookup table to find x and y as well.
A table can be found here, but I am not sure if that is the right lookup table.
A reminder: the following is not a solution, but the code may help in finding a reverse algorithm.
typedef int Kelvin;
typedef float Mired;
Mired linearMiredByKelvin(Kelvin k) {
if (k==0) return 0;
return 1000000.0/k;
}
-(void)mired {
Mired miredMin = 2000.0/13.0; // 153,84 = reciprocal 6500K
Mired miredMax = 500.0; // 500,00 = reciprocal 2000K
Mired lookupMiredByKelvin[6501]; //max 6500 Kelvin + 1 safe index
//Kelvin lookupKelvinByMired[501]; //max 500 Mired + 1 safe index
// dummy stuff, empty unused table space
for (Kelvin k = 0; k < 2000; k++) {
lookupMiredByKelvin[k] = 0;
}
//for (Mired m = 0.0; m < 154.0; m++) {
// lookupKelvinByMired[(int)m] = 0;
//}
for (Kelvin k=2000; k<6501; k++) {
Mired linearMired = linearMiredByKelvin(k);
float dimm = (linearMired - miredMin) / ( miredMax - miredMin);
Kelvin ct = (Kelvin)(1000000.0/(dimm*miredMax - dimm*miredMin + miredMin));
lookupMiredByKelvin[k] = linearMiredByKelvin(ct);
if (k==2000 || k==2250 || k==2500 || k==2750 ||
k==3000 || k==3250 || k==3500 || k==3750 ||
k==4000 || k==4250 || k==4500 || k==4750 ||
k==5000 || k==5250 || k==5500 || k==5750 ||
k==6000 || k==6250 || k==6500 || k==6501 )
fprintf(stderr,"%d %f %f\n",ct, dimm, lookupMiredByKelvin[k]);
}
}
At least this is proof that x and y will not sit on a simple vector.
CCT means correlated colour temperature and, as the implementation in the question shows, can be calculated via n = (x-0.3320)/(0.1858-y); CCT = 437*n^3 + 3601*n^2 + 6861*n + 5517 (after McCamy).
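As a sketch in Python (x, y are CIE 1931 chromaticity coordinates):

def mccamy_cct( x, y ):
    n = ( x - 0.3320 ) / ( 0.1858 - y )
    return 437.0 * n**3 + 3601.0 * n**2 + 6861.0 * n + 5517.0

print( mccamy_cct( 0.312708, 0.329113 ) )   # ~6507 K, near-D65 white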
But a cct = 217 is out of range of the above-linked lookup table.
Following the idea in this git repo from colour-science, ported to C, it could look like:
void CCT_to_xy_CIE_D(float cct) {
//if (CCT < 4000 || CCT > 25000) fprintf(stderr, "Correlated colour temperature must be in domain, unpredictable results may occur! \n");
float x = calculateXviaCCT(cct);
float y = calculateYviaX(x);
NSLog(@"cct=%f x%f y%f",cct,x,y);
}
float calculateXviaCCT(float cct) {
float cct_3 = pow(cct, 3); //(cct*cct*cct);
float cct_2 = pow(cct, 2); //(cct*cct);
if (cct<=7000)
return -4.607 * pow(10, 9) / cct_3 + 2.9678 * pow(10, 6) / cct_2 + 0.09911 * pow(10, 3) / cct + 0.244063;
return -2.0064 * pow(10, 9) / cct_3 + 1.9018 * pow(10, 6) / cct_2 + 0.24748 * pow(10, 3) / cct + 0.23704;
}
float calculateYviaX(float x) {
return -3.000 * pow(x, 2) + 2.870 * x - 0.275;
}
CCT_to_xy_CIE_D(6504.38938305); //proof of concept
//cct=6504.389160 x0.312708 y0.329113
CCT_to_xy_CIE_D(217.0);
//cct=217.000000 x-387.131073 y-450722.750000
// so for sure Phillips hue temperature given in ct between 153-500 is not a good starting point
//but
CCT_to_xy_CIE_D(2000.0);
//cct=2000.000000 x0.459693 y0.410366
This seems to work fine with CCT between 2000 and 25000; what may be confusing is that CCT is given in Kelvin here.
EDIT
This has been through so many revisions and ideas. To keep it simple I edited most of that out and just give you the final result.
This fits your function perfectly except for a region in the middle (temp from 256 to 316) where it deviates a bit.
The problem with your function is that it has approximately infinitely many solutions, so to solve it nicely you need more constraints, but which? Ol Sen's reference https://www.waveformlighting.com/tech/calculate-color-temperature-cct-from-cie-1931-xy-coordinates discusses it in some detail and then mentions that you want Duv to be zero. It also gives a way to calculate Duv, so I added that to my optimiser and voila!
Nice and smooth. The optimiser now solves for x and y that both satisfies your function and also minimises Duv.
To get it to work nicely I had to scale Duv quite a bit. That page mentions that Duv should be very small, so I think this is a good thing. Also, as the temp increases, the scaling should too, to help the optimiser.
Below prints from 153 to 500.
#import <Foundation/Foundation.h>
// Function taken from your code
// Simplified a bit
int ctFuncI ( float x, float y )
{
// float R1 = [hue[0] floatValue];
// float S1 = [hue[1] floatValue];
float result = (x-0.332)/(y-0.1858);
float cubic = - 449 * result * result * result + 3525 * result * result - 6823.3 * result + 5520.33;
float micro2 = 1 / cubic * 1000000;
int ct = ( int )( micro2 + 0.5 );
if ( ct < 153 )
{
ct = 153;
}
return ct;
}
// Need this
// Float version of your code
float ctFuncF ( float x, float y )
{
// float R1 = [hue[0] floatValue];
// float S1 = [hue[1] floatValue];
float result = (x-0.332)/(y-0.1858);
float cubic = - 449 * result * result * result + 3525 * result * result - 6823.3 * result + 5520.33;
return 1000000 / cubic;
}
// We need an additional constraint
// https://www.waveformlighting.com/tech/calculate-duv-from-cie-1931-xy-coordinates
// Given x, y calculate Duv
// We want this to be 0
float duv ( float x, float y )
{
float f = 1 / ( - 2 * x + 12 * y + 3 );
float u = 4 * x * f;
float v = 6 * y * f;
// I'm typing float but my heart yells double
float k6 = -0.00616793;
float k5 = 0.0893944;
float k4 = -0.5179722;
float k3 = 1.5317403;
float k2 = -2.4243787;
float k1 = 1.925865;
float k0 = -0.471106;
float du = u - 0.292;
float dv = v - 0.24;
float Lfp = sqrt ( du * du + dv * dv );
float a = acos( du / Lfp );
float Lbb = k6 * pow ( a, 6 ) + k5 * pow( a, 5 ) + k4 * pow( a, 4 ) + k3 * pow( a, 3 ) + k2 * pow(a,2) + k1 * a + k0;
return Lfp - Lbb;
}
// Solver!
// Returns iterations
int ctSolve ( int ct, float * x, float * y )
{
int iter = 0;
float dx = 0.001;
float dy = 0.001;
// Error
// Note we scale duv a bit
// Seems the higher the temp, the higher scale we require
// Also note the jump at 255 ...
float s = 1000 * ( ct > 255 ? 10 : 1 );
float d = fabs( ctFuncF ( * x, * y ) - ct ) + s * fabs( duv ( * x, * y ) );
// Approx
while ( d > 0.5 && iter < 250 )
{
iter ++;
dx *= fabs( ctFuncF ( * x + dx, * y ) - ct ) + s * fabs( duv ( * x + dx, * y ) ) < d ? 1.2 : - 0.5;
dy *= fabs( ctFuncF ( * x, * y + dy ) - ct ) + s * fabs( duv ( * x, * y + dy ) ) < d ? 1.2 : - 0.5;
* x += dx;
* y += dy;
d = fabs( ctFuncF ( * x, * y ) - ct ) + s * fabs( duv ( * x, * y ) );
}
return iter;
}
// Tester
int main(int argc, const char * argv[]) {
@autoreleasepool
{
// insert code here...
NSLog(@"Hello, World!");
float x, y;
int sume = 0;
int sumi = 0;
for ( int ct = 153; ct <= 500; ct ++ )
{
// Initial guess
x = 0.4;
y = 0.4;
// Approx
int iter = ctSolve ( ct, & x, & y );
// CT and error
int ctEst = ctFuncI ( x, y );
int e = ct - ctEst;
// Diagnostics
sume += abs ( e );
sumi += iter;
// Print out results
NSLog ( @"want ct = %d x = %f y = %f got ct %d in %d iter error %d", ct, x, y, ctEst, iter, e );
}
NSLog ( @"Sum of abs errors %d iterations %d", sume, sumi );
}
return 0;
}
To use it, do as below.
// To call it, init x and y to some guess
float x = 0.4;
float y = 0.4;
// Then call solver with your temp
int ct = 217;
ctSolve( ct, & x, & y ); // Note you pass references to x and y
// Done, answer now in x and y
A bit more compact answer, with functions to convert back and forth.
Beware: there are rounding issues, because McCamy's formula relies on mathematical assumptions, and so does the backward calculation.
If you want to find more results, search directly for "n= (x-0.3320)/(0.1858-y); CCT = 437*n^3 + 3601*n^2 + 6861*n + 5517"; there are plenty of different methods to convert back and forth.
So here: Phillips-Hue @[@x, @y] to Phillips-ct, Phillips-ct to CCT, and CCT to x,y.
void CCT_to_xy_CIE_D(float cct) {
//if (CCT < 4000 || CCT > 25000) fprintf(stderr, "Correlated colour temperature must be in domain, unpredictable results may occur! \n");
float x = calculateXviaCCT(cct);
float y = calculateYviaX(x);
fprintf(stderr,"cct=%f x%f y%f",cct,x,y);
}
float calculateXviaCCT(float cct) {
float cct_3 = pow(cct, 3); //(cct*cct*cct);
float cct_2 = pow(cct, 2); //(cct*cct);
if (cct<=7000.0)
return -4.607 * pow(10, 9) / cct_3 + 2.9678 * pow(10, 6) / cct_2 + 0.09911 * pow(10, 3) / cct + 0.244063;
return -2.0064 * pow(10, 9) / cct_3 + 1.9018 * pow(10, 6) / cct_2 + 0.24748 * pow(10, 3) / cct + 0.23704;
}
float calculateYviaX(float x) {
return -3.000 * x*x + 2.870 * x - 0.275;
}
int calculate_PhillipsHueCT_withCCT(float cct) {
if (cct>6500.0) return 2000.0/13.0;
if (cct<2000.0) return 500.0;
//return (float) (1 / cct * 1000000); // same as..
return 1000000 / cct;
}
float calculate_CCT_withPhillipsHueCT(float ct) {
if (ct == 0.0) return 0.0;
return 1000000 / ct;
}
float calculate_CCT_withHueXY(NSArray *hue) {
float x = [hue[0] floatValue]; //R1
float y = [hue[1] floatValue]; //S1
//x = 0.312708; y = 0.329113;
float n = (x-0.3320)/(0.1858-y);
float cct = 437.0*n*n*n + 3601.0*n*n + 6861.0*n + 5517.0;
return cct;
}
// McCamy formula n=(x-0.3320)/(0.1858-y); cct = 437*n^3 + 3601*n^2 + 6861*n + 5517;
-(void)testPhillipsHueCt_backAndForth {
NSArray *hue = @[@(0.312708),@(0.329113)];
float cct = calculate_CCT_withHueXY(hue);
float ct = calculate_PhillipsHueCT_withCCT(cct);
NSLog(@"ct %f",ct);
CCT_to_xy_CIE_D(cct); // check
CCT_to_xy_CIE_D(6504.38938305); //proof of concept
CCT_to_xy_CIE_D(2000.0);
CCT_to_xy_CIE_D(calculate_CCT_withPhillipsHueCT(217.0));
}
The XYZ color space encompasses all possible colors, not just those which can be generated by a particular device like a monitor. Not all XYZ triplets represent a color that is physically possible. Is there a way, given an XYZ triplet, to determine if it represents a real color?
I wanted to generate a CIE 1931 chromaticity diagram (seen below) for myself, but wasn't sure how to go about it. It's easy to, for example, take all combinations of sRGB triplets and then transform them into the xy coordinates of the chromaticity diagram and then plot them. You cannot use this same approach in the XYZ color space though, since not all combinations are valid colors. So far the best I have come up with is a stochastic approach, where I generate a random spectral distribution by summing a random number of random Gaussians, then converting it to XYZ using the standard observer functions.
Having thought about it a little more I felt the obvious solution is to generate a list of xy points around the edge of spectral locus, corresponding to pure monochromatic colors. It seems to me that this can be done by directly inputting the visible frequencies (~380-780nm) into the CIE XYZ standard observer color matching functions. Treating these points like a convex polygon you could determine if a point is within the spectral locus using one algorithm or another. In my case, since what I really wanted to do is simply generate the chromaticity diagram, I simply input these points into a graphics library's polygon drawing routine and then for each pixel of the polygon I can transform it into sRGB.
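A Python sketch of that idea, using the same Sloan analytic CMF fits as the JavaScript below and matplotlib's polygon containment test (the fits get rough near the spectrum ends, so this is illustrative, not exact):

import numpy as np
from matplotlib.path import Path

def x_fit(l):
    t1 = (l - 442.0) * (0.0624 if l < 442.0 else 0.0374)
    t2 = (l - 599.8) * (0.0264 if l < 599.8 else 0.0323)
    t3 = (l - 501.1) * (0.0490 if l < 501.1 else 0.0382)
    return 0.362*np.exp(-0.5*t1*t1) + 1.056*np.exp(-0.5*t2*t2) - 0.065*np.exp(-0.5*t3*t3)

def y_fit(l):
    t1 = (l - 568.8) * (0.0213 if l < 568.8 else 0.0247)
    t2 = (l - 530.9) * (0.0613 if l < 530.9 else 0.0322)
    return 0.821*np.exp(-0.5*t1*t1) + 0.286*np.exp(-0.5*t2*t2)

def z_fit(l):
    t1 = (l - 437.0) * (0.0845 if l < 437.0 else 0.0278)
    t2 = (l - 459.0) * (0.0385 if l < 459.0 else 0.0725)
    return 1.217*np.exp(-0.5*t1*t1) + 0.681*np.exp(-0.5*t2*t2)

pts = []
for l in range(380, 781):                  # trace the spectral locus
    X, Y, Z = x_fit(l), y_fit(l), z_fit(l)
    s = X + Y + Z
    pts.append((X / s, Y / s))
locus = Path(pts)                          # closing the polygon adds the purple line

print(locus.contains_point((0.3127, 0.3290)))   # D65 white: True
print(locus.contains_point((0.8, 0.8)))         # outside the locus: False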
I believe this solution is similar to the one used by the library that Kel linked in a comment. I'm not entirely sure, as I am not familiar with Python.
function RGBfromXYZ(X, Y, Z) {
const R = 3.2404542 * X - 1.5371385 * Y - 0.4985314 * Z
const G = -0.969266 * X + 1.8760108 * Y + 0.0415560 * Z
const B = 0.0556434 * X - 0.2040259 * Y + 1.0572252 * Z
return [R, G, B]
}
function XYZfromYxy(Y, x, y) {
const X = Y / y * x
const Z = Y / y * (1 - x - y)
return [X, Y, Z]
}
function srgb_from_linear(x) {
if (x <= 0.0031308) {
return x * 12.92
} else {
return 1.055 * Math.pow(x, 1/2.4) - 0.055
}
}
// Analytic Approximations to the CIE XYZ Color Matching Functions
// from Sloan http://jcgt.org/published/0002/02/01/paper.pdf
function xFit_1931(x) {
const t1 = (x - 442) * (x < 442 ? 0.0624 : 0.0374)
const t2 = (x -599.8) * (x < 599.8 ? 0.0264 : 0.0323)
const t3 = (x - 501.1) * (x < 501.1 ? 0.0490 : 0.0382)
return 0.362 * Math.exp(-0.5 * t1 * t1) + 1.056 * Math.exp(-0.5 * t2 * t2) - 0.065 * Math.exp(-0.5 * t3 * t3)
}
function yFit_1931(x) {
const t1 = (x - 568.8) * (x < 568.8 ? 0.0213 : 0.0247)
const t2 = (x - 530.9) * (x < 530.9 ? 0.0613 : 0.0322)
return 0.821 * Math.exp(-0.5 * t1 * t1) + 0.286 * Math.exp(-0.5 * t2 * t2)
}
function zFit_1931(x) {
const t1 = (x - 437) * (x < 437 ? 0.0845 : 0.0278)
const t2 = (x - 459) * (x < 459 ? 0.0385 : 0.0725)
return 1.217 * Math.exp(-0.5 * t1 * t1) + 0.681 * Math.exp(-0.5 * t2 * t2)
}
const canvas = document.createElement("canvas")
document.body.append(canvas)
canvas.width = canvas.height = 512
const ctx = canvas.getContext("2d")
const locus_points = []
for (let i = 440; i < 650; ++i) {
const [X, Y, Z] = [xFit_1931(i), yFit_1931(i), zFit_1931(i)]
const x = (X / (X + Y + Z)) * canvas.width
const y = (Y / (X + Y + Z)) * canvas.height
locus_points.push([x, y])
}
ctx.beginPath()
ctx.moveTo(...locus_points[0])
locus_points.slice(1).forEach(point => ctx.lineTo(...point))
ctx.closePath()
ctx.fill()
const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height)
for (let y = 0; y < canvas.height; ++y) {
for (let x = 0; x < canvas.width; ++x) {
const alpha = imageData.data[(y * canvas.width + x) * 4 + 3]
if (alpha > 0) {
const [X, Y, Z] = XYZfromYxy(1, x / canvas.width, y / canvas.height)
const [R, G, B] = RGBfromXYZ(X, Y, Z)
const r = Math.round(srgb_from_linear(R / Math.sqrt(R**2 + G**2 + B**2)) * 255)
const g = Math.round(srgb_from_linear(G / Math.sqrt(R**2 + G**2 + B**2)) * 255)
const b = Math.round(srgb_from_linear(B / Math.sqrt(R**2 + G**2 + B**2)) * 255)
imageData.data[(y * canvas.width + x) * 4 + 0] = r
imageData.data[(y * canvas.width + x) * 4 + 1] = g
imageData.data[(y * canvas.width + x) * 4 + 2] = b
}
}
}
ctx.putImageData(imageData, 0, 0)
I want to extract the red ball from a picture and get the matrix of the detected ellipse.
Here is my example:
I threshold the picture, find the contour of the red ball using the findContours() function, and use fitEllipse() to fit an ellipse.
But what I want is the coefficients of this ellipse. Because fitEllipse() returns a rotated rectangle (RotatedRect), I need to re-write this function.
An ellipse can be expressed as Ax^2 + By^2 + Cxy + Dx + Ey + F = 0, so I want to get u = (A,B,C,D,E,F), or u = (A,B,C,D,E) if F is 1 (to construct an ellipse matrix).
I read the source code of fitEllipse(): there are three SVD processes in total, and I think I can get the above coefficients from the results of those three SVD processes. But I am quite confused about what each result (variable cv::Mat x) of each SVD process represents, and why there are three SVDs here.
Here is this function:
cv::RotatedRect cv::fitEllipse( InputArray _points )
{
Mat points = _points.getMat();
int i, n = points.checkVector(2);
int depth = points.depth();
CV_Assert( n >= 0 && (depth == CV_32F || depth == CV_32S));
RotatedRect box;
if( n < 5 )
CV_Error( CV_StsBadSize, "There should be at least 5 points to fit the ellipse" );
// New fitellipse algorithm, contributed by Dr. Daniel Weiss
Point2f c(0,0);
double gfp[5], rp[5], t;
const double min_eps = 1e-8;
bool is_float = depth == CV_32F;
const Point* ptsi = points.ptr<Point>();
const Point2f* ptsf = points.ptr<Point2f>();
AutoBuffer<double> _Ad(n*5), _bd(n);
double *Ad = _Ad, *bd = _bd;
// first fit for parameters A - E
Mat A( n, 5, CV_64F, Ad );
Mat b( n, 1, CV_64F, bd );
Mat x( 5, 1, CV_64F, gfp );
for( i = 0; i < n; i++ )
{
Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
c += p;
}
c.x /= n;
c.y /= n;
for( i = 0; i < n; i++ )
{
Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
p -= c;
bd[i] = 10000.0; // 1.0?
Ad[i*5] = -(double)p.x * p.x; // A - C signs inverted as proposed by APP
Ad[i*5 + 1] = -(double)p.y * p.y;
Ad[i*5 + 2] = -(double)p.x * p.y;
Ad[i*5 + 3] = p.x;
Ad[i*5 + 4] = p.y;
}
solve(A, b, x, DECOMP_SVD);
// now use general-form parameters A - E to find the ellipse center:
// differentiate general form wrt x/y to get two equations for cx and cy
A = Mat( 2, 2, CV_64F, Ad );
b = Mat( 2, 1, CV_64F, bd );
x = Mat( 2, 1, CV_64F, rp );
Ad[0] = 2 * gfp[0];
Ad[1] = Ad[2] = gfp[2];
Ad[3] = 2 * gfp[1];
bd[0] = gfp[3];
bd[1] = gfp[4];
solve( A, b, x, DECOMP_SVD );
// re-fit for parameters A - C with those center coordinates
A = Mat( n, 3, CV_64F, Ad );
b = Mat( n, 1, CV_64F, bd );
x = Mat( 3, 1, CV_64F, gfp );
for( i = 0; i < n; i++ )
{
Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
p -= c;
bd[i] = 1.0;
Ad[i * 3] = (p.x - rp[0]) * (p.x - rp[0]);
Ad[i * 3 + 1] = (p.y - rp[1]) * (p.y - rp[1]);
Ad[i * 3 + 2] = (p.x - rp[0]) * (p.y - rp[1]);
}
solve(A, b, x, DECOMP_SVD);
// store angle and radii
rp[4] = -0.5 * atan2(gfp[2], gfp[1] - gfp[0]); // convert from APP angle usage
if( fabs(gfp[2]) > min_eps )
t = gfp[2]/sin(-2.0 * rp[4]);
else // ellipse is rotated by an integer multiple of pi/2
t = gfp[1] - gfp[0];
rp[2] = fabs(gfp[0] + gfp[1] - t);
if( rp[2] > min_eps )
rp[2] = std::sqrt(2.0 / rp[2]);
rp[3] = fabs(gfp[0] + gfp[1] + t);
if( rp[3] > min_eps )
rp[3] = std::sqrt(2.0 / rp[3]);
box.center.x = (float)rp[0] + c.x;
box.center.y = (float)rp[1] + c.y;
box.size.width = (float)(rp[2]*2);
box.size.height = (float)(rp[3]*2);
if( box.size.width > box.size.height )
{
float tmp;
CV_SWAP( box.size.width, box.size.height, tmp );
box.angle = (float)(90 + rp[4]*180/CV_PI);
}
if( box.angle < -180 )
box.angle += 360;
if( box.angle > 360 )
box.angle -= 360;
return box;
}
The source code link: https://github.com/Itseez/opencv/blob/master/modules/imgproc/src/shapedescr.cpp
The function fitEllipse returns a RotatedRect that contains all the parameters of the ellipse.
An ellipse is defined by 5 parameters:
xc : x coordinate of the center
yc : y coordinate of the center
a : major semi-axis
b : minor semi-axis
theta : rotation angle
You can obtain these parameters like:
RotatedRect e = fitEllipse(points);
float xc = e.center.x;
float yc = e.center.y;
float a = e.size.width / 2; // width >= height
float b = e.size.height / 2;
float theta = e.angle; // in degrees
You can draw an ellipse with the function ellipse using the RotatedRect:
ellipse(image, e, Scalar(0,255,0));
or, equivalently using the ellipse parameters:
ellipse(res, Point(xc, yc), Size(a, b), theta, 0.0, 360.0, Scalar(0,255,0));
If you need the values of the coefficients of the implicit equation Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0, you can compute them from the five parameters (from Wikipedia):
A = a^2 * sin^2(theta) + b^2 * cos^2(theta)
B = 2 * (b^2 - a^2) * sin(theta) * cos(theta)
C = a^2 * cos^2(theta) + b^2 * sin^2(theta)
D = -2*A*xc - B*yc
E = -B*xc - 2*C*yc
F = A*xc^2 + B*xc*yc + C*yc^2 - a^2*b^2
So, you can get the parameters you need from the RotatedRect, and you don't need to change the function fitEllipse.
The solve function is used to solve linear systems or least-squares problems. Using the SVD decomposition method the system can be over-defined and/or the matrix src1 can be singular.
For more details on the algorithm, you can see the paper by Fitzgibbon that proposed this ellipse-fitting method.
Here is some code that worked for me, based on the other responses in this thread.
import math
import numpy as np

def getConicCoeffFromEllipse(e):
# ellipse(Point(xc, yc),Size(a, b), theta)
xc = e[0][0]
yc = e[0][1]
a = e[1][0]/2
b = e[1][1]/2
theta = math.radians(e[2])
# See https://en.wikipedia.org/wiki/Ellipse
# Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0 is the equation
A = a*a*math.pow(math.sin(theta),2) + b*b*math.pow(math.cos(theta),2)
B = 2*(b*b - a*a)*math.sin(theta)*math.cos(theta)
C = a*a*math.pow(math.cos(theta),2) + b*b*math.pow(math.sin(theta),2)
D = -2*A*xc - B*yc
E = -B*xc - 2*C*yc
F = A*xc*xc + B*xc*yc + C*yc*yc - a*a*b*b
coef = np.array([A,B,C,D,E,F]) / F
return coef
def getConicMatrixFromCoeff(c):
C = np.array([[c[0], c[1]/2, c[3]/2],  # [ a , b/2, d/2 ]
              [c[1]/2, c[2], c[4]/2],  # [ b/2, c , e/2 ]
              [c[3]/2, c[4]/2, c[5]]]) # [ d/2, e/2, f  ]
return C
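Usage, assuming points is the ball contour found as in the question (in Python, cv2.fitEllipse returns exactly the ((xc, yc), (w, h), angle) tuple the helper above indexes):

import cv2

e = cv2.fitEllipse(points)            # ((xc, yc), (w, h), angle in degrees)
coef = getConicCoeffFromEllipse(e)    # (A, B, C, D, E, F), normalised so F == 1
C = getConicMatrixFromCoeff(coef)     # 3x3 symmetric conic matrix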
I converted an RGB matrix to a YUV matrix using this formula:
Y = (0.257 * R) + (0.504 * G) + (0.098 * B) + 16
Cr = V = (0.439 * R) - (0.368 * G) - (0.071 * B) + 128
Cb = U = -(0.148 * R) - (0.291 * G) + (0.439 * B) + 128
I then did a 4:2:0 chroma subsample on the matrix. I think I did this correctly: I took 2x2 submatrices from the YUV matrix, ordered the values from least to greatest, and took the average of the 2 middle values.
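For reference, the conventional 4:2:0 filter is a plain mean over each 2x2 block rather than a median; a numpy sketch, assuming u is a full-resolution chroma plane with even dimensions (the median-of-four described above should give similar results on smooth images):

import numpy as np

def subsample_420( u ):
    u = u.astype( np.float32 )
    return ( u[0::2, 0::2] + u[0::2, 1::2]
           + u[1::2, 0::2] + u[1::2, 1::2] ) / 4.0   # half-resolution plane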
I then used this formula, from Wikipedia, to access the Y, U, and V planes:
size.total = size.width * size.height;
y = yuv[position.y * size.width + position.x];
u = yuv[(position.y / 2) * (size.width / 2) + (position.x / 2) + size.total];
v = yuv[(position.y / 2) * (size.width / 2) + (position.x / 2) + size.total + (size.total / 4)];
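As a sanity check of that layout in Python (a sketch; yuv is the flat 4:2:0 buffer, px/py the pixel position):

def yuv420_at( yuv, width, height, px, py ):
    # full-size Y plane, then quarter-size U plane, then quarter-size V plane
    total = width * height
    y = yuv[ py * width + px ]
    u = yuv[ total + ( py // 2 ) * ( width // 2 ) + ( px // 2 ) ]
    v = yuv[ total + total // 4 + ( py // 2 ) * ( width // 2 ) + ( px // 2 ) ]
    return y, u, v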
I'm using OpenCV so I tried to interpret this as best I can:
y = src.data[(i*channels)+(j*step)];
u = src.data[(j%4)*step + ((i%2)*channels+1) + max];
v = src.data[(j%4)*step + ((i%2)*channels+2) + max + (max%4)];
src is the YUV subsampled matrix. Did I interpret that formula correctly?
Here is how I converted the colours back to RGB:
bgr.data[(i*channels)+(j*step)] = (1.164 * (y - 16)) + (2.018 * (u - 128)); // B
bgr.data[(i*channels+1)+(j*step)] = (1.164 * (y - 16)) - (0.813 * (v - 128)) - (0.391 * (u - 128)); // G
bgr.data[(i*channels+2)+(j*step)] = (1.164 * (y - 16)) + (1.596 * (v - 128)); // R
The problem is my image does not return to its original colours.
Here are the images for reference:
http://i.stack.imgur.com/vQkpT.jpg (Subsampled)
http://i.stack.imgur.com/Oucc5.jpg (Output)
I see that I should be converting from YUV444 to RGB now, but I don't quite understand what the clip function does in the sample I found on the wiki.
C = Y' − 16
D = U − 128
E = V − 128
R = clip(( 298 * C + 409 * E + 128) >> 8)
G = clip(( 298 * C - 100 * D - 208 * E + 128) >> 8)
B = clip(( 298 * C + 516 * D + 128) >> 8)
Does the >> mean I should shift bits?
I'd appreciate any help/comments! Thanks
Update
Tried doing the YUV444 conversion but it just made my image appear in shades of green.
y = src.data[(i*channels)+(j*step)];
u = src.data[(j%4)*step + ((i%2)*channels+1) + max];
v = src.data[(j%4)*step + ((i%2)*channels+2) + max + (max%4)];
c = y - 16;
d = u - 128;
e = v - 128;
bgr.data[(i*channels+2)+(j*step)] = clip((298*c + 409*e + 128)/256);
bgr.data[(i*channels+1)+(j*step)] = clip((298*c - 100*d - 208*e + 128)/256);
bgr.data[(i*channels)+(j*step)] = clip((298*c + 516*d + 128)/256);
And my clip function:
int clip(double value)
{
return (value > 255) ? 255 : (value < 0) ? 0 : value;
}
I had the same problem when decoding WebM frames to RGB. I finally found the solution after hours of searching.
Take SCALEYUV function from here: http://www.telegraphics.com.au/svn/webpformat/trunk/webpformat.h
Then to decode the RGB data from YUV, see this file:
http://www.telegraphics.com.au/svn/webpformat/trunk/decode.c
Search for "py = img->planes[0];"; there are two algorithms to convert the data. I only tried the simple one (after "// then fall back to cheaper method.").
Comments in the code also refer to this page: http://www.poynton.com/notes/colour_and_gamma/ColorFAQ.html#RTFToC30
Works great for me.
You won't get back perfectly the same image, since UV does compress the image.
You don't say if the result is completely wrong (i.e. an error) or just not perfect.
R = clip(( 298 * C + 409 * E + 128) >> 8)
G = clip(( 298 * C - 100 * D - 208 * E + 128) >> 8)
B = clip(( 298 * C + 516 * D + 128) >> 8)
The >> 8 is a bit shift, equivalent to dividing by 256. This is just to allow you to do all the arithmetic in integer units rather than floating point, for speed.
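For example, in Python the same fixed-point conversion reads:

def yuv_to_rgb( y, u, v ):
    c, d, e = y - 16, u - 128, v - 128
    clip = lambda x: max( 0, min( 255, x ) )   # clamp to the 0..255 range
    r = clip( ( 298 * c           + 409 * e + 128 ) >> 8 )
    g = clip( ( 298 * c - 100 * d - 208 * e + 128 ) >> 8 )
    b = clip( ( 298 * c + 516 * d           + 128 ) >> 8 )
    return r, g, b

print( yuv_to_rgb( 235, 128, 128 ) )   # white -> (255, 255, 255)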
I was experimenting with the formulas present on the wiki and found that this mixed formula:
byte c = (byte) (y - 16);
byte d = (byte) (u - 128);
byte e = (byte) (v - 128);
byte r = (byte) (c + (1.370705 * (e)));
byte g = (byte) (c - (0.698001 * (d)) - (0.337633 * (e)));
byte b = (byte) (c + (1.732446 * (d)));
produces "better" errors for my images, simply makes some black points pure green (i.e. rgb = 0x00FF00) which is better for detection and correction ...
wiki source: https://en.wikipedia.org/wiki/YUV#Y.27UV420p_.28and_Y.27V12_or_YV12.29_to_RGB888_conversion
I know the following formula can be used to convert RGB images to YUV images. In the following formula, R, G, B, Y, U, V are all 8-bit unsigned integers, and intermediate values are 16-bit unsigned integers.
Y = ( ( 66 * R + 129 * G + 25 * B + 128) >> 8) + 16
U = ( ( -38 * R - 74 * G + 112 * B + 128) >> 8) + 128
V = ( ( 112 * R - 94 * G - 18 * B + 128) >> 8) + 128
But when the formula is used in OpenCL it's a different story.
1. 8-bit memory write access is an optional extension, which means some OpenCL implementations may not support it.
2. Even if the above extension is supported, it's deadly slow compared with 32-bit write access.
In order to get better performance, every 4 pixels will be processed at the same time, so the input is 12 8-bit integers and the output is 3 32-bit unsigned integers (the first one stands for 4 Y samples, the second for 4 U samples, the last for 4 V samples).
My question is: how do I get these 3 32-bit integers directly from the 12 8-bit integers? Is there a formula to get these 3 32-bit integers, or do I just need to use the old formula to get 12 8-bit integer results (4 Y, 4 U, 4 V) and construct the 3 32-bit integers with bit-wise operations?
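The packing itself is just byte-wise shifting; a Python sketch of the idea (assuming the first sample goes into the lowest byte, as the kernel in the answer below does):

def pack4( b0, b1, b2, b3 ):
    # four 8-bit samples -> one 32-bit word, first sample in the lowest byte
    return b0 | ( b1 << 8 ) | ( b2 << 16 ) | ( b3 << 24 )

print( hex( pack4( 0x10, 0x20, 0x30, 0x40 ) ) )   # 0x40302010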
Even though this question was asked 2 years ago, I think some working code would help here. Regarding the initial concerns about bad performance when directly accessing 8-bit values, it's better to perform 32-bit direct access when possible.
Some time ago I developed and used the following OpenCL kernel to convert ARGB (typical Windows bitmap pixel layout) to the y-plane (full sized), u/v-half-plane (quarter sized) memory layout as input for libx264 encoding.
__kernel void ARGB2YUV (
__global unsigned int * sourceImage,
__global unsigned int * destImage,
unsigned int srcHeight,
unsigned int srcWidth,
unsigned int yuvStride // must be srcWidth/4 since we pack 4 pixels into 1 Y-unit (with 4 y-pixels)
)
{
int i,j;
unsigned int RGBs [ 4 ];
unsigned int posSrc, RGB, Value4 = 0, Value, yuvStrideHalf, srcHeightHalf, yPlaneOffset, posOffset;
unsigned char red, green, blue;
unsigned int posX = get_global_id(0);
unsigned int posY = get_global_id(1);
if ( posX < yuvStride ) {
// Y plane - pack 4 y's within each work item
if ( posY >= srcHeight )
return;
posSrc = (posY * srcWidth) + (posX * 4);
RGBs [ 0 ] = sourceImage [ posSrc ];
RGBs [ 1 ] = sourceImage [ posSrc + 1 ];
RGBs [ 2 ] = sourceImage [ posSrc + 2 ];
RGBs [ 3 ] = sourceImage [ posSrc + 3 ];
for ( i=0; i<4; i++ ) {
RGB = RGBs [ i ];
blue = RGB & 0xff; green = (RGB >> 8) & 0xff; red = (RGB >> 16) & 0xff;
Value = ( ( 66 * red + 129 * green + 25 * blue ) >> 8 ) + 16;
Value4 |= (Value << (i * 8));
}
destImage [ (posY * yuvStride) + posX ] = Value4;
return;
}
posX -= yuvStride;
yuvStrideHalf = yuvStride >> 1;
// U plane - pack 4 u's within each work item
if ( posX >= yuvStrideHalf )
return;
srcHeightHalf = srcHeight >> 1;
if ( posY < srcHeightHalf ) {
posSrc = ((posY * 2) * srcWidth) + (posX * 8);
RGBs [ 0 ] = sourceImage [ posSrc ];
RGBs [ 1 ] = sourceImage [ posSrc + 2 ];
RGBs [ 2 ] = sourceImage [ posSrc + 4 ];
RGBs [ 3 ] = sourceImage [ posSrc + 6 ];
for ( i=0; i<4; i++ ) {
RGB = RGBs [ i ];
blue = RGB & 0xff; green = (RGB >> 8) & 0xff; red = (RGB >> 16) & 0xff;
Value = ( ( -38 * red + -74 * green + 112 * blue ) >> 8 ) + 128;
Value4 |= (Value << (i * 8));
}
yPlaneOffset = yuvStride * srcHeight;
posOffset = (posY * yuvStrideHalf) + posX;
destImage [ yPlaneOffset + posOffset ] = Value4;
return;
}
posY -= srcHeightHalf;
if ( posY >= srcHeightHalf )
return;
// V plane - pack 4 v's within each work item
posSrc = ((posY * 2) * srcWidth) + (posX * 8);
RGBs [ 0 ] = sourceImage [ posSrc ];
RGBs [ 1 ] = sourceImage [ posSrc + 2 ];
RGBs [ 2 ] = sourceImage [ posSrc + 4 ];
RGBs [ 3 ] = sourceImage [ posSrc + 6 ];
for ( i=0; i<4; i++ ) {
RGB = RGBs [ i ];
blue = RGB & 0xff; green = (RGB >> 8) & 0xff; red = (RGB >> 16) & 0xff;
Value = ( ( 112 * red + -94 * green + -18 * blue ) >> 8 ) + 128;
Value4 |= (Value << (i * 8));
}
yPlaneOffset = yuvStride * srcHeight;
posOffset = (posY * yuvStrideHalf) + posX;
destImage [ yPlaneOffset + (yPlaneOffset >> 2) + posOffset ] = Value4;
return;
}
This code performs only global 32-bit memory access while 8-bit processing happens within each work item.
Oh.. and the proper code to invoke the kernel
unsigned int width = 1024;
unsigned int height = 768;
unsigned int frameSize = width * height;
const unsigned int argbSize = frameSize * 4; // ARGB pixels
const unsigned int yuvSize = frameSize + (frameSize >> 1); // Y,U,V planes
const unsigned int yuvStride = width >> 2; // since we pack 4 RGBs into "one" YYYY
// Allocates ARGB buffer
ocl_rgb_buffer = clCreateBuffer ( context, CL_MEM_READ_WRITE, argbSize, 0, &error );
// ... error handling ...
ocl_yuv_buffer = clCreateBuffer ( context, CL_MEM_READ_WRITE, yuvSize, 0, &error );
// ... error handling ...
error = clSetKernelArg ( kernel, 0, sizeof(cl_mem), &ocl_rgb_buffer );
error |= clSetKernelArg ( kernel, 1, sizeof(cl_mem), &ocl_yuv_buffer );
error |= clSetKernelArg ( kernel, 2, sizeof(unsigned int), &height);
error |= clSetKernelArg ( kernel, 3, sizeof(unsigned int), &width);
error |= clSetKernelArg ( kernel, 4, sizeof(unsigned int), &yuvStride);
// ... error handling ...
const size_t local_ws[] = { 16, 16 };
const size_t global_ws[] = { yuvStride + (yuvStride >> 1), height };
error = clEnqueueNDRangeKernel ( queue, kernel, 2, NULL, global_ws, local_ws, 0, NULL, NULL );
// ... error handling ...
Note: have a look at the work item calculations. Some additional code needs to be added (e.g. using mod so as to add sufficient spare items) to make sure that the work item sizes fit the local work sizes.
Like this? Use int4 unless your platform can use int3. Also you can pack 5 pixels into an int16 so you are wasting 1/16 instead of 1/4 of the memory bandwidth.
__kernel void rgb2yuv( __global int3* input, __global int3* output){
    // declarations were missing in the original snippet
    int3 rgb = input[get_global_id(0)];
    int R = rgb.x;
    int G = rgb.y;
    int B = rgb.z;
    int3 yuv;
    yuv.x = ( ( 66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
    yuv.y = ( ( -38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
    yuv.z = ( ( 112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
    output[get_global_id(0)] = yuv;
}
According to the OpenCL specification, the data type int3 doesn't exist.
Page 123:
Supported values of n are 2, 4, 8, and 16...
In your kernel variables rgb, R, G, B, and yuv should be at least __private int4.
OpenCL 1.1 added support for typen where n = 3. However, I strongly recommend you don't use it. Different vendor implementations have different bugs, and it's not saving you anything.