Generate ordered colors programmatically - ios

How to generate ordered color range programmatically from one color to another?
If we have this color range we need to cover:
0 0 255 255 255
179 255 0 255 58
255 255 0 0 0
Those are blue, light blue, green, yellow, orange-reddish.
So far I have found a bunch of questions about generating a random color range in the HSV color scheme. I need an ordered, evenly distributed RGB color range.
My previous solution was:
// Map progressAmount (0-100) onto 0-510, then ramp red up while blue holds,
// and ramp blue down while red holds — blending blue -> magenta -> red.
NSInteger input = (510.0f / 100.0f) * progressAmount;
input < 256 ? (_redColor = input) : (_redColor = 255); // red ramps 0..255, then saturates
input > 255 ? (_blueColor = 255 - (input - 255)) : (_blueColor = 255); // blue stays 255, then ramps down to 0
_indicatorColor = [UIColor colorWithRed:(CGFloat) _redColor / 255.0f green:0.0f blue:(CGFloat) _blueColor / 255.0f alpha:1.0f];
but now i need colors from more complex color range. Not just three like i had.

Look at the image of RGB space — maybe you will find a solution for making a simple path between the desired colors.

I have duplicated functionality from CGGradient.
Wrote this function to get color from specific range.
// function: get the color for `progress`, linearly interpolated over a multi-stop RGBA gradient
// param progress: current progress in the progress range; e.g. with range [-20, 60], any value in between
// param colors: array of numElements * 4 CGFloat values, RGBA components in the [0, 255] range
// param locationRange: array of numElements gradient stops, each in [0.0, 1.0], ascending
// param numElements: number of colors and number of locations (must be >= 2)
// note: min and max range can be a combination of positive and negative values: [-20, 50], [0, 100], [30, 90], [-60, -30]
+ (UIColor*) colorForProgress:(CGFloat)progress lowerRange:(CGFloat)lowerRange upperRange:(CGFloat)upperRange colorRGBAArray:(CGFloat*)colors locationRange:(CGFloat*)locationRange numElements:(NSUInteger)numElements
{
    NSAssert(colors != NULL, @"color array is NULL");
    NSAssert(locationRange != NULL, @"locationRange array is NULL"); // message previously said "numElements"
    NSAssert(numElements >= 2, @"need at least two colors to interpolate");
#ifdef DEBUG
    for (NSUInteger i = 0; i < numElements; i++)
    {
        NSAssert2(locationRange[i] >= 0.0f && locationRange[i] <= 1.0f,
                  @"locationRange %lu %6.2f not in range [0.0, 1.0]",
                  (unsigned long)i, locationRange[i]);
    }
#endif
    // Normalize progress from [lowerRange, upperRange] into [0.0, 1.0].
    CGFloat span = upperRange - lowerRange;
    NSAssert(span > 0.0f, @"upperRange must be greater than lowerRange");
    CGFloat progressLocal = (progress - lowerRange) / span;
    NSAssert(progressLocal >= 0.0f && progressLocal <= 1.0f, @"progress not in range [lowerRange, upperRange]");

    // Find the two gradient stops bracketing progressLocal.
    CGFloat b1 = 0.0f, b2 = 0.0f;
    const CGFloat *color1 = NULL, *color2 = NULL;
    if (progressLocal < 1.0f)
    {
        for (NSUInteger i = 0; i < numElements - 1; i++) // ignore last
        {
            if (progressLocal >= locationRange[i] && progressLocal < locationRange[i + 1])
            {
                b1 = locationRange[i];
                b2 = locationRange[i + 1];
                color1 = colors + 4 * i;
                color2 = colors + 4 * (i + 1);
                break;
            }
        }
    }
    else
    {
        // progressLocal == 1.0: use the last pair of stops.
        b1 = locationRange[numElements - 2];
        b2 = locationRange[numElements - 1];
        color1 = colors + 4 * (numElements - 2);
        color2 = colors + 4 * (numElements - 1);
    }
    if (color1 == NULL || color2 == NULL)
    {
        // progressLocal fell outside the provided stops (e.g. locationRange does
        // not start at 0.0). Clamp to the nearest endpoint instead of
        // dereferencing NULL, which the previous version would have done.
        NSUInteger index = (progressLocal <= locationRange[0]) ? 0 : numElements - 1;
        b1 = b2 = locationRange[index];
        color1 = color2 = colors + 4 * index;
    }

    // Linear interpolation between the two bracketing colors.
    CGFloat segment = b2 - b1;
    CGFloat t = (segment > 0.0f) ? (progressLocal - b1) / segment : 0.0f; // guard zero-width segment
    CGFloat newColors[4];
    for (int i = 0; i < 4; i++) // all four components; alpha was previously ignored
    {
        newColors[i] = color1[i] + (color2[i] - color1[i]) * t;
    }
    return [UIColor colorWithRed:newColors[0] / 255.0f
                           green:newColors[1] / 255.0f
                            blue:newColors[2] / 255.0f
                           alpha:newColors[3] / 255.0f];
}
I'll leave this question unanswered for some time in case anyone proposes better solution.

Related

Full range of Hues: HSV to RGB color conversion for OpenCV

The following code runs without exception on iOS (Xcode-v6.2 and openCV-v3.0beta). But for some reason the image the function returns is "black" !
The code is adapted from this link ! I tried to replace the oldish "IplImage*" by more modern "cv::Mat" matrices. Does anybody know if my function still has a mistake or why it would return a completely "black" image instead of a colored image in HSV-format.
By the way, the reason I would want to use this function [instead of cvtColor(cv_src, imgHSV, cv::COLOR_BGR2HSV)] is that I would like to get the 0-255 range of Hue values (...since OpenCV only allows Hues up to 180 instead of 255).
// Create a HSV image from the RGB image using the full 8-bits, since OpenCV only allows Hues up to 180 instead of 255.
// Create a HSV image from the RGB image using the full 8-bits, since OpenCV only allows Hues up to 180 instead of 255.
// NOTE: cv::Mat::depth() returns the depth *symbol* (CV_8U == 0), not the bit
// count, so the original `depth() == 8` test never matched and the function
// silently returned the uninitialized (black) output image.
cv::Mat convertImageRGBtoHSV(cv::Mat imageRGB) {
    float fR, fG, fB;
    float fH, fS, fV;
    const float FLOAT_TO_BYTE = 255.0f;
    const float BYTE_TO_FLOAT = 1.0f / FLOAT_TO_BYTE;
    // Create a blank HSV image of the same dimensions.
    cv::Mat imageHSV(imageRGB.rows, imageRGB.cols, CV_8UC3);
    int rowSizeHSV = (int)imageHSV.step; // Size of row in bytes, including extra padding.
    char *imHSV = (char*)imageHSV.data; // Pointer to the start of the image pixels.
    if (imageRGB.depth() == CV_8U && imageRGB.channels() == 3) { // compare against CV_8U, not 8
        for (int y = 0; y < imageRGB.rows; ++y)
        {
            // Get a pointer to the current row.
            cv::Vec3b* row = imageRGB.ptr<cv::Vec3b>(y);
            // Scan the row.
            for (int x = 0; x < imageRGB.cols; ++x)
            {
                // Get the RGB pixel components. NOTE that OpenCV stores RGB pixels in B,G,R order.
                cv::Vec3b pixel = row[x];
                int bR = pixel[2];
                int bG = pixel[1];
                int bB = pixel[0];
                // Convert from 8-bit integers to floats in [0.0, 1.0].
                fR = bR * BYTE_TO_FLOAT;
                fG = bG * BYTE_TO_FLOAT;
                fB = bB * BYTE_TO_FLOAT;
                // Convert from RGB to HSV, using float ranges 0.0 to 1.0.
                float fDelta;
                float fMin, fMax;
                int iMax;
                // Get the min and max, but use integer comparisons for slight speedup.
                if (bB < bG) {
                    if (bB < bR) {
                        fMin = fB;
                        if (bR > bG) {
                            iMax = bR;
                            fMax = fR;
                        }
                        else {
                            iMax = bG;
                            fMax = fG;
                        }
                    }
                    else {
                        fMin = fR;
                        fMax = fG;
                        iMax = bG;
                    }
                }
                else {
                    if (bG < bR) {
                        fMin = fG;
                        if (bB > bR) {
                            fMax = fB;
                            iMax = bB;
                        }
                        else {
                            fMax = fR;
                            iMax = bR;
                        }
                    }
                    else {
                        fMin = fR;
                        fMax = fB;
                        iMax = bB;
                    }
                }
                fDelta = fMax - fMin;
                fV = fMax; // Value (Brightness).
                if (iMax != 0) { // Make sure it's not pure black.
                    fS = fDelta / fMax; // Saturation.
                    float ANGLE_TO_UNIT = 1.0f / (6.0f * fDelta); // Make the Hues between 0.0 to 1.0 instead of 6.0
                    if (iMax == bR) { // between yellow and magenta.
                        fH = (fG - fB) * ANGLE_TO_UNIT;
                    }
                    else if (iMax == bG) { // between cyan and yellow.
                        fH = (2.0f/6.0f) + ( fB - fR ) * ANGLE_TO_UNIT;
                    }
                    else { // between magenta and cyan.
                        fH = (4.0f/6.0f) + ( fR - fG ) * ANGLE_TO_UNIT;
                    }
                    // Wrap outlier Hues around the circle.
                    if (fH < 0.0f)
                        fH += 1.0f;
                    if (fH >= 1.0f)
                        fH -= 1.0f;
                }
                else {
                    // Color is pure black: saturation and hue are undefined.
                    fS = 0;
                    fH = 0;
                }
                // Convert from floats back to 8-bit integers (with rounding).
                int bH = (int)(0.5f + fH * 255.0f);
                int bS = (int)(0.5f + fS * 255.0f);
                int bV = (int)(0.5f + fV * 255.0f);
                // Clip the values to make sure they fit within 8 bits.
                if (bH > 255)
                    bH = 255;
                if (bH < 0)
                    bH = 0;
                if (bS > 255)
                    bS = 255;
                if (bS < 0)
                    bS = 0;
                if (bV > 255)
                    bV = 255;
                if (bV < 0)
                    bV = 0;
                // Set the HSV pixel components.
                uchar *pHSV = (uchar*)(imHSV + y*rowSizeHSV + x*3);
                *(pHSV+0) = bH; // H component
                *(pHSV+1) = bS; // S component
                *(pHSV+2) = bV; // V component
            }
        }
    }
    return imageHSV;
}
The cv::Mat M.depth() of a CV_8UC3-type matrix does unfortunately not return 8 - but instead it returns 0
Please have a look at the file "type_c.h"
#define CV_8U 0
#define CV_CN_SHIFT 3
#define CV_MAKETYPE(depth,cn) (CV_MAT_DEPTH(depth) + (((cn)-1) << CV_CN_SHIFT))
#define CV_8UC3 CV_MAKETYPE(CV_8U,3)
depth() doesn't return the actual bit depth but the number symbol that represents the depth !!
After replacing to the following line - it all works !! (i.e. replacing .depth() by .type() in the if-statement...)
if (imageHSV.type() == CV_8UC3 && imageHSV.channels() == 3) {...}

Triangle Gradient With Core Graphics

I'm trying to draw a triangle like this one in a view (one UIView, one NSView):
My first thought was CoreGraphics, but I couldn't find any information that would help me draw a gradient between three points of arbitrary color.
Any help?
Thanks!
Actually it's pretty simple with CoreGraphics. Below you can find code that renders given triangle, but first let's think how we can solve this problem.
Theory
Imagine equilateral triangle with side length w. All three angles are equal to 60 degrees:
Each angle will represent component of a pixel: red, green or blue.
Lets analyze intensity of a green component in a pixel near top angle:
The more closer pixel to the angle, the more component intense it'll have and vice versa. Here we can decompose our main goal to smaller ones:
Draw triangle pixel by pixel.
For each pixel calculate value for each component based on distance from corresponding angle.
To solve first task we will use CoreGraphics bitmap context. It will have four components per pixel each 8 bits long. This means that component value may vary from 0 to 255. Fourth component is alpha channel and will be always equal to max value - 255. Here is example of how values will be interpolated for the top angle:
Now we need to think how we can calculate value for component.
First, let's define main color for each angle:
Now let's choose an arbitrary point A with coordinates (x,y) on the triangle:
Next, we draw a line from an angle associated with red component and it passes through the A till it intersects with opposite side of a triangle:
If we could find d and c their quotient will equal to normalized value of component, so value can be calculated easily:
(source: sciweavers.org)
Formula for finding distance between two points is simple:
(source: sciweavers.org)
We can easily find distance for d, but not for c, because we don't have coordinates of intersection. Actually it's not that hard. We just need to build line equations for line that passes through A and line that describes opposite side of a triangle and find their intersection:
Having intersection point we can apply distance formula to find c and finally calculate component value for current point.
Same flow applies for another components.
Code
Here is the code that implements concepts above:
/// Convenience entry point: renders the triangle at the main screen's scale.
+ (UIImage *)triangleWithSideLength:(CGFloat)sideLength {
    CGFloat screenScale = [UIScreen mainScreen].scale;
    return [self triangleWithSideLength:sideLength scale:screenScale];
}
/// Renders an equilateral triangle whose corners are pure red, green and blue,
/// with each interior pixel's components interpolated by distance to the corners.
/// @param sideLength Triangle side length in points.
/// @param scale Pixels-per-point factor for the backing bitmap.
/// @return The rendered image, or nil if the bitmap context could not be created.
+ (UIImage *)triangleWithSideLength:(CGFloat)sideLength
                              scale:(CGFloat)scale {
    UIImage *image = nil;
    CGSize size = CGSizeApplyAffineTransform((CGSize){sideLength, sideLength * sin(M_PI / 3)}, CGAffineTransformMakeScale(scale, scale));
    size_t const numberOfComponents = 4;
    size_t width = ceilf(size.width);
    size_t height = ceilf(size.height);
    size_t realBytesPerRow = width * numberOfComponents;
    // Round the row stride up to a 256-byte boundary.
    size_t alignedBytesPerRow = (realBytesPerRow + 0xFF) & ~0xFF;
    size_t alignedPixelsPerRow = alignedBytesPerRow / numberOfComponents;
    // Hold the color space in a local so it can be released — the previous
    // version leaked the CGColorSpaceRef created inline in the call.
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef ctx = CGBitmapContextCreate(NULL,
                                             width,
                                             height,
                                             8,
                                             alignedBytesPerRow,
                                             colorSpace,
                                             (CGBitmapInfo)kCGImageAlphaPremultipliedLast);
    CGColorSpaceRelease(colorSpace);
    if (ctx == NULL) {
        // Context creation can fail (e.g. degenerate sizes); previously this
        // would have crashed on the NULL data pointer below.
        return nil;
    }
    char *data = CGBitmapContextGetData(ctx);
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            // Skip pixels left/right of the triangle's slanted edges.
            int edge = ceilf((height - i) / sqrt(3));
            if (j < edge || j > width - edge) {
                continue;
            }
            CGFloat redNormalized = 0;
            CGFloat greenNormalized = 0;
            CGFloat blueNormalized = 0;
            // Convert bitmap coordinates (top-left origin) to triangle
            // coordinates in points (bottom-left origin).
            CGPoint currentTrianglePoint = (CGPoint){j / scale, (height - i) / scale};
            [self calculateCurrentValuesAtGiventPoint:currentTrianglePoint
                                           sideLength:sideLength
                                              sideOne:&redNormalized
                                              sideTwo:&greenNormalized
                                            sideThree:&blueNormalized];
            int32_t red = redNormalized * 0xFF;
            int32_t green = greenNormalized * 0xFF;
            int32_t blue = blueNormalized * 0xFF;
            char *pixel = data + (j + i * alignedPixelsPerRow) * numberOfComponents;
            *pixel = red;
            *(pixel + 1) = green;
            *(pixel + 2) = blue;
            *(pixel + 3) = 0xFF; // fully opaque
        }
    }
    CGImageRef cgImage = CGBitmapContextCreateImage(ctx);
    image = [[UIImage alloc] initWithCGImage:cgImage];
    CGContextRelease(ctx);
    CGImageRelease(cgImage);
    return image;
}
// Computes, for a point inside an equilateral triangle, the normalized (0..1)
// intensity of each corner's color component: the point's distance to a corner
// divided by the length of the full chord from that corner through the point
// to the opposite side. Any out-parameter may be NULL to skip that component.
+ (void)calculateCurrentValuesAtGiventPoint:(CGPoint)point
sideLength:(CGFloat)length
sideOne:(out CGFloat *)sideOne
sideTwo:(out CGFloat *)sideTwo
sideThree:(out CGFloat *)sideThree {
// Triangle height for side length `length`.
CGFloat height = sin(M_PI / 3) * length;
if (sideOne != NULL) {
// Side one is at 0, 0
CGFloat currentDistance = sqrt(point.x * point.x + point.y * point.y);
if (currentDistance != 0) {
// Slope of the line through the origin and `point`.
// NOTE(review): divides by point.x with no guard — division by zero when
// point.x == 0 (sideThree below guards its dx with FLT_EPSILON); confirm
// callers never pass x == 0 here.
CGFloat a = point.y / point.x;
CGFloat b = 0;
// Opposite side expressed as y = c*x + d.
CGFloat c = -height / (length / 2);
CGFloat d = 2 * height;
// Intersection of the two lines = far end of the chord.
CGPoint intersection = (CGPoint){(d - b) / (a - c), (a * d - c * b) / (a - c)};
CGFloat currentH = sqrt(intersection.x * intersection.x + intersection.y * intersection.y);
*sideOne = 1 - currentDistance / currentH;
} else {
// Point coincides with this corner: full intensity.
*sideOne = 1;
}
}
if (sideTwo != NULL) {
// Side two is at w, 0
CGFloat currentDistance = sqrt(pow((point.x - length), 2) + point.y * point.y);
if (currentDistance != 0) {
// NOTE(review): same unguarded division — undefined when point.x == length.
CGFloat a = point.y / (point.x - length);
CGFloat b = height / (length / 2);
CGFloat c = a * -point.x + point.y;
CGFloat d = b * -length / 2 + height;
CGPoint intersection = (CGPoint){(d - c) / (a - b), (a * d - b * c) / (a - b)};
CGFloat currentH = sqrt(pow(length - intersection.x, 2) + intersection.y * intersection.y);
*sideTwo = 1 - currentDistance / currentH;
} else {
*sideTwo = 1;
}
}
if (sideThree != NULL) {
// Side three is at w / 2, w * sin60 degrees
CGFloat currentDistance = sqrt(pow((point.x - length / 2), 2) + pow(point.y - height, 2));
if (currentDistance != 0) {
float dy = point.y - height;
float dx = (point.x - length / 2);
// Guard against a vertical chord (dx ~ 0) before forming the slope.
if (fabs(dx) > FLT_EPSILON) {
CGFloat a = dy / dx;
CGFloat b = 0;
CGFloat c = a * -point.x + point.y;
CGFloat d = 0;
CGPoint intersection = (CGPoint){(d - c) / (a - b), (a * d - b * c) / (a - b)};
CGFloat currentH = sqrt(pow(length / 2 - intersection.x, 2) + pow(height - intersection.y, 2));
*sideThree = 1 - currentDistance / currentH;
} else {
// Vertical chord: its full length is exactly the triangle height.
*sideThree = 1 - currentDistance / height;
}
} else {
*sideThree = 1;
}
}
}
Here is a triangle image produced by this code:

Create ColorCube CIFilter

I want to create ColorCube CIFilter for my app and i found documentation on apple site here https://developer.apple.com/library/ios/documentation/GraphicsImaging/Conceptual/CoreImaging/ci_filer_recipes/ci_filter_recipes.html .
Also i post code here,
// Allocate memory
const unsigned int size = 64;
float *cubeData = (float *)malloc (size * size * size * sizeof (float) * 4);
float rgb[3], hsv[3], *c = cubeData;
// Populate cube with a simple gradient going from 0 to 1
// Walk the size^3 lattice in z/y/x (blue/green/red) order, writing one RGBA
// entry per lattice point into the cube buffer through the cursor `c`.
for (int z = 0; z < size; z++) {
    rgb[2] = ((double)z) / (size - 1); // Blue value
    for (int y = 0; y < size; y++) {
        rgb[1] = ((double)y) / (size - 1); // Green value
        for (int x = 0; x < size; x++) {
            rgb[0] = ((double)x) / (size - 1); // Red value
            // Convert RGB to HSV (publicly available rgbToHSV functions exist).
            rgbToHSV(rgb, hsv);
            // Hue decides transparency: hues strictly inside
            // (minHueAngle, maxHueAngle) are keyed out (alpha = 0).
            float alpha = (hsv[0] > minHueAngle && hsv[0] < maxHueAngle) ? 0.0f : 1.0f;
            // Store premultiplied-alpha RGBA and advance the cursor.
            *c++ = rgb[0] * alpha;
            *c++ = rgb[1] * alpha;
            *c++ = rgb[2] * alpha;
            *c++ = alpha;
        }
    }
}
I want to know why they chose size = 64, and what the meaning of the bold lines in the code is.
Any help appreciated...

Drawing Image with Alternating Colors

How can we draw a background pattern (to be set to UIImageView) programmatically like the following?
It has alternating colored squares like 20 x 20 pixels. I can do it with REAL Stupid and MS Visual Basic. I have never done it with iOS. If I run a search, one clue that I get is colorWithPatternImage. I used the following REAL Stupid code a few years ago. It works regardless of the dimensions of the canvas (equivalent to UIImageView).
' Legacy REALbasic routine: tiles the canvas with 20x20 squares whose colors
' alternate in a checkerboard pattern; CField1.text selects the palette.
Dim i,j As Integer
For j=0 To Ceil(CanvasX.Height/20)
For i=0 To Ceil(CanvasX.Width/20)
' Row and column both even: primary (dark) square.
If i Mod 2=0 And j Mod 2=0 Then
If CField1.text="1" Then
g.ForeColor=&cCC9900
Elseif CField1.text="2" Then
g.ForeColor=&c000000
Else
g.ForeColor=&cCCCCCC
End if
' Row and column both odd: also a primary square (diagonal partner).
Elseif i Mod 2>0 And j Mod 2>0 Then
If CField1.text="1" Then
g.ForeColor=&cCC9900
Else
g.ForeColor=&cCCCCCC
End if
' Mixed parity: secondary (light) square.
Else
If CField1.text="1" Then
g.ForeColor=&cE6E6E6
Else
g.ForeColor=&cFFFFFF
End if
End if
' Paint the current 20x20 cell.
g.FillRect i*20,j*20,20,20
Next i
Next j
Thank you for your help.
Approach #1: take this image:
Then set a background color by specifying a pattern image:
// Tile the pattern image as the view's background (the "#" prefix was a
// scrape artifact for the ObjC string literal prefix "@").
UIImage *bgImage = [UIImage imageNamed:@"squares"];
UIColor *bgColor = [UIColor colorWithPatternImage:bgImage];
someView.backgroundColor = bgColor;
Approach #2: use Quartz. Subclass UIView, then implement the following method:
// Draws a checkerboard of `side`-point squares, alternating white and gray
// per row and column, filling the view's bounds.
- (void)drawRect:(CGRect)rect
{
    [super drawRect:rect];
    CGContextRef ctx = UIGraphicsGetCurrentContext();
    CGFloat ws = self.frame.size.width;
    CGFloat hs = self.frame.size.height;
    const int side = 10;
    int nx = ws / side;
    int ny = hs / side;
    // (nx + 1) / 2 slots: for odd nx the even-column pass produces one more
    // rect than nx / 2, which previously overflowed the VLA.
    CGRect rects[(nx + 1) / 2];
    static const CGFloat w[4] = { 1.0, 1.0, 1.0, 1.0 }; // white RGBA
    static const CGFloat g[4] = { .75, .75, .75, .75 }; // light gray RGBA
    for (int i = 0; i < ny; i++) {
        // Even columns of this row.
        int evenCount = 0;
        for (int j = 0; j < nx; j += 2) {
            rects[evenCount++] = CGRectMake(j * side, i * side, side, side);
        }
        CGContextSetFillColor(ctx, (i % 2) ? g : w);
        CGContextFillRects(ctx, rects, evenCount);
        // Odd columns of this row, in the alternate color.
        int oddCount = 0;
        for (int j = 1; j < nx; j += 2) {
            rects[oddCount++] = CGRectMake(j * side, i * side, side, side);
        }
        CGContextSetFillColor(ctx, (i % 2) ? w : g);
        CGContextFillRects(ctx, rects, oddCount);
    }
}

equalize/normalize Hue Saturation Brightness in color images with OpenCV

I want to equalize two half-face color images of the same subject and then merge them. Each of them has different values of hue, saturation and brightness. Using OpenCV, how can I normalize/equalize each half image?
I tried performing cvEqualizeHist(v, v); on the v value of the converted HSV image, but two images still have significant difference and after the merge still has a line between the colors of the two halves...thanks
Have you tried reading this link? http://answers.opencv.org/question/75510/how-to-make-auto-adjustmentsbrightness-and-contrast-for-image-android-opencv-image-correction/
// Auto brightness/contrast: linearly stretches the grayscale intensity range
// of `src` to the full [0, 255] range, optionally clipping `clipHistPercent`
// percent of outlier pixels from each histogram tail first.
// For BGRA input the alpha channel is restored from the source afterwards.
void Utils::BrightnessAndContrastAuto(const cv::Mat &src, cv::Mat &dst, float clipHistPercent)
{
    CV_Assert(clipHistPercent >= 0);
    CV_Assert((src.type() == CV_8UC1) || (src.type() == CV_8UC3) || (src.type() == CV_8UC4));
    int histSize = 256;
    float alpha, beta;
    double minGray = 0, maxGray = 0;
    // Work on a grayscale projection of the input.
    cv::Mat gray;
    if (src.type() == CV_8UC1) gray = src;
    else if (src.type() == CV_8UC3) cvtColor(src, gray, CV_BGR2GRAY);
    else if (src.type() == CV_8UC4) cvtColor(src, gray, CV_BGRA2GRAY);
    if (clipHistPercent == 0)
    {
        // Keep the full available range.
        cv::minMaxLoc(gray, &minGray, &maxGray);
    }
    else
    {
        cv::Mat hist; // the grayscale histogram
        float range[] = { 0, 256 };
        const float* histRange = { range };
        bool uniform = true;
        bool accumulate = false;
        calcHist(&gray, 1, 0, cv::Mat(), hist, 1, &histSize, &histRange, uniform, accumulate);
        // Cumulative distribution from the histogram.
        std::vector<float> accumulator(histSize);
        accumulator[0] = hist.at<float>(0);
        for (int i = 1; i < histSize; i++)
        {
            accumulator[i] = accumulator[i - 1] + hist.at<float>(i);
        }
        // Locate the cut points; convert percent to an absolute pixel count
        // and split it between the two tails.
        float max = accumulator.back();
        clipHistPercent *= (max / 100.0);
        clipHistPercent /= 2.0;
        // Left cut — use integer indices (previously a double was used to
        // index the vector) and bound the walk so it cannot run off the end.
        int lowCut = 0;
        while (lowCut < histSize - 1 && accumulator[lowCut] < clipHistPercent)
            lowCut++;
        minGray = lowCut;
        // Right cut, bounded symmetrically.
        int highCut = histSize - 1;
        while (highCut > 0 && accumulator[highCut] >= (max - clipHistPercent))
            highCut--;
        maxGray = highCut;
    }
    // Current intensity range.
    float inputRange = (float)(maxGray - minGray);
    if (inputRange <= 0)
    {
        // Flat image: nothing to stretch; avoid the division by zero below.
        src.copyTo(dst);
        return;
    }
    alpha = (histSize - 1) / inputRange; // alpha expands current range to histsize range
    beta = -minGray * alpha;             // beta shifts current range so that minGray will go to 0
    // Apply brightness and contrast normalization (convertTo uses saturate_cast).
    src.convertTo(dst, -1, alpha, beta);
    // Restore the alpha channel from the source. The second argument is the
    // number of source Mats — there is exactly one, not 4 (4 would make
    // mixChannels read three nonexistent matrices past &src).
    if (dst.type() == CV_8UC4)
    {
        int from_to[] = { 3, 3 };
        cv::mixChannels(&src, 1, &dst, 1, from_to, 1);
    }
    return;
}
I'm not sure as I'm now facing the same problem,
but maybe try to equalize the H & S values instead of the V?
Also try manually adjusting it using Photoshop to see what works best and then try to replicate it using code.

Resources