Getting pixel values from a PNG file: which method is right, and why? - ios

I need the pixel values from a PNG file, so I searched SO and found the two methods below:
Method 1
- (void)GeneratePixelArray {
    UIImage *cImage = [UIImage imageNamed:@"ball.png"];
    // Use the CGImage's pixel dimensions; UIImage.size is in points and can
    // differ from the pixel size on Retina screens.
    int width = (int)CGImageGetWidth(cImage.CGImage);
    int height = (int)CGImageGetHeight(cImage.CGImage);
    // Copy the raw bytes exactly as the decoded image stores them.
    CFDataRef imageData = CGDataProviderCopyData(CGImageGetDataProvider(cImage.CGImage));
    const UInt32 *pixels = (const UInt32 *)CFDataGetBytePtr(imageData);
    // 0xff 0x00 are guard values for this demo
    for (int j = 0; j < (width * height); j++)
    {
        printf("0x%x\n", (unsigned int)pixels[j]);
    }
    CFRelease(imageData);
}
Method 2
- (void *)GeneratePixelArray//: (UIImage *)image
{
    UIImage *cImage = [UIImage imageNamed:@"ball.png"]; // imageNamed: is a class method
    int pixelsWidth = (int)CGImageGetWidth(cImage.CGImage);
    int pixelsHeight = (int)CGImageGetHeight(cImage.CGImage);
    CGRect rect = {{0, 0}, {pixelsWidth, pixelsHeight}};
    // Use an RGB color space
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    if (colorSpace == NULL)
    {
        NSLog(@"Error allocating color space!\n");
        return NULL;
    }
    unsigned int bitmapBytesPerRow = pixelsWidth * 4;
    unsigned int bitmapByteCount = bitmapBytesPerRow * pixelsHeight;
    void *bitmapData = malloc(bitmapByteCount);
    if (bitmapData == NULL) {
        NSLog(@"Memory not allocated!\n");
        CGColorSpaceRelease(colorSpace);
        return NULL;
    }
    // Create a bitmap context, 8 bits per component, RGBA premultiplied-last
    CGContextRef context = CGBitmapContextCreate(bitmapData,
                                                 pixelsWidth,
                                                 pixelsHeight,
                                                 8, // bits per component
                                                 bitmapBytesPerRow,
                                                 colorSpace,
                                                 kCGImageAlphaPremultipliedLast);
    // Make sure to release the color space before returning
    CGColorSpaceRelease(colorSpace);
    if (context == NULL)
    {
        free(bitmapData);
        NSLog(@"Context could not be created!");
        return NULL;
    }
    // Draw the image into the bitmap context
    CGContextDrawImage(context, rect, cImage.CGImage);
    // Now we can get a pointer to the image data associated with the context
    unsigned char *bitsData = CGBitmapContextGetData(context);
    if (!bitsData)
    {
        NSLog(@"Failed");
        CGContextRelease(context);
        free(bitmapData);
        return NULL;
    }
    unsigned char *bitmapData2 = malloc(bitmapByteCount);
    if (bitmapData2 == NULL)
    {
        NSLog(@"Memory could not be allocated!\n");
        CGContextRelease(context);
        free(bitmapData);
        return NULL;
    }
    unsigned char *rcdata = bitsData;
    unsigned char *wcdata = bitmapData2;
    // Copy each RGBA pixel. Note: despite the original comment about removing
    // the alpha channel, all four bytes (including alpha) are copied here.
    for (int i = 0; i < bitmapByteCount / 4; ++i, rcdata += 4)
    {
        printf("%x\n", (unsigned int)*(unsigned int *)rcdata);
        *(wcdata + 0) = *(rcdata + 0);
        *(wcdata + 1) = *(rcdata + 1);
        *(wcdata + 2) = *(rcdata + 2);
        *(wcdata + 3) = *(rcdata + 3);
        if (*(wcdata + 3) < 20) {
            printf("alpha...\n");
        }
        // if ((*(wcdata + 0) == 255) && (*(wcdata + 1) == 0) && (*(wcdata + 2) == 0)) {
        //     printf("red\n");
        // }
        wcdata += 4; // next pixel
    }
    free(bitmapData2); // was leaked in the original
    CGContextRelease(context);
    return bitsData; // this is the malloc'd bitmapData; the caller must free() it
}
I logged the output using:
printf("%x\n", (unsigned int)*(unsigned int *)rcdata);
The Method 1 and Method 2 logs differ. Why? I am confused.
Thanks a lot!
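A note on the likely cause (not part of the original post): Method 1 dumps the PNG's bytes exactly as the decoder stored them, so channel order, alpha premultiplication, and row padding are whatever the CGImage's backing store happens to use, while Method 2 re-renders the image into a format you explicitly requested (RGBA8888, premultiplied-last). The two dumps only match when those formats coincide. A small sketch to inspect the native format; the helper name is ours:

// Hypothetical helper: log how a CGImage natively stores its pixels.
// CGDataProviderCopyData returns these bytes as-is, while a bitmap context
// re-renders into the format you request, so the two dumps can differ.
static void LogNativePixelFormat(CGImageRef image) {
    CGBitmapInfo info = CGImageGetBitmapInfo(image);
    NSLog(@"bitsPerPixel=%zu bytesPerRow=%zu (width*4=%zu) alpha=%u order=%u",
          CGImageGetBitsPerPixel(image),
          CGImageGetBytesPerRow(image), // may include row padding
          CGImageGetWidth(image) * 4,
          (unsigned)(info & kCGBitmapAlphaInfoMask),
          (unsigned)(info & kCGBitmapByteOrderMask));
}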

Related

How to convert CT (Computed Tomography) grayscale binary data to an RGB UIImage

I have binary CT grayscale data in the file data_byte1.
The aim is to change the window width and window level to change how the CT image is displayed.
Two bytes in the data make one pixel, and the width and height are both 512 pixels.
How do I transform the pixel array into an RGB array and then into a bitmap?
- (void)viewDidLoad {
    [super viewDidLoad];
    NSData *data = [[NSData alloc] initWithContentsOfFile:[[NSBundle mainBundle] pathForResource:@"data_byte1" ofType:@""]];
    Byte *testByte = (Byte *)[data bytes];
    self.imageView.image = [self imageFromGRAYBytes:testByte imageSize:CGSizeMake(512, 512)];
}
- (UIImage *)imageFromGRAYBytes:(unsigned char *)imageBytes imageSize:(CGSize)imageSize {
    CGImageRef imageRef = [self imageRefFromGRAYBytes:imageBytes imageSize:imageSize];
    UIImage *image = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);
    return image;
}
- (CGImageRef)imageRefFromGRAYBytes:(unsigned char *)imageBytes imageSize:(CGSize)imageSize {
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
    CGContextRef context = CGBitmapContextCreate(imageBytes,
                                                 imageSize.width,
                                                 imageSize.height,
                                                 16,                  // bits per component
                                                 imageSize.width * 2, // two bytes per pixel
                                                 colorSpace,
                                                 kCGImageAlphaNone);
    CGImageRef imageRef = CGBitmapContextCreateImage(context);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    return imageRef;
}
Going through the method above, I obtain the original image. But I want to change how the picture is displayed, using the following method:
// Map a raw value v to a presentation value in 0..255,
// using window width w and window level h.
- (int)getPresentValueV:(int)v AndWidth:(int)w AndHeight:(int)h {
    int minv = h - roundf(w / 2);
    if (v < minv) {
        return 0;
    }
    int maxv = h + roundf(w / 2);
    if (v > maxv) {
        return 255;
    }
    int pv = roundf(255 * (v - minv) / w);
    if (pv < 0) {
        return 0;
    }
    if (pv > 255) {
        return 255;
    }
    return pv;
}
This method maps each raw value to a presentation value pv between 0 and 255, using the window width w and window level h. The default value of w is 1600 and the default value of h is -400. Each pv then becomes an RGB triple (pv, pv, pv) used to build the UIImage.
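For example, with w = 1600 and h = -400 the window runs from minv = -400 - 800 = -1200 up to maxv = -400 + 800 = 400, so a raw value of 0 maps to pv = 255 * (0 - (-1200)) / 1600 = 191, anything at or below -1200 returns 0, and anything at or above 400 returns 255.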
for (unsigned i = 0; i < data.length / 2; i++) {
    NSData *intData = [data subdataWithRange:NSMakeRange(i * 2, 2)];
    // Read the two bytes as a signed 16-bit value; dereferencing an int*
    // here (as in the original) would read 4 bytes past the 2-byte range.
    const SInt16 *p = (const SInt16 *)[intData bytes];
    [arrray addObject:@([self getPresentValueV:*p AndWidth:1600 AndHeight:-400])];
}
So I want to choose different w and h values, feed them through the getPresentValueV method above, and use the resulting pv values to build an RGB (pv, pv, pv) array and generate a new picture.
- (UIImage *)imageWithGrayArray:(NSArray *)array {
    const int width = 512;
    const int height = 512;
    NSUInteger bytesPerPixel = 4;
    NSUInteger bytesPerRow = bytesPerPixel * width;
    NSUInteger bitsPerComponent = 8;
    UInt32 *pixels = (UInt32 *)malloc(height * width * sizeof(UInt32));
    memset(pixels, 0, height * width * sizeof(UInt32));
    for (int i = 0; i < array.count; i++) {
        NSArray *subArray = array[i];
        for (int j = 0; j < subArray.count; j++) {
            // For gray, the same pv must go into R, G and B; the original
            // passed pv only as red, which tints the image.
            // (makeRGBAColor is assumed to pack (r, g, b, a) into a UInt32.)
            int pv = (int)[subArray[j] integerValue];
            *(pixels + i * width + j) = makeRGBAColor(pv, pv, pv, 255);
        }
    }
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(pixels,
                                                 width, height,
                                                 bitsPerComponent, bytesPerRow, colorSpace,
                                                 kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGImageRef cgImage = CGBitmapContextCreateImage(context); // was leaked inline in the original
    UIImage *image = [UIImage imageWithCGImage:cgImage];
    CGImageRelease(cgImage);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    free(pixels);
    return image;
}
I build a two-dimensional array and generate a new picture from it, but the result is not correct.
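One way to sidestep the RGBA packing entirely (a sketch under the assumption that the pv values are already a flat 0-255 array in row-major order): write them into an 8-bit buffer and reuse the grayscale bitmap-context approach from the first snippet. The method name is ours:

// Sketch: build a gray UIImage from a flat array of 0-255 pv values
// (hypothetical helper; assumes 512 x 512 values in row-major order).
- (UIImage *)imageFromPresentationValues:(NSArray *)pvs {
    const int width = 512;
    const int height = 512;
    uint8_t *gray = malloc(width * height);
    for (int i = 0; i < width * height && i < (int)pvs.count; i++) {
        gray[i] = (uint8_t)[pvs[i] integerValue];
    }
    CGColorSpaceRef space = CGColorSpaceCreateDeviceGray();
    CGContextRef ctx = CGBitmapContextCreate(gray, width, height,
                                             8,     // bits per component
                                             width, // one byte per pixel
                                             space, kCGImageAlphaNone);
    CGImageRef cgImage = CGBitmapContextCreateImage(ctx);
    UIImage *image = [UIImage imageWithCGImage:cgImage];
    CGImageRelease(cgImage);
    CGContextRelease(ctx);
    CGColorSpaceRelease(space);
    free(gray);
    return image;
}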

Extract subimage of CVImageBufferRef

I simply want to extract a small area of a YUV 420 image. That is, create a CVImageBufferRef from a CVImageBufferRef which only contains a rectangular part of the original image.
Here is what I tried so far:
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBufferRef fromConnection:(AVCaptureConnection *)connection
{
    // callback from AVCaptureOutput
    CVImageBufferRef imageBufferRef = CMSampleBufferGetImageBuffer(sampleBufferRef);
    if (imageBufferRef)
    {
        // Take a subset of the buffer to create a smaller image
        CVPixelBufferLockBaseAddress(imageBufferRef, 0);
        size_t widthY = CVPixelBufferGetWidthOfPlane(imageBufferRef, 0);
        size_t widthUV = CVPixelBufferGetWidthOfPlane(imageBufferRef, 1);
        size_t heightY = CVPixelBufferGetHeightOfPlane(imageBufferRef, 0);
        size_t heightUV = CVPixelBufferGetHeightOfPlane(imageBufferRef, 1);
        size_t cropHeightY = 320;
        size_t cropWidthY = 320;
        size_t cropHeightUV = cropHeightY / 2;
        size_t cropWidthUV = cropWidthY;
        size_t cropY_X0 = widthY / 2 - (cropWidthY / 2);
        size_t cropY_Y0 = heightY / 2 - (cropHeightY / 2);
        size_t cropUV_X0 = widthUV / 2 - (cropWidthUV / 2);
        size_t cropUV_Y0 = heightUV / 2 - (cropHeightUV / 2);
        unsigned char *baseAddressY = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 0);
        unsigned char *baseAddressUV = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 1);
        size_t bytesPerRowY = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 0);
        size_t bytesPerRowUV = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 1);
        size_t pixelBytesY = bytesPerRowY / widthY;
        size_t pixelBytesUV = bytesPerRowUV / widthUV;
        unsigned char *startPosY = baseAddressY + (cropY_Y0 * bytesPerRowY + cropY_X0 * pixelBytesY);
        unsigned char *startPosUV = baseAddressUV + (cropUV_Y0 * bytesPerRowUV + cropUV_X0 * pixelBytesUV);
        size_t bytesPerRowOut = cropWidthY * pixelBytesY;
        size_t sizeY = bytesPerRowOut * cropHeightY;
        size_t sizeUV = bytesPerRowOut * cropHeightUV;
        unsigned char *pixelY = (unsigned char *)malloc(sizeY);
        unsigned char *pixelUV = (unsigned char *)malloc(sizeUV);
        for (int i = 0; i < cropHeightY; ++i) {
            memcpy(pixelY + i * bytesPerRowOut, startPosY + i * bytesPerRowY, bytesPerRowOut);
        }
        for (int i = 0; i < cropHeightUV; ++i) {
            memcpy(pixelUV + i * bytesPerRowOut, startPosUV + i * bytesPerRowUV, bytesPerRowOut);
        }
        void *baseAddresses[2] = {pixelY, pixelUV};
        size_t planeWidths[2] = {cropWidthY, cropWidthUV};
        size_t planeHeights[2] = {cropHeightY, cropHeightUV};
        size_t planeBytesPerRow[2] = {bytesPerRowOut, bytesPerRowOut};
        // Create a new CVImageBufferRef from pixelY and pixelUV
        // ('420v' is kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange)
        CVPixelBufferRef outBuff;
        CVPixelBufferCreateWithPlanarBytes(NULL, cropWidthY, cropHeightY, '420v', NULL, 0, 2, baseAddresses, planeWidths, planeHeights, planeBytesPerRow, NULL, NULL, NULL, &outBuff);
        if (logCameraSettings) {
            NSLog(@"Original Image Size:\n width:%zu\n height:%zu\n", widthY, heightY);
            size_t outWidthY = CVPixelBufferGetWidthOfPlane(outBuff, 0);
            size_t outHeightY = CVPixelBufferGetHeightOfPlane(outBuff, 0);
            NSLog(@"Modified Image Size:\n width:%zu\n height:%zu\n", outWidthY, outHeightY);
        }
        // Here would be the place where I actually want to do something with the image
        // TEST: show image (in debugger in following method)
        [self convertToUIImage:imageBufferRef]; // --> works
        [self convertToUIImage:outBuff];        // --> only gray, does not work
        // Release the allocated memory
        CVPixelBufferUnlockBaseAddress(imageBufferRef, 0);
        free(pixelY);
        free(pixelUV);
    }
}
- (void)convertToUIImage:(CVImageBufferRef)imageBuffer
{
    CIImage *ciImage = [CIImage imageWithCVPixelBuffer:imageBuffer];
    CIContext *temporaryContext = [CIContext contextWithOptions:nil];
    CGImageRef videoImage = [temporaryContext
                             createCGImage:ciImage
                             fromRect:CGRectMake(0, 0,
                                                 CVPixelBufferGetWidth(imageBuffer),
                                                 CVPixelBufferGetHeight(imageBuffer))];
    // Inspect the following UIImage in the debugger.
    UIImage *image = [[UIImage alloc] initWithCGImage:videoImage];
    CGImageRelease(videoImage);
}
In the above code I created a small method, convertToUIImage, whose only purpose is to let me inspect the CVImageBufferRef I created, as a UIImage in the debugger.
Inspecting imageBufferRef shows me the correct camera feed.
Inspecting outBuff, however, does not show me a small area of that camera feed, but an all-gray patch of the correct size.
So my questions are:
What am I doing wrong here?
Is this even the correct way to achieve my goal?
Any help is really appreciated. Thanks in advance.
Here is how I solved it:
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBufferRef fromConnection:(AVCaptureConnection *)connection
{
    // callback from AVCaptureOutput
    CVImageBufferRef imageBufferRef = CMSampleBufferGetImageBuffer(sampleBufferRef);
    if (imageBufferRef)
    {
        // Take a subset of the buffer to create a smaller image
        CVPixelBufferLockBaseAddress(imageBufferRef, 0);
        size_t widthY = CVPixelBufferGetWidthOfPlane(imageBufferRef, 0);
        size_t widthUV = CVPixelBufferGetWidthOfPlane(imageBufferRef, 1);
        size_t heightY = CVPixelBufferGetHeightOfPlane(imageBufferRef, 0);
        size_t heightUV = CVPixelBufferGetHeightOfPlane(imageBufferRef, 1);
        size_t cropHeightY = 500;
        size_t cropWidthY = 500;
        size_t cropHeightUV = cropHeightY / 2;
        size_t cropWidthUV = cropWidthY;
        size_t cropY_X0 = widthY / 2 - (cropWidthY / 2);
        size_t cropY_Y0 = heightY / 2 - (cropHeightY / 2);
        size_t cropUV_X0 = widthUV / 2 - (cropWidthUV / 2);
        size_t cropUV_Y0 = heightUV / 2 - (cropHeightUV / 2);
        unsigned char *baseAddressY = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 0);
        unsigned char *baseAddressUV = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 1);
        size_t bytesPerRowY = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 0);
        size_t bytesPerRowUV = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 1);
        size_t pixelBytesY = bytesPerRowY / widthY;
        size_t pixelBytesUV = bytesPerRowUV / widthUV;
        unsigned char *startPosY = baseAddressY + (cropY_Y0 * bytesPerRowY + cropY_X0 * pixelBytesY);
        unsigned char *startPosUV = baseAddressUV + (cropUV_Y0 * bytesPerRowUV + cropUV_X0 * pixelBytesUV);
        size_t bytesPerRowOut = cropWidthY * pixelBytesY;
        size_t sizeY = bytesPerRowOut * cropHeightY;
        size_t sizeUV = bytesPerRowOut * cropHeightUV;
        unsigned char *pixelY = (unsigned char *)malloc(sizeY);
        unsigned char *pixelUV = (unsigned char *)malloc(sizeUV);
        for (int i = 0; i < cropHeightY; ++i) {
            memcpy(pixelY + i * bytesPerRowOut, startPosY + i * bytesPerRowY, bytesPerRowOut);
        }
        for (int i = 0; i < cropHeightUV; ++i) {
            memcpy(pixelUV + i * bytesPerRowOut, startPosUV + i * bytesPerRowUV, bytesPerRowOut);
        }
        void *baseAddresses[2] = {pixelY, pixelUV};
        size_t planeWidths[2] = {cropWidthY, cropWidthUV};
        size_t planeHeights[2] = {cropHeightY, cropHeightUV};
        size_t planeBytesPerRow[2] = {bytesPerRowOut, bytesPerRowOut};
        // Transform input to UIImage
        UIImage *inputAsUIImage = [self convertToUIImage:imageBufferRef];
        // Extract subimage of UIImage
        CGRect fromRect = CGRectMake(cropY_X0, cropY_Y0, cropWidthY, cropHeightY); // or whatever rectangle
        CGImageRef drawImage = CGImageCreateWithImageInRect(inputAsUIImage.CGImage, fromRect);
        UIImage *newImage = [UIImage imageWithCGImage:drawImage];
        CGImageRelease(drawImage);
        // Convert UIImage back to CVImageBufferRef
        // 1. Create a CIImage with the underlying CGImage encapsulated by the UIImage (referred to as 'image'):
        CIImage *inputImage = [CIImage imageWithCGImage:newImage.CGImage];
        // 2. Create a CIContext:
        CIContext *ciContext = [CIContext contextWithCGContext:UIGraphicsGetCurrentContext() options:nil];
        // 3. Render the CIImage to a CVPixelBuffer (referred to as 'outputBuffer'):
        CVPixelBufferRef outputBuffer;
        CVPixelBufferCreateWithPlanarBytes(NULL, cropWidthY, cropHeightY, '420v', NULL, 0, 2, baseAddresses, planeWidths, planeHeights, planeBytesPerRow, NULL, NULL, NULL, &outputBuffer);
        [ciContext render:inputImage toCVPixelBuffer:outputBuffer];
        if (logCameraSettings) {
            NSLog(@"Original Image Size:\n width:%zu\n height:%zu\n", widthY, heightY);
            size_t outWidthY = CVPixelBufferGetWidthOfPlane(outputBuffer, 0);
            size_t outHeightY = CVPixelBufferGetHeightOfPlane(outputBuffer, 0);
            NSLog(@"Modified Image Size:\n width:%zu\n height:%zu\n", outWidthY, outHeightY);
        }
        // Do something with it here
        // Release the allocated memory
        CVPixelBufferUnlockBaseAddress(imageBufferRef, 0);
        free(pixelY);
        free(pixelUV);
    }
}
- (UIImage *)convertToUIImage:(CVImageBufferRef)imageBuffer
{
    CIImage *ciImage = [CIImage imageWithCVPixelBuffer:imageBuffer];
    CIContext *temporaryContext = [CIContext contextWithOptions:nil];
    CGImageRef videoImage = [temporaryContext
                             createCGImage:ciImage
                             fromRect:CGRectMake(0, 0,
                                                 CVPixelBufferGetWidth(imageBuffer),
                                                 CVPixelBufferGetHeight(imageBuffer))];
    UIImage *image = [[UIImage alloc] initWithCGImage:videoImage];
    CGImageRelease(videoImage);
    return image;
}
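For what it's worth, a commonly cited cause for the all-gray result in the first attempt is that CVPixelBufferCreateWithPlanarBytes produces a buffer without IOSurface backing, which Core Image may fail to render from. An alternative that avoids the UIImage round trip is to allocate the destination with CVPixelBufferCreate (requesting IOSurface backing) and copy the cropped rows plane by plane. A minimal sketch, assuming even crop offsets/sizes and the '420v' bi-planar format; the function name is ours:

#import <CoreVideo/CoreVideo.h>

// Sketch: crop a '420v' (kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange)
// buffer by copying rows into a freshly allocated, IOSurface-backed buffer.
// cropX, cropY, cropW, cropH are assumed even and inside the source bounds.
static CVPixelBufferRef CreateCroppedPixelBuffer(CVPixelBufferRef src,
                                                 size_t cropX, size_t cropY,
                                                 size_t cropW, size_t cropH)
{
    NSDictionary *attrs = @{ (id)kCVPixelBufferIOSurfacePropertiesKey : @{} };
    CVPixelBufferRef dst = NULL;
    if (CVPixelBufferCreate(kCFAllocatorDefault, cropW, cropH,
                            kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange,
                            (__bridge CFDictionaryRef)attrs, &dst) != kCVReturnSuccess) {
        return NULL;
    }
    CVPixelBufferLockBaseAddress(src, kCVPixelBufferLock_ReadOnly);
    CVPixelBufferLockBaseAddress(dst, 0);
    for (size_t plane = 0; plane < 2; plane++) {
        unsigned char *srcBase = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(src, plane);
        unsigned char *dstBase = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(dst, plane);
        size_t srcBPR = CVPixelBufferGetBytesPerRowOfPlane(src, plane);
        size_t dstBPR = CVPixelBufferGetBytesPerRowOfPlane(dst, plane);
        // Plane 1 holds interleaved CbCr at half vertical resolution; with an
        // even cropX its horizontal byte offset equals the luma byte offset.
        size_t firstRow = (plane == 0) ? cropY : cropY / 2;
        size_t rowCount = (plane == 0) ? cropH : cropH / 2;
        for (size_t r = 0; r < rowCount; r++) {
            memcpy(dstBase + r * dstBPR,
                   srcBase + (firstRow + r) * srcBPR + cropX,
                   cropW);
        }
    }
    CVPixelBufferUnlockBaseAddress(dst, 0);
    CVPixelBufferUnlockBaseAddress(src, kCVPixelBufferLock_ReadOnly);
    return dst; // caller releases with CVPixelBufferRelease
}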

For iOS, how to use vImage to convert an ARGB image to a gray image

I am new to iOS app development, and I hit a problem: how to use vImage to convert an ARGB image to a gray image. I have already implemented this conversion by other means, but I found in the Accelerate framework reference that vImage can do this work.
I tried the method below, but there is no conversion result at all. I attach the code here; can you help me?
- (UIImage *)rgbTograyByvImage
{
    const size_t width = self.size.width * self.scale;
    const size_t height = self.size.height * self.scale;
    const size_t bytesPerRow = width * 4;
    CGColorSpaceRef space = CGColorSpaceCreateDeviceRGB();
    CGContextRef bmContext = CGBitmapContextCreate(NULL, width, height, 8, bytesPerRow, space, kCGBitmapByteOrderDefault | kCGImageAlphaPremultipliedFirst);
    CGColorSpaceRelease(space);
    if (!bmContext) {
        return nil;
    }
    CGContextDrawImage(bmContext, (CGRect){.origin.x = 0.0f, .origin.y = 0.0f, .size.width = width, .size.height = height}, self.CGImage);
    UInt32 *data = (UInt32 *)CGBitmapContextGetData(bmContext);
    if (!data) {
        CGContextRelease(bmContext);
        return nil;
    }
    int Alpha = 0;
    int Red = 1;
    int Green = 2;
    int Blue = 3;
    UInt8 *alphaSpace = malloc(sizeof(UInt8) * width * height);
    UInt8 *redSpace = malloc(sizeof(UInt8) * width * height);
    UInt8 *greenSpace = malloc(sizeof(UInt8) * width * height);
    UInt8 *blueSpace = malloc(sizeof(UInt8) * width * height);
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            UInt8 *argbPixel = (UInt8 *)(&data[y * width + x]);
            alphaSpace[y * width + x] = argbPixel[Alpha];
            redSpace[y * width + x] = argbPixel[Red];
            greenSpace[y * width + x] = argbPixel[Green];
            blueSpace[y * width + x] = argbPixel[Blue];
        }
    }
    vImage_Buffer argbImageBuffer = {(UInt8 *)data, height, width, bytesPerRow};
    vImage_Buffer aImageBuffer = {alphaSpace, height, width, width};
    vImage_Buffer rImageBuffer = {redSpace, height, width, width};
    vImage_Buffer gImageBuffer = {greenSpace, height, width, width};
    vImage_Buffer bImageBuffer = {blueSpace, height, width, width};
    vImage_Error error;
    error = vImageConvert_ARGB8888toPlanar8(&argbImageBuffer, &aImageBuffer, &rImageBuffer, &gImageBuffer, &bImageBuffer, kvImageNoFlags);
    if (error != kvImageNoError) {
        NSLog(@"%s, vImage error %zd", __PRETTY_FUNCTION__, error);
    }
    // Note: the image is created from bmContext, whose pixels were never
    // modified; the planar buffers above are filled but never used, which
    // is why no conversion shows up.
    CGImageRef grayImage = CGBitmapContextCreateImage(bmContext);
    UIImage *gray = [UIImage imageWithCGImage:grayImage];
    CGContextRelease(bmContext);
    CGImageRelease(grayImage);
    free(alphaSpace);
    free(redSpace);
    free(greenSpace);
    free(blueSpace);
    return gray;
}
You want vImageMatrixMultiply_ARGB8888ToPlanar8, new for iOS 9.
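For reference, a minimal sketch of that call (the helper name and the Rec. 601 luma coefficients are our own choices; the matrix entries follow the ARGB channel order of the source buffer, and the products are divided by `divisor` after the multiply):

#import <Accelerate/Accelerate.h>

// Sketch: convert an ARGB8888 vImage buffer straight to an 8-bit gray plane.
// Requires iOS 9. Coefficients are Rec. 601 luma weights scaled by 256,
// in {A, R, G, B} order to match the source's channel layout.
static vImage_Error ConvertARGBToGray(const vImage_Buffer *argb, const vImage_Buffer *gray)
{
    const int16_t matrix[4] = {0, 77, 151, 29}; // 0*A + 0.299*R + 0.587*G + 0.114*B
    const int32_t divisor = 256;
    return vImageMatrixMultiply_ARGB8888ToPlanar8(argb, gray, matrix, divisor,
                                                  NULL, // no pre-bias
                                                  0,    // no post-bias
                                                  kvImageNoFlags);
}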

iOS: Feather with glow/shadow effect on UIImage

I am trying to find a way to apply a feather effect with a shadow around a UIImage (not the UIImageView) in iOS, and I haven't found any good solution yet. My idea is that it might be done by masking, but I am very new to Core Graphics.
If anyone can help, thanks.
OK so:
I was looking for the same thing, but unfortunately with no luck, so I decided to create my own feather(ing) code.
Add this code to a UIImage category, then call [image featherImageWithDepth:4] (4 is just an example).
Try to keep the depth as low as possible.
//==============================================================================
- (UIImage *)featherImageWithDepth:(int)featherDepth {
    // First get the image into your data buffer
    CGImageRef imageRef = [self CGImage];
    NSUInteger width = CGImageGetWidth(imageRef);
    NSUInteger height = CGImageGetHeight(imageRef);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    unsigned char *rawData = (unsigned char *)calloc(height * width * 4, sizeof(unsigned char));
    NSUInteger bytesPerPixel = 4;
    NSUInteger bytesPerRow = bytesPerPixel * width;
    NSUInteger bitsPerComponent = 8;
    CGContextRef context = CGBitmapContextCreate(rawData, width, height,
                                                 bitsPerComponent, bytesPerRow, colorSpace,
                                                 kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGColorSpaceRelease(colorSpace);
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
    // Now rawData contains the image data in the RGBA8888 pixel format.
    NSUInteger byteIndex = 0;
    NSUInteger rawDataCount = width * height;
    for (int i = 0; i < rawDataCount; ++i, byteIndex += bytesPerPixel) {
        NSInteger alphaIndex = byteIndex + 3;
        if (rawData[alphaIndex] > 100) {
            for (int row = 1; row <= featherDepth; row++) {
                if (testBorderLayer((long)alphaIndex,
                                    rawData,
                                    (long)rawDataCount,
                                    (long)width,
                                    (long)height,
                                    row)) {
                    int destinationAlpha = 255 / (featherDepth + 1) * (row + 1);
                    double alphaDiv = (double)destinationAlpha / (double)rawData[alphaIndex];
                    rawData[alphaIndex] = destinationAlpha;
                    rawData[alphaIndex - 1] = (double)rawData[alphaIndex - 1] * alphaDiv;
                    rawData[alphaIndex - 2] = (double)rawData[alphaIndex - 2] * alphaDiv;
                    rawData[alphaIndex - 3] = (double)rawData[alphaIndex - 3] * alphaDiv;
                    // Debug visualization of the feather rings (kept from the original):
                    // switch (row) {
                    //     case 1:  rawData[alphaIndex-1] = 255; rawData[alphaIndex-2] = 0;   rawData[alphaIndex-3] = 0;   break;
                    //     case 2:  rawData[alphaIndex-1] = 0;   rawData[alphaIndex-2] = 255; rawData[alphaIndex-3] = 0;   break;
                    //     case 3:  rawData[alphaIndex-1] = 0;   rawData[alphaIndex-2] = 0;   rawData[alphaIndex-3] = 255; break;
                    //     case 4:  rawData[alphaIndex-1] = 127; rawData[alphaIndex-2] = 127; rawData[alphaIndex-3] = 0;   break;
                    //     case 5:  rawData[alphaIndex-1] = 127; rawData[alphaIndex-2] = 0;   rawData[alphaIndex-3] = 127;
                    //     case 6:  rawData[alphaIndex-1] = 0;   rawData[alphaIndex-2] = 127; rawData[alphaIndex-3] = 127; break;
                    //     default: break;
                    // }
                    break;
                }
            }
        }
    }
    CGImageRef newCGImage = CGBitmapContextCreateImage(context);
    UIImage *result = [UIImage imageWithCGImage:newCGImage scale:[self scale] orientation:UIImageOrientationUp];
    CGImageRelease(newCGImage);
    CGContextRelease(context);
    free(rawData);
    return result;
}
//==============================================================================
bool testBorderLayer(long byteIndex,
                     unsigned char *imageData,
                     long dataSize,
                     long pWidth,
                     long pHeight,
                     int border) {
    int width = border * 2 + 1;
    int height = width - 2;
    // run thru border pixels
    // |-|
    // | |
    // |-|
    // top, bottom - horizontal
    for (int i = 1; i < width - 1; i++) {
        long topIndex = byteIndex + 4 * (-border * pWidth - border + i);
        long botIndex = byteIndex + 4 * ( border * pWidth - border + i);
        long destColl = byteIndex / 4 % pWidth - border + i;
        if (destColl > 1 && destColl < pWidth) {
            if (testPoint(topIndex, imageData, dataSize) ||
                testPoint(botIndex, imageData, dataSize)) {
                return true;
            }
        }
    }
    // left, right - vertical
    if (byteIndex / 4 % pWidth < pWidth - border - 1) {
        for (int k = 0; k < height; k++) {
            long rightIndex = byteIndex + 4 * (border - border * pWidth + pWidth * k);
            if (testPoint(rightIndex, imageData, dataSize)) {
                return true;
            }
        }
    }
    if (byteIndex / 4 % pWidth > border) {
        for (int k = 0; k < height; k++) {
            long leftIndex = byteIndex + 4 * (-border - border * pWidth + pWidth * k);
            if (testPoint(leftIndex, imageData, dataSize)) {
                return true;
            }
        }
    }
    return false;
}
//==============================================================================
bool testPoint(long pointIndex, unsigned char *imageData, long dataSize) {
    // dataSize is in pixels; the image buffer holds dataSize * 4 bytes
    if (pointIndex >= 0 && pointIndex < dataSize * 4 - 1 &&
        imageData[pointIndex] < 30) {
        return true;
    }
    return false;
}
//==============================================================================
Sorry for the sparse comments ;)
I suggest looking at the CIFilter list put out by Apple; it has some pretty decent filters:
/reference/CoreImageFilterReference
And also check out GPUImage:
https://github.com/BradLarson/GPUImage
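Separately, if the goal is just the glow/shadow half of the effect, Core Graphics can bake a soft shadow into a new image in a few lines. A minimal sketch; the method name, inset, blur radius, and glow color are illustrative, not from the original post:

// Sketch: return a copy of `image` with a soft white glow drawn around it.
// The 20pt inset and 10pt blur are illustrative values.
- (UIImage *)glowingImageWithImage:(UIImage *)image {
    CGFloat inset = 20.0; // room for the glow to spill beyond the image edges
    CGSize size = CGSizeMake(image.size.width + inset * 2, image.size.height + inset * 2);
    UIGraphicsBeginImageContextWithOptions(size, NO, image.scale);
    CGContextRef ctx = UIGraphicsGetCurrentContext();
    // Every draw below casts a blurred, non-offset "shadow", i.e. a glow.
    CGContextSetShadowWithColor(ctx, CGSizeZero, 10.0,
                                [UIColor colorWithWhite:1.0 alpha:0.8].CGColor);
    [image drawInRect:CGRectMake(inset, inset, image.size.width, image.size.height)];
    UIImage *result = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return result;
}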

Apply negative RGB color values to an image

I want to apply negative RGB values to an image:
R: -8
G: -89
B: -76
I know RGB values lie between 0 and 255, but I want to apply negative values as a kind of offset, and I don't know how to do it.
I am using the following link for reference:
http://blog.swishzone.com/?p=9606
I am using:
red value = (original red value * redMultiplier) + redOffset
green value = (original green value * greenMultiplier) + greenOffset
blue value = (original blue value * blueMultiplier) + blueOffset
But it is not working the way I want.
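For what it's worth, that formula already supports negative offsets: with redMultiplier = 1 and redOffset = -8, an original red of 100 gives (100 * 1) + (-8) = 92, while an original of 5 gives -3, which must be clamped to 0. Without clamping, storing -3 into an unsigned byte wraps around to 253 and produces the wrong color.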
I found one solution for changing the image color using negative RGB values. The following method does it without affecting the image's alpha.
- (void)ColorChangeProcessing:(int)redvalue greenValue:(int)greenvalue blueValue:(int)bluevalue imageUsed:(UIImageView *)image
{
    CGContextRef ctx;
    CGImageRef imageRef = [image.image CGImage];
    NSUInteger width = CGImageGetWidth(imageRef);
    NSUInteger height = CGImageGetHeight(imageRef);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    unsigned char *rawData = malloc(height * width * 4);
    NSUInteger bytesPerPixel = 4;
    NSUInteger bytesPerRow = bytesPerPixel * width;
    NSUInteger bitsPerComponent = 8;
    CGContextRef context = CGBitmapContextCreate(rawData, width, height,
                                                 bitsPerComponent, bytesPerRow, colorSpace,
                                                 kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
    CGContextRelease(context);
    int byteIndex = 0, RED = redvalue, GREEN = greenvalue, BLUE = bluevalue;
    for (int ii = 0; ii < width * height; ++ii)
    {
        // Skip fully transparent and pure black pixels. (The original
        // compared against '/0', an invalid character literal; the intent
        // was a comparison with 0.)
        if (rawData[byteIndex + 3] != 0 &&
            (rawData[byteIndex] != 0 || rawData[byteIndex + 1] != 0 || rawData[byteIndex + 2] != 0))
        {
            // Add the (possibly negative) offset to each channel, clamped to 0...255
            int offsets[3] = {RED, GREEN, BLUE};
            for (int c = 0; c < 3; c++)
            {
                int v = rawData[byteIndex + c] + offsets[c];
                rawData[byteIndex + c] = (unsigned char)(v > 255 ? 255 : (v < 0 ? 0 : v));
            }
        }
        byteIndex += 4;
    }
    // Rebuild the context with the same layout used to fill rawData; the
    // original passed the source image's bytes-per-row and color space,
    // which need not match rawData's layout.
    ctx = CGBitmapContextCreate(rawData,
                                width,
                                height,
                                bitsPerComponent,
                                bytesPerRow,
                                colorSpace,
                                kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGColorSpaceRelease(colorSpace);
    CGImageRef NewimageRef = CGBitmapContextCreateImage(ctx);
    UIImage *rawImage = [UIImage imageWithCGImage:NewimageRef];
    tempViewForProcessingColors = [[arrayWithDictionary objectAtIndex:ImageCount] valueForKey:@"imageView"];
    // NSLog(@"Name: %@ --- Red: %d --- Green: %d --- Blue: %d", tempViewForProcessingColors.accessibilityLabel, RED, GREEN, BLUE);
    tempViewForProcessingColors.image = rawImage;
    //ImageCount++;
    CGContextRelease(ctx);
    free(rawData);
    CGImageRelease(NewimageRef);
}
