In my program,
I have a method which create random generated imaged,
Just notice with instruments that it׳s leaking memory but i׳m not sure why.
Here is the part that leaking:
int * getRandomImage(int width, int height) {
int * bytes = new int[height * width]; //this line is pointed by instruments
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
bytes[i * width + j] = arc4random();
}
}
return bytes
}
-(void) loadRandomImage {
const int height = 400;
const int width = 400;
int * bytes = getRandomImage(width, height);
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, bytes, width * height, NULL);
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
int bitsPerComponent = 8;
int bitsPerPixel = 32;
int bytesPerRow = 4 * width;
CGImageRef imageRef = CGImageCreate(width, height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, kCGBitmapByteOrderDefault, provider, NULL, NO, kCGRenderingIntentDefault);
CGDataProviderRelease(provider);
UIImage *newUIImage = [UIImage imageWithCGImage:imageRef];
self.imageView.image = newUIImage;
CGImageRelease(imageRef);
}
Related
Here is a binary CT grayscale data down the file/date_byte1
The aim is to change the window wide window to change the CT image
The two bytes in the data are 1 pixels, and the width and height are 512 pixels
How to transform the pixel array RGB array into a bitmap
- (void)viewDidLoad {
[super viewDidLoad];
NSData *data = [[NSData alloc]initWithContentsOfFile:[[NSBundle mainBundle] pathForResource:#"data_byte1" ofType:#""]];
Byte *testByte = (Byte *)[data bytes];
self.imageView.image = [self imageFromGRAYBytes:testByte imageSize:CGSizeMake(512, 512)];
}
- (UIImage *)imageFromGRAYBytes:(unsigned char *)imageBytes imageSize:(CGSize)imageSize {
CGImageRef imageRef = [self imageRefFromGRAYBytes:imageBytes imageSize:imageSize];
UIImage *image = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
return image;
}
- (CGImageRef)imageRefFromGRAYBytes:(unsigned char *)imageBytes imageSize:(CGSize)imageSize {
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
CGContextRef context = CGBitmapContextCreate(imageBytes,
imageSize.width,
imageSize.height,
16,
imageSize.width * 2,
colorSpace,
kCGImageAlphaNone);
CGImageRef imageRef = CGBitmapContextCreateImage(context);
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
return imageRef;
}
I'm going through the following methodGet the picture
Obtained original diagram
But I want to change the display of the picture through this method
- (int)getPresentValueV:(int)v AndWidth:(int)w AndHeight:(int)h{
int minv = h - roundf(w/2);
if (v < minv) {
return 0;
}
int maxv = h + roundf(w/2);
if (v > maxv) {
return 255;
}
int pv = roundf(255*(v - minv)/w);
if (pv < 0 ){
return 0;
}
if( pv > 255 ){
return 255;
}
return pv;
}
This method changes the output of PV use w h, which is less than 255, The default value of w 1600 . The default value of h -400.Next based on PV as an array of RGB (PV, PV, PV) to create UIimage.
for (unsigned i = 0; i < data.length/2; i++) {
NSData *intData = [data subdataWithRange:NSMakeRange(i*2, 2)];
[arrray addObject:#([self getPresentValueV: *(int*)([intData bytes]) AndWidth:1600 AndHeight:-400])];
}
So I want to choose the different w h by the getPresentValueV method above, and get the PV to generate the RGB (PV, PV, PV) array to generate a new picture.
- (UIImage *)imageWithGrayArray:(NSArray *)array {
const int width = 512;
const int height = 512;
NSUInteger bytesPerPixel = 4;
NSUInteger bytesPerRow = bytesPerPixel * width;
NSUInteger bitsPerComponent = 8;
UInt32 * pixels = (UInt32 *)malloc(height * width * sizeof(UInt32));
memset(pixels, 0, height * width * sizeof(UInt32));
for (int i = 0; i < array.count;i++ ) {
NSArray *subArray = array[i];
for ( int j = 0 ; j < subArray.count; j++) {
*(pixels +i *width + j) = makeRGBAColor([subArray[j] integerValue],0, 0, 255);
}
}
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(pixels,
width, height,
bitsPerComponent, bytesPerRow, colorSpace,
kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
UIImage *image = [UIImage imageWithCGImage:CGBitmapContextCreateImage(context)];
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
free(pixels);
return image;
}
I generated a two-dimensional array to generate a new picture, but it was not the correct result.
I am getting the following error. and couldn't find anything related on google search.
Zero results..
I mean.. seriously , thats really weird.
ERROR :-
Cannot initialize a variable of type 'UInt8 *' (aka 'unsigned char *') with an rvalue of type 'void * _Nullable'
MY CODE :-
UIImage *image = self.image;
CGImageRef originalImage = [image CGImage];
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef bitmapContext =
CGBitmapContextCreate(NULL,CGImageGetWidth(originalImage),CGImageGetHeight(originalImage),
8,CGImageGetWidth(originalImage)*4,colorSpace,kCGImageAlphaPremultipliedLast);
CGColorSpaceRelease(colorSpace);
CGContextDrawImage(bitmapContext, CGRectMake(0, 0,
CGBitmapContextGetWidth(bitmapContext),CGBitmapContextGetHeight(bitmapContext)),
originalImage);
UInt8 *data = CGBitmapContextGetData(bitmapContext);
SCREEN SHOT :-
PS:-
I am using this code in .mm file, if it helps.
+ (unsigned char ) convertUIImageToBitmapRGBA8:(UIImage ) image {
CGImageRef imageRef = image.CGImage;
// Create a bitmap context to draw the uiimage into
CGContextRef context = [self newBitmapRGBA8ContextFromImage:imageRef];
if(!context) {
return NULL;
}
size_t width = CGImageGetWidth(imageRef);
size_t height = CGImageGetHeight(imageRef);
CGRect rect = CGRectMake(0, 0, width, height);
// Draw image into the context to get the raw image data
CGContextDrawImage(context, rect, imageRef);
// Get a pointer to the data
unsigned char bitmapData = (unsigned char )CGBitmapContextGetData(context);
// Copy the data and release the memory (return memory allocated with new)
size_t bytesPerRow = CGBitmapContextGetBytesPerRow(context);
size_t bufferLength = bytesPerRow * height;
unsigned char *newBitmap = NULL;
if(bitmapData) {
newBitmap = (unsigned char )malloc(sizeof(unsigned char) bytesPerRow * height);
if(newBitmap) { // Copy the data
for(int i = 0; i < bufferLength; ++i) {
newBitmap[i] = bitmapData[i];
}
}
free(bitmapData);
} else {
NSLog(#"Error getting bitmap pixel data\n");
}
CGContextRelease(context);
return newBitmap;
}
+ (CGContextRef) newBitmapRGBA8ContextFromImage:(CGImageRef) image {
CGContextRef context = NULL;
CGColorSpaceRef colorSpace;
uint32_t *bitmapData;
size_t bitsPerPixel = 32;
size_t bitsPerComponent = 8;
size_t bytesPerPixel = bitsPerPixel / bitsPerComponent;
size_t width = CGImageGetWidth(image);
size_t height = CGImageGetHeight(image);
size_t bytesPerRow = width * bytesPerPixel;
size_t bufferLength = bytesPerRow * height;
colorSpace = CGColorSpaceCreateDeviceRGB();
if(!colorSpace) {
NSLog(#"Error allocating color space RGB\n");
return NULL;
}
// Allocate memory for image data
bitmapData = (uint32_t *)malloc(bufferLength);
if(!bitmapData) {
NSLog(#"Error allocating memory for bitmap\n");
CGColorSpaceRelease(colorSpace);
return NULL;
}
//Create bitmap context
context = CGBitmapContextCreate(bitmapData,
width,
height,
bitsPerComponent,
bytesPerRow,
colorSpace,
kCGImageAlphaPremultipliedLast); // RGBA
if(!context) {
free(bitmapData);
NSLog(#"Bitmap context not created");
}
CGColorSpaceRelease(colorSpace);
return context;
}
+ (UIImage ) convertBitmapRGBA8ToUIImage:(unsigned char ) buffer
withWidth:(int) width
withHeight:(int) height {
size_t bufferLength = width height 4;
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer, bufferLength, NULL);
size_t bitsPerComponent = 8;
size_t bitsPerPixel = 32;
size_t bytesPerRow = 4 * width;
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
if(colorSpaceRef == NULL) {
NSLog(#"Error allocating color space");
CGDataProviderRelease(provider);
return nil;
}
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault | kCGImageAlphaPremultipliedLast;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
CGImageRef iref = CGImageCreate(width,
height,
bitsPerComponent,
bitsPerPixel,
bytesPerRow,
colorSpaceRef,
bitmapInfo,
provider, // data provider
NULL, // decode
YES, // should interpolate
renderingIntent);
uint32_t* pixels = (uint32_t*)malloc(bufferLength);
if(pixels == NULL) {
NSLog(#"Error: Memory not allocated for bitmap");
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpaceRef);
CGImageRelease(iref);
return nil;
}
CGContextRef context = CGBitmapContextCreate(pixels,
width,
height,
bitsPerComponent,
bytesPerRow,
colorSpaceRef,
bitmapInfo);
if(context == NULL) {
NSLog(#"Error context not created");
free(pixels);
}
UIImage *image = nil;
if(context) {
CGContextDrawImage(context, CGRectMake(0.0f, 0.0f, width, height), iref);
CGImageRef imageRef = CGBitmapContextCreateImage(context);
// Support both iPad 3.2 and iPhone 4 Retina displays with the correct scale
if([UIImage respondsToSelector:#selector(imageWithCGImage:scale:orientation:)]) {
float scale = [[UIScreen mainScreen] scale];
image = [UIImage imageWithCGImage:imageRef scale:scale orientation:UIImageOrientationUp];
} else {
image = [UIImage imageWithCGImage:imageRef];
}
CGImageRelease(imageRef);
CGContextRelease(context);
}
CGColorSpaceRelease(colorSpaceRef);
CGImageRelease(iref);
CGDataProviderRelease(provider);
if(pixels) {
free(pixels);
}
return image;
}
//Swift-3
CGBitmapContextGetData(bitmapContext) has been replaced by CGContext.data
let data = context.data
I am a freshman for IOS APP development, and I met a problem: " how to use vImage to convert an ARGB image to Gray image ". Actually, I have realized this conversion processing by other methods. However, I have found "vImage" could complete this work through "Accelerate Framework Reference".
I have tried this method, but there is no any conversion result. Here, I attach the code, can you help me?
-(UIImage *)rgbTograyByvImage
{
const size_t width = self.size.width * self.scale;
const size_t height = self.size.height * self.scale;
const size_t bytesPerRow = width * 4;
CGColorSpaceRef space = CGColorSpaceCreateDeviceRGB();
CGContextRef bmContext= CGBitmapContextCreate(NULL, width, height, 8, bytesPerRow, space, kCGBitmapByteOrderDefault | kCGImageAlphaPremultipliedFirst);
CGColorSpaceRelease(space);
if (!bmContext) {
return nil;
}
CGContextDrawImage(bmContext, (CGRect){.origin.x = 0.0f, .origin.y = 0.0f, .size.width = width, .size.height = height}, self.CGImage);
UInt32 *data = (UInt32 *)CGBitmapContextGetData(bmContext);
if (!data) {
CGContextRelease(bmContext);
return nil;
}
int Aphal = 0;
int Red = 1;
int Green = 2;
int Blue = 3;
UInt8 *aphalSpace = malloc(sizeof(UInt8) * width * height);
UInt8 *redSpace = malloc(sizeof(UInt8) * width * height);
UInt8 *greenSpace = malloc(sizeof(UInt8) * width * height);
UInt8 *blueSpace = malloc(sizeof(UInt8) * width * height);
for (int y=0; y<height; y++) {
for (int x=0; x<width; x++) {
UInt8 *argbPixel = (UInt8 *) (&data[y*width+x]);
aphalSpace[y*width+x] = argbPixel[Aphal];
redSpace[y*width+x] = argbPixel[Red];
greenSpace[y*width+x] = argbPixel[Green];
blueSpace[y*width+x] = argbPixel[Blue];
}
}
vImage_Buffer argbImageBuffer = {(UInt8*)data, height, width, bytesPerRow};
vImage_Buffer aImageBuffer = {aphalSpace, height, width, width};
vImage_Buffer rImageBuffer = {redSpace, height, width, width};
vImage_Buffer gImageBuffer = {greenSpace, height, width, width};
vImage_Buffer bImageBuffer = {blueSpace, height, width, width};
vImage_Error error;
error = vImageConvert_ARGB8888toPlanar8(&argbImageBuffer, &aImageBuffer, &rImageBuffer, &gImageBuffer, &bImageBuffer, kvImageNoFlags);
if (error != kvImageNoError) {
NSLog(#"%s, vImage error %zd", __PRETTY_FUNCTION__, error);
}
CGImageRef grayImage = CGBitmapContextCreateImage(bmContext);
UIImage *gray = [UIImage imageWithCGImage:grayImage];
CGContextRelease(bmContext);
CGImageRelease(grayImage);
free(aphalSpace);
free(redSpace);
free(greenSpace);
free(blueSpace);
return gray;
}
You want vImageMatrixMultiply_ARGB8888ToPlanar8, new for iOS 9.
Capturing screenshot using opengl works fine for iOS version before 7.
But while running app in ios7 it returns black screen.
Following is the piece of code which i am working with:
-(UIImage *) glToUIImage {
CGSize size = self.view.frame.size;
int image_height = (int)size.width;
int image_width = (int)size.height;
NSInteger myDataLength = image_width * image_height * 4;
// allocate array and read pixels into it.
GLubyte *buffer = (GLubyte *) malloc(myDataLength);
glReadPixels(0, 0, image_width, image_height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
GLubyte *buffer2 = (GLubyte *) malloc(myDataLength);
for(int y = 0; y < image_height; y++)
{
for(int x = 0; x < image_width * 4; x++)
{
buffer2[(image_height - 1 - y) * image_width * 4 + x] = buffer[y * 4 * image_width + x];
}
}
// make data provider with data.
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer2, myDataLength, NULL);
// prep the ingredients
int bitsPerComponent = 8;
int bitsPerPixel = 32;
int bytesPerRow = 4 * image_width;
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
// make the cgimage
CGImageRef imageRef = CGImageCreate(image_width, image_height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);
// then make the uiimage from that
UIImage *myImage = [UIImage imageWithCGImage:imageRef];
return myImage;
}
// screenshot function, combined my opengl image with background image and
// saved into Photos.
-(UIImage*)screenshot
{
UIImage *image = [self glToUIImage];
CGRect pos = CGRectMake(0, 0, image.size.width, image.size.height);
UIGraphicsBeginImageContextWithOptions(image.size, NO, [[UIScreen mainScreen]scale]);
[image drawInRect:pos];
UIImage* final = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
final = [[UIImage alloc] initWithCGImage: final.CGImage
scale: 1.0
orientation: UIImageOrientationRight];
// final picture I saved into Photos.
UIImageWriteToSavedPhotosAlbum(final, self, #selector(image:didFinishSavingWithError:contextInfo:), nil);
return final;
}
Has anybody captured screenshot using opengl for ios7?
Have resolved the issue. Updated the drawableproperties function of eaglview class:
eaglLayer.drawableProperties = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithBool:FALSE],
kEAGLDrawablePropertyRetainedBacking,
kEAGLColorFormatRGBA8,
kEAGLDrawablePropertyColorFormat,
nil];
:) :)
I want to apply negative value of RGB into image.
R: -8
G: -89
B: -76
I know that the value of RGB is in between 0 to 255 but i want to apply minus value and set offset something like that but don't know how to do.
I am using following link for reference.
http://blog.swishzone.com/?p=9606
I am using
red value = (original red value * redMultiplier) + redOffset
green value = (original green value * greenMultiplier) + greenOffset
blue value = (original blue value * blueMultiplier) + blueOffset
But it not working as i want.
Found one solution for change image color using negative RGB values. Following is the method without affecting alpha of image.
-(void)ColorChangeProcessing :(int )redvalue greenValue:(int)greenvalue blueValue:(int)bluevalue imageUsed : (UIImageView *)image
{
CGContextRef ctx;
CGImageRef imageRef = [image.image CGImage];
NSUInteger width = CGImageGetWidth(imageRef);
NSUInteger height = CGImageGetHeight(imageRef);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
unsigned char *rawData = malloc(height * width * 4);
NSUInteger bytesPerPixel = 4;
NSUInteger bytesPerRow = bytesPerPixel * width;
NSUInteger bitsPerComponent = 8;
CGContextRef context = CGBitmapContextCreate(rawData, width, height,
bitsPerComponent, bytesPerRow, colorSpace,
kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGColorSpaceRelease(colorSpace);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
CGContextRelease(context);
int byteIndex = (bytesPerRow * 0) + 0 * bytesPerPixel,RED = redvalue,GREEN=greenvalue,BLUE = bluevalue;
for (int ii = 0 ; ii < width * height ; ++ii)
{
if( rawData[byteIndex+3] != 0 && (rawData[byteIndex] != '/0' || rawData[byteIndex+1] != '/0' || rawData[byteIndex+2] != '/0' ))
{
if ((((rawData[byteIndex])+RED)) > 255)
{
rawData[byteIndex] = (char)255;
}
else if((((rawData[byteIndex])+RED)) >0)
{
rawData[byteIndex] = (char) (((rawData[byteIndex] * 1.0) + RED));
}
else
{
rawData[byteIndex] = (char)0;
}
if ((((rawData[byteIndex+1])+GREEN)) > 255)
{
rawData[byteIndex+1] = (char)255;
}
else if((((rawData[byteIndex+1])+GREEN))>0)
{
rawData[byteIndex+1] = (char) (((rawData[byteIndex+1] * 1.0) + GREEN));
}
else
{
rawData[byteIndex+1] = (char)0;
}
if ((((rawData[byteIndex+2])+BLUE)) > 255)
{
rawData[byteIndex+2] = (char)255;
}
else if((((rawData[byteIndex+2])+BLUE))>0)
{
rawData[byteIndex+2] = (char) (((rawData[byteIndex+2] * 1.0) + BLUE));
}
else
{
rawData[byteIndex+2] = (char)0;
}
}
byteIndex += 4;
}
ctx = CGBitmapContextCreate(rawData,
CGImageGetWidth( imageRef ),
CGImageGetHeight( imageRef ),
8,
CGImageGetBytesPerRow( imageRef ),
CGImageGetColorSpace( imageRef ),
kCGImageAlphaPremultipliedLast );
CGImageRef NewimageRef = CGBitmapContextCreateImage (ctx);
UIImage* rawImage = [UIImage imageWithCGImage:NewimageRef];
tempViewForProcessingColors = [[UIImageView alloc] init];
tempViewForProcessingColors = [[arrayWithDictionary objectAtIndex:ImageCount]valueForKey:#"imageView"];
// NSLog(#"Name: %# --- Red: %d --- Green: %d --- Blue: %d",tempViewForProcessingColors.accessibilityLabel,RED,GREEN,BLUE);
tempViewForProcessingColors.image = rawImage;
//ImageCount++;
CGContextRelease(ctx);
free(rawData);
CGImageRelease(NewimageRef);
}