UIImage / cv::Mat conversions with alpha channel - iOS

I'm using the following code to convert between UIImage* and cv::Mat:
- (cv::Mat)cvMatFromUIImage:(UIImage *)image
{
CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
CGFloat cols = image.size.width;
CGFloat rows = image.size.height;
cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels (color channels + alpha)
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to data
cols, // Width of bitmap
rows, // Height of bitmap
8, // Bits per component
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNoneSkipLast |
kCGBitmapByteOrderDefault); // Bitmap info flags
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
CGContextRelease(contextRef);
return cvMat;
}
and
-(UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize()*cvMat.total()];
CGColorSpaceRef colorSpace;
if (cvMat.elemSize() == 1) {
colorSpace = CGColorSpaceCreateDeviceGray();
} else {
colorSpace = CGColorSpaceCreateDeviceRGB();
}
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
// Creating CGImage from cv::Mat
CGImageRef imageRef = CGImageCreate(cvMat.cols, //width
cvMat.rows, //height
8, //bits per component
8 * cvMat.elemSize(), //bits per pixel
cvMat.step[0], //bytesPerRow
colorSpace, //colorspace
kCGImageAlphaNone|kCGBitmapByteOrderDefault,// bitmap info
provider, //CGDataProviderRef
NULL, //decode
false, //should interpolate
kCGRenderingIntentDefault //intent
);
// Getting UIImage from CGImage
UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace);
return finalImage;
}
I took these from the OpenCV documentation. I use them as follows:
UIImage *img = [UIImage imageNamed:@"transparent.png"];
UIImage *img2 = [self UIImageFromCVMat:[self cvMatFromUIImage:img]];
However, these functions lose the alpha channel information. I know it is because of the flags kCGImageAlphaNone and kCGImageAlphaNoneSkipLast; unfortunately, I couldn't find a way to keep the alpha information by changing these flags.
So, how do I convert between these two types without losing the alpha information?
Here is the image that I use:

As of OpenCV 2.4.6, you should use these built-in conversion functions:
UIImage* MatToUIImage(const cv::Mat& image);
void UIImageToMat(const UIImage* image, cv::Mat& m, bool alphaExist = false);
And don't forget to include:
opencv2/imgcodecs/ios.h
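A minimal usage sketch (assuming the transparent PNG from the question; passing alphaExist = true is what preserves the alpha channel):
#import <opencv2/imgcodecs/ios.h>
UIImage *img = [UIImage imageNamed:@"transparent.png"];
cv::Mat mat;
UIImageToMat(img, mat, true); // alphaExist = true keeps the 4th (alpha) channel
// ... process mat (CV_8UC4) ...
UIImage *img2 = MatToUIImage(mat);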

You need to not pass kCGImageAlphaNoneSkipLast, and instead pass (kCGBitmapByteOrder32Host | kCGImageAlphaPremultipliedFirst) to get premultiplied alpha in BGRA format. CoreGraphics only supports premultiplied alpha. However, you will need to check how OpenCV represents alpha in pixels to determine how to tell OpenCV that the pixels are already premultiplied. The code I have used assumes straight alpha on the OpenCV side, so you will need to be careful about that.
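As a sketch of that change applied to the question's cvMatFromUIImage (only the bitmap info flags differ; note the resulting pixels are premultiplied BGRA, so un-premultiply them yourself if your OpenCV code expects straight alpha):
cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, BGRA
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,
cols,
rows,
8,
cvMat.step[0],
colorSpace,
kCGBitmapByteOrder32Host |
kCGImageAlphaPremultipliedFirst); // premultiplied alpha, as CoreGraphics requires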

Related

OpenCV with Objective-C

With reference to OpenCV Adaptive Threshold OCR,
I have written the code below in Objective-C with OpenCV:
- (cv::Mat)cvMatFromUIImage:(UIImage *)image
{
CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
CGFloat cols = image.size.width;
CGFloat rows = image.size.height;
cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels (color channels + alpha)
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to data
cols, // Width of bitmap
rows, // Height of bitmap
8, // Bits per component
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNoneSkipLast |
kCGBitmapByteOrderDefault); // Bitmap info flags
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
CGContextRelease(contextRef);
return cvMat;
}
-(UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize()*cvMat.total()];
CGColorSpaceRef colorSpace;
if (cvMat.elemSize() == 1) {
colorSpace = CGColorSpaceCreateDeviceGray();
} else {
colorSpace = CGColorSpaceCreateDeviceRGB();
}
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
// Creating CGImage from cv::Mat
CGImageRef imageRef = CGImageCreate(cvMat.cols, //width
cvMat.rows, //height
8, //bits per component
8 * cvMat.elemSize(), //bits per pixel
cvMat.step[0], //bytesPerRow
colorSpace, //colorspace
kCGImageAlphaNone|kCGBitmapByteOrderDefault,// bitmap info
provider, //CGDataProviderRef
NULL, //decode
false, //should interpolate
kCGRenderingIntentDefault //intent
);
// Getting UIImage from CGImage
UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace);
return finalImage;
}
-(void)CalcBlockMeanVariance:(cv::Mat)Img resource:(cv::Mat)Res // blockSide = 21: set greater for a larger font on the image
{
float blockSide = 21;
cv::Mat I;
Img.convertTo(I,CV_32FC1);
Res=cv::Mat::zeros(Img.rows/blockSide,Img.cols/blockSide,CV_32FC1);
cv::Mat inpaintmask;
cv::Mat patch;
cv::Mat smallImg;
cv::Scalar m,s;
for(int i=0;i<Img.rows-blockSide;i+=blockSide)
{
for (int j=0;j<Img.cols-blockSide;j+=blockSide)
{
patch = I(cv::Range(i, i+blockSide+1), cv::Range(j, j+blockSide+1));
cv::meanStdDev(patch,m,s);
if(s[0]>0.01) // Thresholding parameter (set smaller for lower contrast image)
{
Res.at<float>(i/blockSide,j/blockSide)=m[0];
}else
{
Res.at<float>(i/blockSide,j/blockSide)=0;
}
}
}
cv::resize(I,smallImg,Res.size());
cv::threshold(Res,inpaintmask,0.02,1.0,cv::THRESH_BINARY);
cv::Mat inpainted;
smallImg.convertTo(smallImg,CV_8UC1,255);
inpaintmask.convertTo(inpaintmask,CV_8UC1);
inpaint(smallImg, inpaintmask, inpainted, 5, cv::INPAINT_TELEA);
cv::resize(inpainted,Res,Img.size());
Res.convertTo(Res,CV_32FC1,1.0/255.0);
}
- (IBAction)renderThis:(id)sender{
cv::Mat source=[self cvMatFromUIImage: self.imgTaken];//cv::imread("Test2.JPG",0);
cv::Mat Img;
cv::cvtColor(source, Img, cv::COLOR_RGB2GRAY);
cv::Mat res;
Img.convertTo(Img,CV_32FC1,1.0/255.0);
[self CalcBlockMeanVariance:Img resource:res];
res=1.0-res;
res=Img+res;
cv::threshold(res,res,0.85,1,cv::THRESH_BINARY);
cv::resize(res,res,cv::Size(res.cols/2,res.rows/2));
self.imgPreview.image = [self UIImageFromCVMat:(res*255)];
}
I'm getting a black image as output. Can anyone guide me on this?
Found the solution:
I just had to change the type from CV_32FC1 to CV_8UC3 and it worked like a charm :)
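For reference, a minimal sketch of that change at the end of renderThis (an assumption based on the answer above: the float result, in the 0..1 range, is scaled into an 8-bit Mat before it is handed to UIImageFromCVMat):
cv::Mat res8;
res.convertTo(res8, CV_8UC1, 255.0); // scale 0..1 floats to 0..255 bytes
// for a 3-channel (CV_8UC3) result instead: cv::cvtColor(res8, res8, cv::COLOR_GRAY2RGB);
self.imgPreview.image = [self UIImageFromCVMat:res8];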

Convert image to black and white (not grayscale) using OpenCV iOS Objective C

I have gone through many similar questions here on SO, but none gives the specific output that I need. I have tried converting the image to black and white, but for some reason some of the text does not appear clear, or rather gets distorted. Below is the code that I have tried so far...
+(UIImage *)grayImage:(UIImage *)processedImage{
cv::Mat grayImage = [MMOpenCVHelper cvMatGrayFromAdjustedUIImage:processedImage];
cv::adaptiveThreshold(grayImage, grayImage, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY, 11, 2);
cv::GaussianBlur(grayImage, grayImage, cv::Size(1,1), 50.0);
UIImage *grayeditImage=[MMOpenCVHelper UIImageFromCVMat:grayImage];
grayImage.release();
return grayeditImage;
}
+ (cv::Mat)cvMatGrayFromAdjustedUIImage:(UIImage *)image {
cv::Mat cvMat = [self cvMatFromAdjustedUIImage:image];
cv::Mat grayMat;
if ( cvMat.channels() == 1 ) {
grayMat = cvMat;
}
else {
grayMat = cv::Mat(cvMat.rows, cvMat.cols, CV_8UC1);
cv::cvtColor( cvMat, grayMat, cv::COLOR_BGR2GRAY );
}
return grayMat; }
+ (cv::Mat)cvMatFromAdjustedUIImage:(UIImage *)image {
CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
CGFloat cols = image.size.width;
CGFloat rows = image.size.height;
cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to backing data
cols, // Width of bitmap
rows, // Height of bitmap
8, // Bits per component
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNoneSkipLast |
kCGBitmapByteOrderDefault);
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
CGContextRelease(contextRef);
return cvMat; }
+ (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat {
NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize()*cvMat.total()];
CGColorSpaceRef colorSpace;
CGBitmapInfo bitmapInfo;
if (cvMat.elemSize() == 1) {
colorSpace = CGColorSpaceCreateDeviceGray();
bitmapInfo = kCGImageAlphaNone | kCGBitmapByteOrderDefault;
} else {
colorSpace = CGColorSpaceCreateDeviceRGB();
bitmapInfo = kCGBitmapByteOrder32Little | (
cvMat.elemSize() == 3? kCGImageAlphaNone : kCGImageAlphaNoneSkipFirst
);
}
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
// Creating CGImage from cv::Mat
CGImageRef imageRef = CGImageCreate(cvMat.cols, //width
cvMat.rows, //height
8, //bits per component
8 * cvMat.elemSize(), //bits per pixel
cvMat.step[0], //bytesPerRow
colorSpace, //colorspace
bitmapInfo,// bitmap info
provider, //CGDataProviderRef
NULL, //decode
false, //should interpolate
kCGRenderingIntentDefault //intent
);
// Getting UIImage from CGImage
UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace);
return finalImage; }
The output that I got from the above code is here, and the result that I want is here. Any help will be great! Thank you.
EDIT: Original image here.
I would like to answer my own question, as it might be helpful. I got the B/W output by changing the adaptive threshold algorithm and the block-size value; below is the code that is used:
+(UIImage *)grayImage:(UIImage *)processedImage{ // B/W
cv::Mat grayImage = [MMOpenCVHelper cvMatGrayFromAdjustedUIImage:processedImage];
cv::adaptiveThreshold(grayImage, grayImage, 255, cv::ADAPTIVE_THRESH_MEAN_C, cv::THRESH_BINARY, 11, 7);
cv::GaussianBlur(grayImage, grayImage, cv::Size(1,1), 50.0);
UIImage *grayeditImage=[MMOpenCVHelper UIImageFromCVMat:grayImage];
grayImage.release();
return grayeditImage;
}

Image is rotated and stretched automatically using OpenCV iOS. How to fix?

I am using OpenCV 3. I have installed the framework in my Xcode project using CocoaPods. To convert an image (captured by the camera) into black and white, I am using adaptive Gaussian thresholding. Below is the code I have used:
@implementation MyClass
+(UIImage *)toBlackAndWhite:(UIImage *)s {
cv::Mat input;
cv::Mat output;
input = [MyClass cvMatFromUIImage:s];
cv::cvtColor(input, input, cv::COLOR_BGR2GRAY);
output = cv::Mat(input.rows, input.cols, CV_8UC1); // rows first; CV_8UC1 replaces the old IPL_DEPTH_8U C API
cv::adaptiveThreshold(input, output, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY, 75, 25);
return [MyClass imageWithCVMat:output];
}
//Ref:Open CV documentation
+ (cv::Mat)cvMatFromUIImage:(UIImage *)image
{
CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
CGFloat cols = image.size.width;
CGFloat rows = image.size.height;
cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels (color channels + alpha)
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to data
cols, // Width of bitmap
rows, // Height of bitmap
8, // Bits per component
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNoneSkipLast |
kCGBitmapByteOrderDefault); // Bitmap info flags
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
CGContextRelease(contextRef);
return cvMat;
}
//Ref:Open CV documentation
+ (UIImage *)imageWithCVMat:(const cv::Mat&)cvMat
{
NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize() * cvMat.total()];
CGColorSpaceRef colorSpace;
if (cvMat.elemSize() == 1) {
colorSpace = CGColorSpaceCreateDeviceGray();
} else {
colorSpace = CGColorSpaceCreateDeviceRGB();
}
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
CGImageRef imageRef = CGImageCreate(cvMat.cols, // Width
cvMat.rows, // Height
8, // Bits per component
8 * cvMat.elemSize(), // Bits per pixel
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNone | kCGBitmapByteOrderDefault, // Bitmap info flags
provider, // CGDataProviderRef
NULL, // Decode
false, // Should interpolate
kCGRenderingIntentDefault); // Intent
UIImage *image = [[UIImage alloc] initWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace);
return image;
}
@end
Problem: the image I'm getting is rotated 90° anti-clockwise and stretched. Please suggest how I can fix it. Please see:
Original Image
Processed Image
The problem is due to the UIImageOrientation of the UIImage.
Background:
When you hold the iPhone in landscape mode (Home button on the right side) and take a photo, that photo has the 'Up' orientation (yes, this is the default). When you take a photo in portrait mode, the orientation is 'Right'. A 'Right'-orientation (portrait) photo is actually still saved in landscape mode. The photo looks correct when you view it because UIImage automatically rotates it from landscape to portrait (rotated right by 90 degrees: width -> height, height -> width).
How does the stretch problem occur?
For photos with 'Right' or 'Left' orientation, the rotated UIImage data is converted to an OpenCV Mat object, so the 'width' and 'height' end up swapped.
Solution: We need to give Mat the original 'width' and 'height', i.e. those of landscape mode. The CGImage property of a UIImage holds the image data with the original 'width' and 'height', so we can use it.
How does the 'rotated 90°' problem occur?
The OpenCV Mat object doesn't keep the orientation info, so the orientation is lost after we convert the UIImage to a Mat and back.
Solution: Keep the orientation and apply it to the resulting UIImage object.
Code related to the question:
+(UIImage *)toBlackAndWhite:(UIImage *)s {
//Create an image with original width and height
UIImage *imageUp = [UIImage imageWithCGImage:[s CGImage]];
//......
input = [MyClass cvMatFromUIImage:imageUp];
//......
UIImage *handledImage = [MyClass imageWithCVMat:output];
//Set orientation to the result image
UIImage *finalImage = [UIImage imageWithCGImage:[handledImage CGImage] scale:[s scale] orientation: s.imageOrientation];
return finalImage;
}

My iPhone app is taking up so much memory

I made a gallery app using UICollectionView, but I'm seeing bad allocation behavior, as below.
I couldn't find where the problem is. Where should I explicitly release objects?
Please let me know.
The following code is the suspect.
In collectionView,
- (UICollectionViewCell *)collectionView:(UICollectionView *)collectionView cellForItemAtIndexPath:(NSIndexPath *)indexPath
...
dispatch_async(all_queue, ^{
ALAssetRepresentation *representation = [asset defaultRepresentation];
UIImage *image = [UIImage imageWithCGImage:[representation fullResolutionImage]
scale:[representation scale]
orientation:(UIImageOrientation)[representation orientation]];
NSString *filename = [representation filename];
NSLog(@"%@", filename);
NSLog(@"Loaded Image row : %d", indexPath.row);
vector<cv::Rect> faces = [ImageUtils findFeature:image minsize:MIN_FACE_SIZE
withCascade:face_cascade];
Mat imageMat = [ImageUtils cvMatFromUIImage:image];
for(unsigned int i = 0; i < faces.size(); ++i) {
rectangle(imageMat, cv::Point(faces[i].x, faces[i].y),
cv::Point(faces[i].x + faces[i].width, faces[i].y + faces[i].height),
cv::Scalar(0,255,255),5);
}
dispatch_async(dispatch_get_main_queue(), ^{
[faceImage setImage:[ImageUtils UIImageFromCVMat:imageMat]];
[cell setNeedsDisplay];
});
});
return cell;
}
Called Method
+ (cv::Mat)cvMatFromUIImage:(UIImage *)image
{
CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
CGFloat cols = image.size.width;
CGFloat rows = image.size.height;
cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels (color channels + alpha)
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to data
cols, // Width of bitmap
rows, // Height of bitmap
8, // Bits per component
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNoneSkipLast |
kCGBitmapByteOrderDefault); // Bitmap info flags
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
CGContextRelease(contextRef);
CGColorSpaceRelease(colorSpace);
return cvMat;
}
Another Method
+ (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize()*cvMat.total()];
CGColorSpaceRef colorSpace;
if (cvMat.elemSize() == 1) {
colorSpace = CGColorSpaceCreateDeviceGray();
} else {
colorSpace = CGColorSpaceCreateDeviceRGB();
}
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
// Creating CGImage from cv::Mat
CGImageRef imageRef = CGImageCreate(cvMat.cols, //width
cvMat.rows, //height
8, //bits per component
8 * cvMat.elemSize(), //bits per pixel
cvMat.step[0], //bytesPerRow
colorSpace, //colorspace
kCGImageAlphaNone|kCGBitmapByteOrderDefault,// bitmap info
provider, //CGDataProviderRef
NULL, //decode
false, //should interpolate
kCGRenderingIntentDefault //intent
);
// Getting UIImage from CGImage
UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace);
return finalImage;
}
The other method
+(std::vector<cv::Rect>)findFeature:(UIImage *)image minsize:(cv::Size)minSize withCascade:(CascadeClassifier)cascade
{
vector<cv::Rect> faces;
Mat frame_gray;
Mat imageMat = [ImageUtils cvMatFromUIImage:image];
cvtColor(imageMat, frame_gray, CV_BGRA2GRAY);
equalizeHist(frame_gray, frame_gray);
cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, minSize);
frame_gray.release();
imageMat.release();
return faces;
}
It's because your UIImage resolution is too high. You have to find a way to reduce its size.
Use dequeueReusableCellWithReuseIdentifier when creating collection view cells.
Also resize the image you are processing; this will definitely reduce the memory footprint, as in the sketch below.
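A minimal downscaling sketch (the method name and the 1024-pixel cap are arbitrary choices for illustration):
+ (UIImage *)downscaledImage:(UIImage *)image maxDimension:(CGFloat)maxDim {
    CGFloat scale = maxDim / MAX(image.size.width, image.size.height);
    if (scale >= 1.0) return image; // already small enough
    CGSize newSize = CGSizeMake(image.size.width * scale, image.size.height * scale);
    UIGraphicsBeginImageContextWithOptions(newSize, NO, 1.0);
    [image drawInRect:CGRectMake(0, 0, newSize.width, newSize.height)];
    UIImage *resized = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return resized;
}
Downscale the full-resolution asset image (e.g. to 1024 px) before passing it to findFeature: and cvMatFromUIImage:.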

How to convert an image to grayscale

I'm new to OpenCV and I have some issues with image conversion. I created a new project with two extra files, Wrapper.h/.mm and UIImage+OpenCV.h/.mm. The code follows:
//UIImage+OpenCV.h
#import <UIKit/UIKit.h>
@interface UIImage (OpenCV)
//cv::Mat to UIImage
+ (UIImage *)imageWithCVMat:(const cv::Mat&)cvMat;
+ (UIImage *)imageWithCVMat:(const cv::Mat&)cvMat
orientation:(UIImageOrientation)orientation;
- (id)initWithCVMat:(const cv::Mat&)cvMat
orientation:(UIImageOrientation)orientation;
//UIImage to cv::Mat
- (cv::Mat)CVMat;
- (cv::Mat)CVMat3; // no alpha channel
- (cv::Mat)CVGrayscaleMat;
@end
//UIImage+OpenCV.mm
#import "UIImage+OpenCV.h"
@implementation UIImage (OpenCV)
-(cv::Mat)CVMat
{
CGColorSpaceRef colorSpace = CGImageGetColorSpace(self.CGImage);
CGFloat cols = self.size.width;
CGFloat rows = self.size.height;
cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to data
cols, // Width of bitmap
rows, // Height of bitmap
8, // Bits per component
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNoneSkipLast |
kCGBitmapByteOrderDefault); // Bitmap info flags
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), self.CGImage);
CGContextRelease(contextRef);
return cvMat;
}
- (cv::Mat)CVMat3
{
cv::Mat result = [self CVMat];
cv::cvtColor(result , result , CV_RGBA2RGB);
return result;
}
-(cv::Mat)CVGrayscaleMat
{
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
CGFloat cols = self.size.width;
CGFloat rows = self.size.height;
cv::Mat cvMat(rows, cols, CV_8UC1); // 8 bits per component, 1 channels
CGContextRef contextRef = CGBitmapContextCreate(cvMat.data, // Pointer to data
cols, // Width of bitmap
rows, // Height of bitmap
8, // Bits per component
cvMat.step[0], // Bytes per row
colorSpace, // Colorspace
kCGImageAlphaNone |
kCGBitmapByteOrderDefault); // Bitmap info flags
CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), self.CGImage);
CGContextRelease(contextRef);
CGColorSpaceRelease(colorSpace);
return cvMat;
}
+ (UIImage *)imageWithCVMat:(const cv::Mat&)cvMat
{
return [[UIImage alloc] initWithCVMat:cvMat];
}
- (id)initWithCVMat:(const cv::Mat&)cvMat
{
NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize() * cvMat.total()];
CGColorSpaceRef colorSpace;
if (cvMat.elemSize() == 1) {
colorSpace = CGColorSpaceCreateDeviceGray();
} else {
colorSpace = CGColorSpaceCreateDeviceRGB();
}
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
// Creating CGImage from cv::Mat
CGImageRef imageRef = CGImageCreate(cvMat.cols, //width
cvMat.rows, //height
8, //bits per component
8 * cvMat.elemSize(), //bits per pixel
cvMat.step[0], //bytesPerRow
colorSpace, //colorspace
kCGImageAlphaNone|kCGBitmapByteOrderDefault,// bitmap info
provider, //CGDataProviderRef
NULL, //decode
false, //should interpolate
kCGRenderingIntentDefault //intent
);
// Getting UIImage from CGImage
self = [self initWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace);
return self;
}
@end
I took the above code from here.
//Wrapper.h
#import <Foundation/Foundation.h>
@interface CVWrapper : NSObject
+(UIImage*) returnPic: (UIImage*)image;
@end
//Wrapper.mm
#import "CVWrapper.h"
#import "UIImage+OpenCV.h"
@implementation CVWrapper
+(UIImage*)returnPic:(UIImage *)image
{
UIImage *result = nil;
if (image)
{
cv::Mat matImage = [image CVMat];
cv::Mat greyMat; // It doesn't work
cv::cvtColor(matImage, greyMat, 7); // It doesn't work. In the imgproc.hpp enum, COLOR_RGB2GRAY = 7
result = [UIImage imageWithCVMat:greyMat];
}
return result;
}
#end
In the ViewController I call the function that displays the image. I'm trying to understand the basic concepts. How can I convert a picture to grayscale, and where do I have to do it?
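One hedged suggestion, based only on the code above: the UIImage+OpenCV category already provides a grayscale path, so the wrapper can avoid the raw cvtColor call; and if you do convert the 4-channel RGBA Mat yourself, the named RGBA constant is the explicit choice:
+(UIImage*)returnPic:(UIImage *)image
{
    if (!image) return nil;
    // Option 1: render straight into a gray bitmap via the category
    cv::Mat greyMat = [image CVGrayscaleMat];
    // Option 2: convert the RGBA Mat explicitly
    // cv::Mat greyMat;
    // cv::cvtColor([image CVMat], greyMat, cv::COLOR_RGBA2GRAY);
    return [UIImage imageWithCVMat:greyMat];
}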
