I'm trying to get the text areas in an image using CIDetector.
- (NSArray *)detectWithImage:(UIImage *)img
{
    // prepare CIImage
    CIImage *image = [CIImage imageWithCGImage:img.CGImage];
    // flip vertically
    CIFilter *filter = [CIFilter filterWithName:@"CIAffineTransform"];
    [filter setValue:image forKey:kCIInputImageKey];
    CGAffineTransform t = CGAffineTransformMakeTranslation(0, CGRectGetHeight(image.extent));
    t = CGAffineTransformScale(t, 1.0, -1.0);
    [filter setValue:[NSValue valueWithCGAffineTransform:t] forKey:kCIInputTransformKey];
    image = filter.outputImage;
    // prepare CIDetector
    CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeText
                                               context:nil
                                               options:@{CIDetectorAccuracy : CIDetectorAccuracyHigh}];
    // retrieve array of CITextFeature
    NSArray *features = [detector featuresInImage:image
                                          options:@{CIDetectorReturnSubFeatures : @YES}];
    return features;
}
The image passed is:
I get nothing from this image. I tried with a color image as well, and also without flipping the image.
Can someone point me in the right direction?
Thanks!
You should check to make sure the UIImage and img.CGImage being passed into your function are not nil, as the rest of your code seems to be fine, though the flip is not necessary. For example:
UIImageView *imageView = [[UIImageView alloc] initWithImage:img];
CIImage *image = [CIImage imageWithCGImage:img.CGImage];
CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeText
                                           context:nil
                                           options:@{CIDetectorAccuracy : CIDetectorAccuracyHigh}];
// retrieve array of CITextFeature
NSArray *features = [detector featuresInImage:image options:@{CIDetectorReturnSubFeatures : @YES}];
for (CITextFeature *feature in features) {
    // CIDetector bounds use a bottom-left origin, so flip the y coordinate for UIKit
    UIView *view = [[UIView alloc] initWithFrame:CGRectMake(feature.bounds.origin.x,
                                                            img.size.height - feature.bounds.origin.y - feature.bounds.size.height,
                                                            feature.bounds.size.width,
                                                            feature.bounds.size.height)];
    view.backgroundColor = [[UIColor redColor] colorWithAlphaComponent:0.25];
    [imageView addSubview:view];
}
Produced the result:
Here the red highlights represent the bounds returned from the CIDetector.
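If either img or img.CGImage does turn out to be nil, it is worth bailing out early before running the detector. A minimal guard (a sketch, reusing the method signature from the question) might look like:

- (NSArray *)detectWithImage:(UIImage *)img
{
    // Bail out early if there is nothing to detect on.
    if (img == nil || img.CGImage == nil) {
        NSLog(@"detectWithImage: received a nil image or an image without a CGImage backing");
        return @[];
    }
    // ... continue with the CIImage / CIDetector code from the question ...
}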
A feature of my React Native app consists of making perspective corrections to document photos.
It takes 4 points, crops the image, perspective-corrects it, and then applies a CIFilter to adjust the colours and exports the result as a Base64 string.
We were trying to run this on an iPhone 11 simulator.
As of now, we have been getting this error:
attempt to insert nil object from objects[0]
Assumption: it is probably happening because it can't read the image from file storage / the file is being read as nil.
Here is the source code:
#import "CustomCropManager.h"
#import <React/RCTLog.h>
#implementation CustomCropManager
RCT_EXPORT_MODULE();
RCT_EXPORT_METHOD(crop:(NSDictionary *)points imageUri:(NSString *)imageUri callback:(RCTResponseSenderBlock)callback)
{
NSLog(#"[myLOG] PARSING");
NSString *parsedImageUri = [imageUri stringByReplacingOccurrencesOfString:#"file://" withString:#""];
NSLog(#"[myLOG] parsedImageUri");
NSURL *fileURL = [NSURL fileURLWithPath:parsedImageUri];
NSLog(#"[myLOG] fileURL");
CIImage *ciImage = [CIImage imageWithContentsOfURL:fileURL];
NSLog(#"[myLOG] ciImage");
CGPoint newLeft = CGPointMake([points[#"topLeft"][#"x"] floatValue], [points[#"topLeft"][#"y"] floatValue]);
CGPoint newRight = CGPointMake([points[#"topRight"][#"x"] floatValue], [points[#"topRight"][#"y"] floatValue]);
CGPoint newBottomLeft = CGPointMake([points[#"bottomLeft"][#"x"] floatValue], [points[#"bottomLeft"][#"y"] floatValue]);
CGPoint newBottomRight = CGPointMake([points[#"bottomRight"][#"x"] floatValue], [points[#"bottomRight"][#"y"] floatValue]);
NSLog(#"[myLOG] CGPOINTS");
newLeft = [self cartesianForPoint:newLeft height:[points[#"height"] floatValue] ];
newRight = [self cartesianForPoint:newRight height:[points[#"height"] floatValue] ];
newBottomLeft = [self cartesianForPoint:newBottomLeft height:[points[#"height"] floatValue] ];
newBottomRight = [self cartesianForPoint:newBottomRight height:[points[#"height"] floatValue] ];
NSLog(#"[myLOG] new");
NSMutableDictionary *rectangleCoordinates = [[NSMutableDictionary alloc] init];
rectangleCoordinates[#"inputTopLeft"] = [CIVector vectorWithCGPoint:newLeft];
rectangleCoordinates[#"inputTopRight"] = [CIVector vectorWithCGPoint:newRight];
rectangleCoordinates[#"inputBottomLeft"] = [CIVector vectorWithCGPoint:newBottomLeft];
rectangleCoordinates[#"inputBottomRight"] = [CIVector vectorWithCGPoint:newBottomRight];
NSLog(#"[myLOG] rectangleCoordinates");
ciImage = [ciImage imageByApplyingFilter:#"CIPerspectiveCorrection" withInputParameters:rectangleCoordinates];
NSLog(#"[myLOG] ciImage");
// custom code
CIFilter* colorControlsFilter = [CIFilter filterWithName:#"CIColorControls"];
[colorControlsFilter setValue:ciImage forKey:#"inputImage"];
[colorControlsFilter setValue:[NSNumber numberWithFloat:1.0] forKey:#"inputSaturation"];
[colorControlsFilter setValue:[NSNumber numberWithFloat:0.2] forKey:#"inputBrightness"];
[colorControlsFilter setValue:[NSNumber numberWithFloat:1.0] forKey:#"inputContrast"];
ciImage = [colorControlsFilter valueForKey:#"outputImage"];
// custom code
NSLog(#"[myLOG] ciImage ssss");
CIContext *context = [CIContext contextWithOptions:nil];
CGImageRef cgimage = [context createCGImage:ciImage fromRect:[ciImage extent]];
UIImage *image = [UIImage imageWithCGImage:cgimage];
NSLog(#"[myLOG] image");
NSData *imageToEncode = UIImageJPEGRepresentation(image, 0.8);
NSLog(#"[myLOG] calling...");
callback(#[[NSNull null], #{#"image": [imageToEncode base64EncodedStringWithOptions:NSDataBase64Encoding64CharacterLineLength]}]);
}
- (CGPoint)cartesianForPoint:(CGPoint)point height:(float)height {
return CGPointMake(point.x, height - point.y);
}
#end
"Must have 50 reputation to comment"
You should log every variable you are trying to use. Change
NSLog(@"[myLOG] ciImage"); to NSLog(@"[myLOG] ciImage %@", ciImage);
If the last thing printed is [myLOG] ciImage, then the crash must happen after that line and before the next NSLog, meaning this line fails:
[colorControlsFilter setValue:ciImage forKey:@"inputImage"];
As you stated, the image is nil, which means the line that sets the image contains an error:
ciImage = [ciImage imageByApplyingFilter:@"CIPerspectiveCorrection" withInputParameters:rectangleCoordinates];
Does the filter exist, and is its name written properly?
For debugging purposes you can comment this line out and see if the original image displays.
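For example, a defensive sketch (using the variable names from the question; the error string is just a placeholder) that logs the intermediate values and avoids passing nil into the callback dictionary:

// Sketch: log intermediate values and guard against nil before building the callback payload.
NSLog(@"[myLOG] ciImage %@", ciImage);            // nil here means the file URL could not be read
NSLog(@"[myLOG] imageToEncode %@", imageToEncode);
if (imageToEncode == nil) {
    callback(@[@"Could not load or encode the image", [NSNull null]]);
    return;
}
callback(@[[NSNull null], @{@"image": [imageToEncode base64EncodedStringWithOptions:NSDataBase64Encoding64CharacterLineLength]}]);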
I am trying to scan QR images that the user chooses from disk. I found a strange issue where all the libraries I tried failed (CIDetector, an old port of ZXing, or ZBar).
I know that there are ways to add a white background (e.g. redrawing the image or using a CIFilter) so that the image will get scanned.
What is the correct way of scanning QR codes with a transparent background (configuring CIContext or CIDetector)? The image below fails to scan on both iOS and macOS.
https://en.wikipedia.org/wiki/QR_code#/media/File:QR_code_for_mobile_English_Wikipedia.svg
- (void)scanImage:(CIImage *)image
{
    NSArray <CIFeature *> *features = [[self QRdetector] featuresInImage:image];
    NSLog(@"Number of features found: %lu", [features count]);
}

- (CIDetector *)QRdetector
{
    CIContext *context = [CIContext contextWithOptions:@{kCIContextWorkingColorSpace : [NSNull null]}]; // no difference using special options or nil as a context
    CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeQRCode context:context options:@{CIDetectorAccuracy : CIDetectorAccuracyHigh, CIDetectorAspectRatio : @(1)}];
    return detector;
}
- (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions {
    NSURL *URL = [[NSBundle mainBundle] URLForResource:@"transparentqrcode" withExtension:@"png"];
    UIImage *image = [UIImage imageWithContentsOfFile:[URL path]];
    CIImage *ciImage = [CIImage imageWithContentsOfURL:URL];

    // CUSTOM CODE TO ADD WHITE BACKGROUND
    CIFilter *filter = [CIFilter filterWithName:@"CISourceAtopCompositing"];
    [filter setDefaults];
    CIColor *whiteColor = [[CIColor alloc] initWithColor:[UIColor whiteColor]];
    CIImage *colorImage = [CIImage imageWithColor:whiteColor];
    colorImage = [colorImage imageByCroppingToRect:ciImage.extent];
    [filter setValue:ciImage forKey:kCIInputImageKey];
    [filter setValue:colorImage forKey:kCIInputBackgroundImageKey];
    CIImage *newImage = [filter valueForKey:kCIOutputImageKey];
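    // Note: the composite (newImage) is never used below; the original ciImage is what gets scanned.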
    [self scanImage:ciImage];
    return YES;
}
As mentioned in the comments, it appears CIDetector treats the alpha channel as black. Replacing it with white works, unless the QR code itself is white with a transparent background.
I haven't done any profiling to see if this would be quicker, but it might be a better option.
- (IBAction)didTap:(id)sender {
    NSURL *URL = [[NSBundle mainBundle] URLForResource:@"transparentqrcode" withExtension:@"png"];
    CIImage *ciImage = [CIImage imageWithContentsOfURL:URL];

    NSArray <CIFeature *> *features = [self getImageFeatures:ciImage];

    // if CIDetector failed to find / process a QRCode in the image,
    // (such as when the image has a transparent background),
    // invert the colors and try again
    if (features.count == 0) {
        CIFilter *filter = [CIFilter filterWithName:@"CIColorInvert"];
        [filter setValue:ciImage forKey:kCIInputImageKey];
        CIImage *newImage = [filter valueForKey:kCIOutputImageKey];
        features = [self getImageFeatures:newImage];
    }

    if (features.count > 0) {
        for (CIQRCodeFeature *qrFeature in features) {
            NSLog(@"QRFeature.messageString : %@ ", qrFeature.messageString);
        }
    } else {
        NSLog(@"Unable to decode image!");
    }
}

- (NSArray <CIFeature *> *)getImageFeatures:(CIImage *)image
{
    NSArray <CIFeature *> *features = [[self QRdetector] featuresInImage:image];
    NSLog(@"Number of features found: %lu", [features count]);
    return features;
}

- (CIDetector *)QRdetector
{
    CIContext *context = [CIContext contextWithOptions:@{kCIContextWorkingColorSpace : [NSNull null]}]; // no difference using special options or nil as a context
    CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeQRCode context:context options:@{CIDetectorAccuracy : CIDetectorAccuracyHigh, CIDetectorAspectRatio : @(1)}];
    return detector;
}
Before applying the filter:
After applying the red eye filter, I get a bad output image:
The output image is different every time.
Here is the code of my red eye correction filter:
UIImage *img = self.imageInput;
CIImage *image = [[CIImage alloc] initWithImage:img];
NSLog(@"after ciimage: %@", kCIImageAutoAdjustEnhance);
NSDictionary *options = [NSDictionary dictionaryWithObject:@"NO" forKey:kCIImageAutoAdjustEnhance];
NSLog(@"options: %@", options);
NSArray *adjustments = [image autoAdjustmentFiltersWithOptions:options];
NSLog(@"adjustments: %@ ", adjustments);
for (CIFilter *filter in adjustments)
{
    [filter setValue:image forKey:kCIInputImageKey];
    image = filter.outputImage;
}
CIContext *context = [CIContext contextWithOptions:nil];
CGImageRef cgImage = [context createCGImage:image fromRect:image.extent];
UIImage *enhancedImage = [[UIImage alloc] initWithCGImage:cgImage];
CGImageRelease(cgImage);
self.imageViewSource.image = enhancedImage;
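For reference, a minimal sketch that passes NSNumber booleans for the auto-adjust options (instead of the string @"NO") and logs which adjustment filters Core Image actually returns, which can help narrow down where the variation between runs comes from:

// Sketch: request only the red-eye adjustment and log the filters that come back.
NSDictionary *redEyeOptions = @{kCIImageAutoAdjustEnhance : @NO,
                                kCIImageAutoAdjustRedEye  : @YES};
NSArray *redEyeAdjustments = [image autoAdjustmentFiltersWithOptions:redEyeOptions];
for (CIFilter *filter in redEyeAdjustments) {
    NSLog(@"adjustment filter: %@", filter.name);
}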
My app is crashing when I attempt to apply a filter to my user-selected UIImage (it has been working fine without applying the filter). I added and imported the "CoreImage" framework to my project so I could create filters for user-selected images.
I am attempting to apply the filter by creating a category on UIImage (based on Apple's documentation) and then calling the corresponding method on the UIImage selected by the user. Following is the code of my category header and body; what am I doing wrong? (Please note, "randColor" is a UIColor category class method that generates a random color.)
#import <UIKit/UIKit.h>
#import <CoreImage/CoreImage.h>
#import "UIColor+CustomColorCategory.h"

@interface UIImage (MonoChromeFilter)

- (UIImage *)applyMonoChromeWithRandColor;

@end

#import "UIImage+MonoChromeFilter.h"

@implementation UIImage (MonoChromeFilter)

- (UIImage *)applyMonoChromeWithRandColor
{
    CIContext *context = [CIContext contextWithOptions:nil];
    CIImage *ciImage = [[CIImage alloc] initWithImage:self];
    CIFilter *filter = [CIFilter filterWithName:@"CIColorMonochrome"];
    [filter setValue:ciImage forKey:kCIInputImageKey];
    [filter setValue:[UIColor randColor] forKey:kCIAttributeTypeColor];
    CIImage *result = [filter valueForKey:kCIOutputImageKey];
    CGRect extent = [result extent];
    CGImageRef cgImage = [context createCGImage:result fromRect:extent];
    UIImage *filteredImage = [[UIImage alloc] initWithCGImage:cgImage];
    return filteredImage;
}

@end
Here is the method in the viewController where this category is being called:
- (void)imagePickerController:(UIImagePickerController *)picker didFinishPickingMediaWithInfo:(NSDictionary *)info
{
    [picker dismissViewControllerAnimated:YES completion:^{
        UIImage *editedImage = [info objectForKey:UIImagePickerControllerEditedImage];
        editedImage = [editedImage applyMonoChromeWithRandColor];
        self.blogImageOutlet.image = editedImage;
        self.blogImageOutlet.layer.cornerRadius = self.blogImageOutlet.frame.size.width / 2.0;
        [self.blogImageOutlet setClipsToBounds:YES];
        [self saveImageToLibrary:editedImage];
    }];
}
I figured it out! After debugging and using some other projects as a point of reference, I realized that I was experiencing two issues. First, I was trying to use a UIColor for a CIColor, which is not directly possible; I first had to convert the UIColor to a CIColor to be able to apply it. Second, I was not using the correct strings for the CIFilter value keys. Here is the code after modifications (and now it works!):
#import "UIImage+MonoChromeFilter.h"
#implementation UIImage (MonoChromeFilter)
+ (UIImage *) applyMonoChromeWithRandColor: (UIImage *)uIImage
{
// Convert UIColor to CIColor
CGColorRef colorRef = [UIColor randColor].CGColor;
NSString *colorString = [CIColor colorWithCGColor:colorRef].stringRepresentation;
CIColor *coreColor = [CIColor colorWithString:colorString];
CIContext *context = [CIContext contextWithOptions:nil];
// Convert UIImage to CIImage
CIImage *ciImage = [[CIImage alloc] initWithImage:uIImage];
// Set values for CIColorMonochrome Filter
CIFilter *filter = [CIFilter filterWithName:#"CIColorMonochrome"];
[filter setValue:ciImage forKey:kCIInputImageKey];
[filter setValue:#1.0 forKey:#"inputIntensity"];
[filter setValue:coreColor forKey:#"inputColor"];
CIImage *result = [filter valueForKey:kCIOutputImageKey];
CGRect extent = [result extent];
CGImageRef cgImage = [context createCGImage:result fromRect:extent];
UIImage *filteredImage = [[UIImage alloc] initWithCGImage:cgImage];
return filteredImage;
}
#end
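A usage sketch for the revised class method, assuming the same image picker flow as in the original question:

UIImage *editedImage = [info objectForKey:UIImagePickerControllerEditedImage];
UIImage *filteredImage = [UIImage applyMonoChromeWithRandColor:editedImage];
self.blogImageOutlet.image = filteredImage;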
Try this, it works like a champ for me.
We have applied different CIFilters to a single image.
Below is the code for the implementation.
// CISepiaTone effect
CIContext *imageContext = [CIContext contextWithOptions:nil];
CIImage *image = [[CIImage alloc] initWithImage:inputimage];
CIFilter *filter = [CIFilter filterWithName:@"CISepiaTone"
                              keysAndValues:kCIInputImageKey, image,
                                            @"inputIntensity", @1, nil];
CIImage *result = [filter valueForKey:@"outputImage"];
CGImageRef cgImageRef = [imageContext createCGImage:result fromRect:[result extent]];
UIImage *targetImage = [UIImage imageWithCGImage:cgImageRef];
detailsVc.filterImage = targetImage;
[self.navigationController pushViewController:detailsVc animated:YES];
// CIVignette effect
CIContext *imageContext = [CIContext contextWithOptions:nil];
CIImage *image = [[CIImage alloc] initWithImage:inputimage];
CIFilter *vignette = [CIFilter filterWithName:@"CIVignette"];
[vignette setDefaults];
[vignette setValue:image forKey:@"inputImage"];
[vignette setValue:[NSNumber numberWithFloat:1.0] forKey:@"inputIntensity"];
[vignette setValue:[NSNumber numberWithFloat:10.00] forKey:@"inputRadius"];
CIImage *result = [vignette valueForKey:@"outputImage"];
CGImageRef cgImageRef = [imageContext createCGImage:result fromRect:[result extent]];
UIImage *targetImage = [UIImage imageWithCGImage:cgImageRef];
detailsVc.filterImage = targetImage;
[self.navigationController pushViewController:detailsVc animated:YES];
For the detailed implementation you can refer to this GitHub project file: ImageFilter.
Hope this helps someone.
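If the goal is to stack both effects on one image rather than produce two separate outputs, the output of the first filter can be fed into the second before rendering. A sketch along the same lines as the code above (reusing the hypothetical inputimage variable):

CIContext *imageContext = [CIContext contextWithOptions:nil];
CIImage *image = [[CIImage alloc] initWithImage:inputimage];

// sepia first
CIFilter *sepia = [CIFilter filterWithName:@"CISepiaTone"
                             keysAndValues:kCIInputImageKey, image,
                                           @"inputIntensity", @1, nil];
CIImage *sepiaResult = [sepia valueForKey:@"outputImage"];

// then vignette on the sepia output
CIFilter *vignette = [CIFilter filterWithName:@"CIVignette"];
[vignette setValue:sepiaResult forKey:kCIInputImageKey];
[vignette setValue:@(1.0) forKey:@"inputIntensity"];
[vignette setValue:@(10.0) forKey:@"inputRadius"];
CIImage *result = [vignette valueForKey:@"outputImage"];

CGImageRef cgImageRef = [imageContext createCGImage:result fromRect:[result extent]];
UIImage *targetImage = [UIImage imageWithCGImage:cgImageRef];
CGImageRelease(cgImageRef);
// targetImage now has both effects applied; hand it to detailsVc as before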
So, I've got an odd scenario.
In my iOS app, I'm trying to blur the content area of the screen when a popover is opened.
I have this working with Core Image, but only when using Gaussian blur; none of the other blurs work, which is odd.
I tried doing the same with GPUImage, and it blurs far faster, but it doesn't actually put the view on top of the other views!
To summarize: in the source below, setBlurOnView works properly; however, setBlurOnViewWithGPUImage does not appear to work. The blur view (tag 6110) is created, but the app doesn't actually blur.
Note: This is on iOS 6, in the simulator.
Here's the relevant source:
// ScreenBlur.m
#import <QuartzCore/QuartzCore.h>
#import <CoreImage/CoreImage.h>
#import <GPUImage/GPUImage.h>
#import "ScreenBlur.h"
#import "GlobalData.h"
#import "Logger.h"

@implementation ScreenBlur

+ (void)setBlurOnViewWithGPUImage:(UIView *)view {
    GPUImagePicture *imageSource = [[GPUImagePicture alloc] initWithImage:[self captureScreenInRect:view.frame inView:view]];
    GPUImageGaussianBlurFilter *blur = [[GPUImageGaussianBlurFilter alloc] init];
    [imageSource addTarget:blur];
    [imageSource processImage];
    [self setImage:[imageSource imageFromCurrentlyProcessedOutput] toView:view];
}

+ (void)setBlurOnView:(UIView *)view {
    //http://stackoverflow.com/questions/17041669/creating-a-blurring-overlay-view
    CIImage *inputImage = [CIImage imageWithCGImage:[self captureScreenInRect:view.frame inView:view].CGImage];
    //CIContext *context = [CIContext contextWithOptions:nil];
    if ([GlobalData getInstance].ciContext == nil) {
        [Logger Log:@"ciContext does not exist, creating..." fromClass:@"ScreenBlur"];
        // [GlobalData getInstance].ciContext = [CIContext contextWithOptions:nil]; //cpu context
        [GlobalData getInstance].ciContext = [CIContext contextWithEAGLContext:[[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2]];
    }
    //set up the blur filter
    CIFilter *filter = [CIFilter filterWithName:@"CIGaussianBlur"];
    [filter setValue:inputImage forKey:kCIInputImageKey];
    [filter setValue:[NSNumber numberWithFloat:3.0f] forKey:@"inputRadius"];
    CIImage *result = [filter valueForKey:kCIOutputImageKey];
    // CIGaussianBlur has a tendency to shrink the image a little,
    // this ensures it matches up exactly to the bounds of our original image
    CGImageRef cgImage = [[GlobalData getInstance].ciContext createCGImage:result fromRect:[inputImage extent]];
    [self setImage:[UIImage imageWithCGImage:cgImage] toView:view];
}

+ (void)setImage:(UIImage *)blurredImage toView:(UIView *)view {
    UIView *blurView = [[UIView alloc] initWithFrame:CGRectMake(0, 0, blurredImage.size.width, blurredImage.size.height)];
    [blurView setBackgroundColor:[UIColor colorWithPatternImage:blurredImage]];
    [blurView setTag:6110];
    //set the image as the foreground for the view
    [view addSubview:blurView];
    [view bringSubviewToFront:blurView];
}

//same as the method above, but resizes the screenshot before applying the blur for increased performance at the expense of image quality.
+ (void)setBlurOnViewPerformanceMode:(UIView *)view {
    //http://stackoverflow.com/questions/17041669/creating-a-blurring-overlay-view
    UIImage *screenShot = [self imageWithImage:[self captureScreenInRect:view.frame inView:view] scaledToSize:CGSizeMake(view.frame.size.width / 2, view.frame.size.height / 2)];
    CIImage *inputImage = [CIImage imageWithCGImage:screenShot.CGImage];
    //CIContext *context = [CIContext contextWithOptions:nil];
    if ([GlobalData getInstance].ciContext == nil) {
        [Logger Log:@"ciContext does not exist, creating..." fromClass:@"ScreenBlur"];
        // [GlobalData getInstance].ciContext = [CIContext contextWithOptions:nil]; //cpu context
        [GlobalData getInstance].ciContext = [CIContext contextWithEAGLContext:[[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2]];
    }
    //set up the blur filter
    CIFilter *filter = [CIFilter filterWithName:@"CIGaussianBlur"];
    [filter setValue:inputImage forKey:kCIInputImageKey];
    [filter setValue:[NSNumber numberWithFloat:3.0f] forKey:@"inputRadius"];
    CIImage *result = [filter valueForKey:kCIOutputImageKey];
    CGImageRef cgImage = [[GlobalData getInstance].ciContext createCGImage:result fromRect:[inputImage extent]];
    [self setImage:[self imageWithImage:[UIImage imageWithCGImage:cgImage] scaledToSize:view.frame.size] toView:view];
}

+ (UIImage *)imageWithImage:(UIImage *)image scaledToSize:(CGSize)newSize {
    //UIGraphicsBeginImageContext(newSize);
    UIGraphicsBeginImageContextWithOptions(newSize, NO, 0.0);
    [image drawInRect:CGRectMake(0, 0, newSize.width, newSize.height)];
    UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return newImage;
}

+ (void)removeBlurFromView:(UIView *)view {
    for (UIView *subView in view.subviews) {
        if (subView.tag == 6110) {
            [subView removeFromSuperview];
        }
    }
}

+ (UIImage *)captureScreenInRect:(CGRect)captureFrame inView:(UIView *)view {
    CALayer *layer = view.layer;
    UIGraphicsBeginImageContext(view.bounds.size);
    CGContextClipToRect(UIGraphicsGetCurrentContext(), captureFrame);
    [layer renderInContext:UIGraphicsGetCurrentContext()];
    UIImage *screenImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return screenImage;
}

@end
And then in my view controller, it's simply called with
[ScreenBlur setBlurOnView:self.view];
I found a workaround for this (or, who knows, maybe this is how it was supposed to be done).
// ScreenBlur.m
+ (GPUImageView *)getBlurredImageWithGPUImageFromView:(UIView *)view {
    GPUImagePicture *imageSource = [[GPUImagePicture alloc] initWithImage:[self captureScreenInRect:view.frame inView:view] smoothlyScaleOutput:true];
    GPUImageFastBlurFilter *blur = [[GPUImageFastBlurFilter alloc] init];
    [blur setBlurPasses:3];
    [imageSource addTarget:blur];
    GPUImageView *filteredView = [[GPUImageView alloc] initWithFrame:view.frame];
    [blur addTarget:filteredView];
    [imageSource processImage];
    return filteredView;
}

// ViewController.m
// blur the main screen
GPUImageView *blurred = [ScreenBlur getBlurredImageWithGPUImageFromView:self.view];
[blurred setTag:6110];
[self.view addSubview:blurred];
[self.view bringSubviewToFront:blurred];
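Because the GPUImageView is tagged 6110 just like the Core Image overlay, the existing removeBlurFromView: helper should be able to tear it down when the popover closes, for example:

// later, when the popover is dismissed
[ScreenBlur removeBlurFromView:self.view];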