I want to develop a small jigsaw puzzle game, but I am having a problem when combining the image pieces. I can split the image, but I cannot combine the pieces the way I need. Here is what I am doing.
For cropping:
[customImageView setImage:[self cropImage:self.mainImage withRect:mCropFrame]];

- (UIImage *)cropImage:(UIImage *)originalImage withRect:(CGRect)rect
{
    // CGImageCreateWithImageInRect returns a +1 reference, so release it
    // after wrapping it in a UIImage to avoid leaking each cropped piece
    CGImageRef croppedRef = CGImageCreateWithImageInRect([originalImage CGImage], rect);
    UIImage *cropped = [UIImage imageWithCGImage:croppedRef];
    CGImageRelease(croppedRef);
    return cropped;
}
For clipping:
[self setClippingPath:[pieceBezierPathsMutArray_ objectAtIndex:i] :view];

- (UIImageView *)setClippingPath:(UIBezierPath *)clippingPath :(UIImageView *)imgView
{
    if (![[imgView layer] mask])
    {
        [[imgView layer] setMask:[CAShapeLayer layer]];
    }
    [(CAShapeLayer *)[[imgView layer] mask] setPath:[clippingPath CGPath]];
    return imgView;
}
For combining:

- (id)initByCombining:(id)oneView andOther:(id)twoView withRegularSize:(CGSize)pieceSize
{
    CustomImageView *one = oneView; // [oneView copy];
    CustomImageView *two = twoView;
    CGPoint onepoint, twopoint;
    if (one.frame.origin.x < two.frame.origin.x)
    {
        onepoint.x = 0;
        twopoint.x = onepoint.x + one.frame.size.width;
    }
    else
    {
        // two is the left piece here, so one goes to its right
        twopoint.x = 0;
        onepoint.x = twopoint.x + two.frame.size.width;
    }
    // note: both branches below set y to 0, so vertical offsets are not handled yet
    if (one.frame.origin.y < two.frame.origin.y)
    {
        onepoint.y = 0;
        twopoint.y = 0;
    }
    else
    {
        onepoint.y = 0;
        twopoint.y = 0;
    }
    CGRect frame;
    frame.origin = CGPointZero;
    // total width of the two pieces side by side
    frame.size.width = one.frame.size.width + two.frame.size.width;
    frame.size.height = MAX(one.frame.size.height, two.frame.size.height);
    if (self = [self initWithFrame:frame])
    {
        // UIGraphicsBeginImageContext both creates and pushes the drawing context
        UIGraphicsBeginImageContext(frame.size);
        [one.image drawAtPoint:onepoint];
        [two.image drawAtPoint:twopoint];
        [self.layer renderInContext:UIGraphicsGetCurrentContext()];
        self.image = UIGraphicsGetImageFromCurrentImageContext();
        self.backgroundColor = [UIColor redColor];
        UIGraphicsEndImageContext();
        self.center = one.center;
        // assuming the view's current transform (the snippet's incomingTransform is not defined here)
        self.transform = CGAffineTransformScale(self.transform, 0.5, 0.5);
        self.previousRotation = self.transform;
    }
    return self;
}
My initial image is this:
After cropping and clipping it becomes like this:
It should look like this after combining.
But it is becoming like this.
When you want to combine the images, I would suggest placing the clipped pieces inside another UIView, so that this UIView becomes the superview of all the placed clippings; once the images have been placed, you can render that superview as shown below.
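A rough sketch of the placement step, with puzzleFrame and pieces as assumed names for the target rect and the clipped piece views (they are not from the original code):

UIView *superView = [[UIView alloc] initWithFrame:puzzleFrame];
for (CustomImageView *piece in pieces) {
    // each piece keeps the frame it already has, now relative to the container
    [superView addSubview:piece];
}

Once the pieces are in place, render the superview's layer: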
UIGraphicsBeginImageContextWithOptions(superView.bounds.size, superView.opaque, 0.0);
[superView.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage * CombinedImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return CombinedImage;
Then just save it as follows:
ALAssetsLibrary *library = [[ALAssetsLibrary alloc] init];
[library writeImageToSavedPhotosAlbum:[CombinedImage CGImage] orientation:(ALAssetOrientation)[CombinedImage imageOrientation] completionBlock:^(NSURL *assetURL, NSError *error){
    if (error) {
        // TODO: error handling
        UIAlertView *al = [[UIAlertView alloc] initWithTitle:@"" message:@"Error saving image, please try again" delegate:nil cancelButtonTitle:@"OK" otherButtonTitles:nil, nil];
        [al show];
    } else {
        NSData *imageData = UIImagePNGRepresentation(CombinedImage);
        UIImage *finalImage = [UIImage imageWithData:imageData];
    }
}];
Hope this helps.
Related
Earlier I used the SDWebImage third-party library to display images; now I have removed it for other reasons. Is there any other way to display a GIF image from a URL in a UIImageView without using a third-party library?
Well, you can do it simply by following these steps:
Add the following functions:
#import <ImageIO/ImageIO.h>
#import <MobileCoreServices/MobileCoreServices.h>
UIImage *GIFFromData(NSData *data) {
    if (!data) {
        return nil;
    }
    CGImageSourceRef source = CGImageSourceCreateWithData((__bridge CFDataRef)(data), NULL);
    UIImage *image = nil;
    if (GIFSourceContainsAnimatedGIF(source)) {
        image = GIFFromImageSource(source);
    } else {
        image = [UIImage imageWithData:data];
    }
    if (source) {
        CFRelease(source);
    }
    return image;
}
BOOL GIFSourceContainsAnimatedGIF(CGImageSourceRef source) {
    return (source && UTTypeConformsTo(CGImageSourceGetType(source), kUTTypeGIF) && CGImageSourceGetCount(source) > 1);
}
UIImage *GIFFromImageSource(CGImageSourceRef source) {
    CFRetain(source);
    NSUInteger numberOfFrames = CGImageSourceGetCount(source);
    NSMutableArray<UIImage *> *images = [NSMutableArray arrayWithCapacity:numberOfFrames];
    NSTimeInterval duration = 0.0;
    for (NSUInteger i = 0; i < numberOfFrames; ++i) {
        CGImageRef image = CGImageSourceCreateImageAtIndex(source, i, NULL);
        if (image) {
            UIImage *frameImage = [UIImage imageWithCGImage:image scale:1.0 orientation:UIImageOrientationUp];
            [images addObject:frameImage];
            CFRelease(image);
        } else {
            continue;
        }
        duration += GIFSourceGetFrameDelay(source, i);
    }
    CFRelease(source);
    return [UIImage animatedImageWithImages:images duration:duration];
}
NSTimeInterval GIFSourceGetFrameDelay(CGImageSourceRef source, NSUInteger index) {
    NSTimeInterval frameDelay = 0;
    CFDictionaryRef imageProperties = CGImageSourceCopyPropertiesAtIndex(source, index, NULL);
    if (!imageProperties) {
        return frameDelay;
    }
    CFDictionaryRef gifProperties = NULL;
    if (CFDictionaryGetValueIfPresent(imageProperties, kCGImagePropertyGIFDictionary, (const void **)&gifProperties)) {
        const void *durationValue = NULL;
        if (CFDictionaryGetValueIfPresent(gifProperties, kCGImagePropertyGIFUnclampedDelayTime, &durationValue)) {
            frameDelay = [(__bridge NSNumber *)durationValue doubleValue];
            if (frameDelay <= 0) {
                if (CFDictionaryGetValueIfPresent(gifProperties, kCGImagePropertyGIFDelayTime, &durationValue)) {
                    frameDelay = [(__bridge NSNumber *)durationValue doubleValue];
                }
            }
        }
    }
    CFRelease(imageProperties);
    return frameDelay;
}
Download your GIF with NSURLSession
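A minimal download sketch, assuming url is the NSURL of the GIF and imageView is an existing UIImageView (both names assumed); the completion handler runs on a background queue, so hop back to the main queue before touching UIKit:

NSURLSessionDataTask *task = [[NSURLSession sharedSession] dataTaskWithURL:url completionHandler:^(NSData *data, NSURLResponse *response, NSError *error) {
    if (!data) {
        return; // handle the error as appropriate
    }
    dispatch_async(dispatch_get_main_queue(), ^{
        imageView.image = GIFFromData(data);
    });
}];
[task resume];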
Display it
UIImage *image = GIFFromData(data);
UIImageView *view = [[UIImageView alloc] initWithImage:image];
Hope this helps :)
UIImageView *animatedImageView = [[UIImageView alloc] initWithFrame:self.view.bounds];
// note: +imageNamed: loads only a static (first) frame from a GIF file,
// so each file here effectively contributes one frame of the animation
animatedImageView.animationImages = [NSArray arrayWithObjects:
                                     [UIImage imageNamed:@"image1.gif"],
                                     [UIImage imageNamed:@"image2.gif"],
                                     [UIImage imageNamed:@"image3.gif"],
                                     [UIImage imageNamed:@"image4.gif"], nil];
animatedImageView.animationDuration = 1.0f;
animatedImageView.animationRepeatCount = 0;
[animatedImageView startAnimating];
[self.view addSubview:animatedImageView];
See also: Add animated GIF image in iPhone UIImageView
I tried implementing this answer: https://stackoverflow.com/a/22716610 to the problem of adding overlays to MKMapSnapshotter in iOS 7 (you can't use the renderInContext: method). I did this as shown below, but the image returned contains only the map, with no overlays. Forgive me, I am quite new to this. Thanks.
- (void)mapViewDidFinishRenderingMap:(MKMapView *)mapView fullyRendered:(BOOL)fullyRendered
{
    if (mapView.tag == 100) {
        MKMapSnapshotOptions *options = [[MKMapSnapshotOptions alloc] init];
        options.region = mapView.region;
        options.size = mapView.frame.size;
        options.scale = [[UIScreen mainScreen] scale];
        MKMapSnapshotter *snapshotter = [[MKMapSnapshotter alloc] initWithOptions:options];
        [snapshotter startWithCompletionHandler:^(MKMapSnapshot *snapshot, NSError *error) {
            if (error) {
                NSLog(@"[Error] %@", error);
                return;
            }
            UIImage *image = snapshot.image;
            UIGraphicsBeginImageContextWithOptions(image.size, YES, image.scale);
            {
                [image drawAtPoint:CGPointMake(0, 0)];
                CGContextRef context = UIGraphicsGetCurrentContext();
                CGContextSetStrokeColorWithColor(context, [UIColor redColor].CGColor);
                CGContextSetLineWidth(context, 5.0f);
                CGContextBeginPath(context);
                BOOL first = YES;
                NSArray *overlays = mapView.overlays;
                for (id <MKOverlay> overlay in overlays) {
                    CGPoint point = [snapshot pointForCoordinate:overlay.coordinate];
                    if (first)
                    {
                        first = NO;
                        CGContextMoveToPoint(context, point.x, point.y);
                    }
                    else {
                        CGContextAddLineToPoint(context, point.x, point.y);
                    }
                }
                // stroke the accumulated path; without this call nothing is drawn
                CGContextStrokePath(context);
                UIImage *compositeImage = UIGraphicsGetImageFromCurrentImageContext();
                NSData *data = UIImagePNGRepresentation(compositeImage);
                placeToSave = data;
                NSLog(@"MapView Snapshot Saved.");
                // show image for debugging
                UIImageView *imageView = [[UIImageView alloc] initWithFrame:CGRectMake(0, 200, 320, 320)];
                imageView.image = compositeImage;
                [self.view addSubview:imageView];
            }
            UIGraphicsEndImageContext();
        }];
        [mapView setHidden:YES];
    }
}
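One more caveat, offered as a hedged sketch rather than a verified fix: [snapshot pointForCoordinate:overlay.coordinate] yields a single point per overlay (its center), so a multi-point overlay such as an MKPolyline needs its full geometry walked inside the drawing block above:

// assumes the overlays are MKPolyline instances; 'context' and 'snapshot'
// are the variables from the completion handler above
for (id <MKOverlay> overlay in mapView.overlays) {
    if (![overlay isKindOfClass:[MKPolyline class]]) {
        continue;
    }
    MKPolyline *polyline = (MKPolyline *)overlay;
    MKMapPoint *mapPoints = polyline.points;
    for (NSUInteger i = 0; i < polyline.pointCount; i++) {
        CLLocationCoordinate2D coord = MKCoordinateForMapPoint(mapPoints[i]);
        CGPoint point = [snapshot pointForCoordinate:coord];
        if (i == 0) {
            CGContextMoveToPoint(context, point.x, point.y);
        } else {
            CGContextAddLineToPoint(context, point.x, point.y);
        }
    }
}
CGContextStrokePath(context);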
I am working on a photo taking app. The app's preview layer is set to take up exactly half of the screen using this code:
[_previewLayer setFrame:CGRectMake(0, 0, rootLayer.bounds.size.width, rootLayer.bounds.size.height/2)];
This looks perfect and there is no distortion at all while the user is viewing the camera's "preview" / what they are seeing while taking the picture.
However, once they actually take the photo, I create a sublayer, set its frame property to my preview layer's frame, and set the photo as the contents of the sublayer.
This does technically work. Once the user takes the photo, the photo shows up on the top half of the screen like it should.
The only problem is that the photo is distorted.
It looks stretched out, almost as if I'm taking a landscape photo.
Any help is greatly appreciated. I am totally desperate on this and have not been able to fix it after working on it all day today.
Here is all of my view controller's code:
#import "MediaCaptureVC.h"
@interface MediaCaptureVC ()

@end

@implementation MediaCaptureVC
- (id)initWithNibName:(NSString *)nibNameOrNil bundle:(NSBundle *)nibBundleOrNil
{
    self = [super initWithNibName:nibNameOrNil bundle:nibBundleOrNil];
    if (self) {
        // Custom initialization
    }
    return self;
}
- (void)viewDidLoad
{
    [super viewDidLoad];
    // Do any additional setup after loading the view.
    AVCaptureSession *session = [[AVCaptureSession alloc] init];
    [session setSessionPreset:AVCaptureSessionPresetPhoto];
    AVCaptureDevice *inputDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    // pass-by-reference errors should start out nil, not as an allocated NSError
    NSError *error = nil;
    AVCaptureDeviceInput *deviceInput = [AVCaptureDeviceInput deviceInputWithDevice:inputDevice error:&error];
    if ([session canAddInput:deviceInput])
        [session addInput:deviceInput];
    _previewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:session];
    [_previewLayer setVideoGravity:AVLayerVideoGravityResizeAspectFill];
    CALayer *rootLayer = [[self view] layer];
    [rootLayer setMasksToBounds:YES];
    [_previewLayer setFrame:CGRectMake(0, 0, rootLayer.bounds.size.width, rootLayer.bounds.size.height / 2)];
    [rootLayer insertSublayer:_previewLayer atIndex:0];
    _stillImageOutput = [[AVCaptureStillImageOutput alloc] init];
    [session addOutput:_stillImageOutput];
    [session startRunning];
}
- (void)didReceiveMemoryWarning
{
    [super didReceiveMemoryWarning];
    // Dispose of any resources that can be recreated.
}
- (UIImage *)rotate:(UIImage *)src andOrientation:(UIImageOrientation)orientation
{
    UIGraphicsBeginImageContext(src.size);
    CGContextRef context = UIGraphicsGetCurrentContext();
    // 90/180*M_PI is integer division (it evaluates to 0), so use M_PI_2 instead;
    // note the rotation pivots around the context origin, not the image center
    if (orientation == UIImageOrientationRight) {
        CGContextRotateCTM(context, M_PI_2);
    } else if (orientation == UIImageOrientationLeft) {
        CGContextRotateCTM(context, -M_PI_2);
    } else if (orientation == UIImageOrientationDown) {
        // NOTHING
    } else if (orientation == UIImageOrientationUp) {
        CGContextRotateCTM(context, M_PI_2);
    }
    [src drawAtPoint:CGPointMake(0, 0)];
    UIImage *img = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return img;
}
- (IBAction)stillImageCapture
{
    AVCaptureConnection *videoConnection = nil;
    for (AVCaptureConnection *connection in _stillImageOutput.connections) {
        for (AVCaptureInputPort *port in [connection inputPorts]) {
            if ([[port mediaType] isEqual:AVMediaTypeVideo]) {
                videoConnection = connection;
                break;
            }
        }
        if (videoConnection) {
            break;
        }
    }
    NSLog(@"about to request a capture from: %@", _stillImageOutput);
    [_stillImageOutput captureStillImageAsynchronouslyFromConnection:videoConnection completionHandler:^(CMSampleBufferRef imageDataSampleBuffer, NSError *error) {
        if (imageDataSampleBuffer) {
            NSData *imageData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageDataSampleBuffer];
            UIImage *image = [[UIImage alloc] initWithData:imageData];
            image = [self rotate:image andOrientation:image.imageOrientation];
            CALayer *subLayer = [CALayer layer];
            CGImageRef imageRef = image.CGImage;
            subLayer.contents = (id)[UIImage imageWithCGImage:imageRef].CGImage;
            subLayer.frame = _previewLayer.frame;
            CALayer *rootLayer = [[self view] layer];
            [rootLayer setMasksToBounds:YES];
            [subLayer setFrame:CGRectMake(0, 0, rootLayer.bounds.size.width, rootLayer.bounds.size.height / 2)];
            [_previewLayer addSublayer:subLayer];
            NSLog(@"%@", subLayer.contents);
            NSLog(@"Orientation: %d", (int)image.imageOrientation);
        }
    }];
}

@end
Hi, I hope this helps you. The code seems more complex than it should be, because most of the work is done at the CALayer level rather than the image view / view level. However, I think the issue is that the proportions of the original capture frame and of your mini viewport differ, which distorts the UIImage in this statement:

[subLayer setFrame:CGRectMake(0, 0, rootLayer.bounds.size.width, rootLayer.bounds.size.height/2)];

What needs to be done is to take the proportions of subLayer.frame and compute the best size that fits into the root layer (or the image view associated with it). I wrote a subroutine that handles the proportion; note that you will need to adjust the origin of the frame to get what you want. Usage looks like this:
CGRect newbounds = [self figure_proportion:image to_fit_rect:rootLayer.frame];
if (newbounds.size.height < rootLayer.frame.size.height) {
    // adjust the origin of the image view frame here
}
- (CGRect)figure_proportion:(UIImage *)image2 to_fit_rect:(CGRect)rect
{
    CGSize image_size = image2.size;
    CGRect newrect = rect;
    float wfactor = image_size.width / image_size.height;
    float hfactor = image_size.height / image_size.width;
    if (image2.size.width > image2.size.height) {
        newrect.size.width = rect.size.width;
        newrect.size.height = (rect.size.width * hfactor);
    }
    else if (image2.size.height > image2.size.width) {
        newrect.size.height = rect.size.height;
        newrect.size.width = (rect.size.height * wfactor);
    }
    else {
        newrect.size.width = rect.size.width;
        newrect.size.height = newrect.size.width;
    }
    if (newrect.size.height > rect.size.height) {
        newrect.size.height = rect.size.height;
        newrect.size.width = (newrect.size.height * wfactor);
    }
    if (newrect.size.width > rect.size.width) {
        newrect.size.width = rect.size.width;
        newrect.size.height = (newrect.size.width * hfactor);
    }
    return (newrect);
}
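An editorial aside, not from the original answer: since the captured photo is set as the contents of a plain CALayer, the layer's contentsGravity can also handle the fitting for you. The default, kCAGravityResize, stretches the contents to fill the frame, which matches the distortion described above:

subLayer.contentsGravity = kCAGravityResizeAspectFill; // matches the preview's AVLayerVideoGravityResizeAspectFill
subLayer.masksToBounds = YES; // crop the overflow instead of stretching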
I have a problem capturing the full screen of an iCarousel: it captures only the current index of the carousel.
UIGraphicsBeginImageContext(caputureView.bounds.size);
[caputureView.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage *resultingImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
Try something like this:
- (void)getFullScreenScreenShot
{
    AppDelegate *appDelegate = (AppDelegate *)[[UIApplication sharedApplication] delegate];
    UIView *superView = appDelegate.viewController.view;
    CGRect fullScreenFrame = superView.frame;
    UIGraphicsBeginImageContextWithOptions(fullScreenFrame.size, YES, 0.0f);
    CGContextTranslateCTM(UIGraphicsGetCurrentContext(), 0.0f, 0.0f); // no-op here; adjust if you need an offset
    [superView.layer renderInContext:UIGraphicsGetCurrentContext()];
    UIImageView *screenShot = [[UIImageView alloc] initWithImage:UIGraphicsGetImageFromCurrentImageContext()];
    UIGraphicsEndImageContext();
    NSData *imageData = UIImageJPEGRepresentation(screenShot.image, 1.0);
    // use stringByAppendingPathComponent so the path separator is handled correctly
    NSString *previewFileNamePath = [[CPFileManager documentsPath] stringByAppendingPathComponent:@"image.jpg"];
    if ([imageData writeToFile:previewFileNamePath atomically:NO])
    {
        NSLog(@"See filename: %@", previewFileNamePath);
    }
    else
    {
        NSLog(@"Error: %@", previewFileNamePath);
    }
}
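As a side note to this answer: on iOS 7 and later, UIView's drawViewHierarchyInRect:afterScreenUpdates: is usually a faster alternative to renderInContext: for full-screen screenshots, e.g.:

UIGraphicsBeginImageContextWithOptions(superView.bounds.size, YES, 0.0f);
[superView drawViewHierarchyInRect:superView.bounds afterScreenUpdates:YES];
UIImage *screenshot = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();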
Everything is working fine with FBProfilePictureView, but I need to get the picture from the FBProfilePictureView and turn it into a UIImage.
How should I do it?
I tried using this:
UIGraphicsBeginImageContext(self.profilePictureView.frame.size);
[self.view.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage *viewImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
self.TestPictureOutlet.image = viewImage;
But this doesn't work for my solution.
FBProfilePictureView is a UIView; this UIView contains a UIImageView, which holds your image. You can get the UIImage from that UIImageView (profilePictureView below is a FBProfilePictureView):
UIImage *image = nil;
for (NSObject *obj in [profilePictureView subviews]) {
    if ([obj isMemberOfClass:[UIImageView class]]) {
        UIImageView *objImg = (UIImageView *)obj;
        image = objImg.image;
        break;
    }
}
EDIT: here is another, quicker way to do the same thing (note it enumerates the profile picture view's subviews, not the controller view's):

__block UIImage *image = nil;
[profilePictureView.subviews enumerateObjectsUsingBlock:^(NSObject *obj, NSUInteger idx, BOOL *stop) {
    if ([obj isMemberOfClass:[UIImageView class]]) {
        UIImageView *objImg = (UIImageView *)obj;
        image = objImg.image;
        *stop = YES;
    }
}];
Both of the solutions mentioned above work fine for getting a UIImage object out of FBProfilePictureView.
The only thing is, you need to add some delay before grabbing the image from FBProfilePictureView, like this:
[[FBRequest requestForMe] startWithCompletionHandler:
 ^(FBRequestConnection *connection, NSDictionary<FBGraphUser> *user, NSError *error) {
     if (!error) {
         myNameLbl.text = user.name;
         profileDP.profileID = user.id;
         // NOTE THIS LINE WHICH DOES THE MAGIC
         [self performSelector:@selector(getUserImageFromFBView) withObject:nil afterDelay:1.0];
     }
 }];
- (void)getUserImageFromFBView
{
    UIImage *img = nil;
    // 1 - Solution to get the UIImage object
    for (NSObject *obj in [profileDP subviews]) {
        if ([obj isMemberOfClass:[UIImageView class]]) {
            UIImageView *objImg = (UIImageView *)obj;
            img = objImg.image;
            break;
        }
    }
    // 2 - Solution to get the UIImage object
    // UIGraphicsBeginImageContext(profileDP.frame.size);
    // [profileDP.layer renderInContext:UIGraphicsGetCurrentContext()];
    // img = UIGraphicsGetImageFromCurrentImageContext();
    // UIGraphicsEndImageContext();
    // Here I'm setting the image, and it works 100% for me.
    testImgv.image = img;
}
Here is the solution, step by step: make sure that you have assigned the FB user id to your FBProfilePictureView object (in my case this object is userPictureImageView), then:
- (void)saveFBUserImage
{
    CGSize imageSize = self.userPictureImageView.frame.size;
    UIGraphicsBeginImageContext(imageSize);
    CGContextRef imageContext = UIGraphicsGetCurrentContext();
    [self.userPictureImageView.layer renderInContext:imageContext];
    UIImage *viewImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    NSData *imageData = UIImageJPEGRepresentation(viewImage, 1);
    UIImage *img = [UIImage imageWithData:imageData];
    NSString *filePath = <specify your path here>;
    CGSize size = img.size;
    if (size.height > 50)
        size.height = 50;
    if (size.width > 50)
        size.width = 50;
    CGRect rect = CGRectMake(0.0f, 0.0f, size.width, size.height);
    CGSize size2 = rect.size;
    UIGraphicsBeginImageContext(size2);
    [img drawInRect:rect];
    img = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    NSData *newImageData = UIImageJPEGRepresentation(img, 1.0);
    [newImageData writeToFile:filePath atomically:YES];
}
That's it. :-)
I found the above solutions to work, but here is updated code which I find easier to use.
@property FBSDKProfilePictureView *pictureView;

if ([FBSDKAccessToken currentAccessToken]) {
    self.pictureView = [[FBSDKProfilePictureView alloc] init];
    [self.pictureView setProfileID:@"me"];
    [self.pictureView setPreservesSuperviewLayoutMargins:YES];
    [self.pictureView setPictureMode:FBSDKProfilePictureModeNormal];
    [self.pictureView setNeedsImageUpdate];
    [self performSelector:@selector(getUserImageFromFBView) withObject:nil afterDelay:1.0];
}
- (void)getUserImageFromFBView
{
    UIImage *image = nil;
    for (NSObject *obj in [self.pictureView subviews]) {
        if ([obj isMemberOfClass:[UIImageView class]]) {
            UIImageView *objImg = (UIImageView *)obj;
            image = objImg.image;
            break;
        }
    }
    [self.profilePic setImage:image forState:UIControlStateNormal];
}
Hope this helps. Here I have put a 1 second delay to wait for the profile picture to load.