I use SceneKit to display an .obj file. The .obj file comes from a sensor SDK: the sensor scans a person's arm and returns the scan as an .obj file. When I load that file, it contains a lot of unwanted geometry (part of a chair, part of the surface, and so on). I need to remove these parts of the object so that only the arm remains.
For example, I want to select a rectangle or a sphere and remove all vertices and faces inside that region.
Are there any SDKs or frameworks on iOS that can do that?
P.S. I tried nineveh and some other frameworks, but they can only view objects, not edit them.
Edit
I found the code below, which manipulates vertices in SceneKit (it merges the vertices of different child nodes). Can I use the same approach to find the vertices I need to remove (the ones inside my rectangle), or will it be too slow with 65K vertices?
//
// VertexManager.m
// Test
//
#import "VertexManager.h"
#import <SceneKit/SceneKit.h>
#import <GLKit/GLKit.h>
@implementation VertexManager
+ (SCNNode *) flattenNodeHierarchy:(SCNNode *) input
{
SCNNode *result = [SCNNode node];
NSUInteger nodeCount = [[input childNodes] count];
if(nodeCount > 0){
SCNNode *node = [[input childNodes] objectAtIndex:0];
NSArray *vertexArray = [node.geometry geometrySourcesForSemantic:SCNGeometrySourceSemanticVertex];
SCNGeometrySource *vertex = [vertexArray objectAtIndex:0];
SCNGeometryElement *element = [node.geometry geometryElementAtIndex:0]; //todo: support multiple elements
NSUInteger primitiveCount = element.primitiveCount;
NSUInteger newPrimitiveCount = primitiveCount * nodeCount;
size_t elementBufferLength = newPrimitiveCount * 3 * sizeof(int); //nTriangle x 3 vertex * size of int
int* elementBuffer = (int*)malloc(elementBufferLength);
/* simple case: here we consider that all the objects to flatten are the same
In the regular case we should iterate on every geometry and accumulate the number of vertex/triangles etc...*/
NSUInteger vertexCount = [vertex vectorCount];
NSUInteger newVertexCount = vertexCount * nodeCount;
SCNVector3 *newVertex = malloc(sizeof(SCNVector3) * newVertexCount);
SCNVector3 *newNormal = malloc(sizeof(SCNVector3) * newVertexCount); //assume same number of normal/vertex
//fill
NSUInteger vertexFillIndex = 0;
NSUInteger primitiveFillIndex = 0;
for(NSUInteger index=0; index< nodeCount; index++){
@autoreleasepool {
node = [[input childNodes] objectAtIndex:index];
NSArray *vertexArray = [node.geometry geometrySourcesForSemantic:SCNGeometrySourceSemanticVertex];
NSArray *normalArray = [node.geometry geometrySourcesForSemantic:SCNGeometrySourceSemanticNormal];
SCNGeometrySource *vertex = [vertexArray objectAtIndex:0];
SCNGeometrySource *normals = [normalArray objectAtIndex:0];
if([vertex bytesPerComponent] != sizeof(float)){
NSLog(#"todo: support other byte per component");
continue;
}
float *vertexBuffer = (float *)[[vertex data] bytes];
float *normalBuffer = (float *)[[normals data] bytes];
SCNMatrix4 t = [node transform];
GLKMatrix4 matrix = MyGLKMatrix4FromCATransform3D(t);
//append source
for(NSUInteger vIndex = 0; vIndex < vertexCount; vIndex++, vertexFillIndex++){
GLKVector3 v = GLKVector3Make(vertexBuffer[vIndex * 3], vertexBuffer[vIndex * 3+1], vertexBuffer[vIndex * 3 + 2]);
GLKVector3 n = GLKVector3Make(normalBuffer[vIndex * 3], normalBuffer[vIndex * 3+1], normalBuffer[vIndex * 3 + 2]);
//transform
v = GLKMatrix4MultiplyVector3WithTranslation(matrix, v);
n = GLKMatrix4MultiplyVector3(matrix, n);
newVertex[vertexFillIndex] = SCNVector3Make(v.x, v.y, v.z);
newNormal[vertexFillIndex] = SCNVector3Make(n.x, n.y, n.z);
}
//append elements
//here we assume that all elements are SCNGeometryPrimitiveTypeTriangles
SCNGeometryElement *element = [node.geometry geometryElementAtIndex:0];
const void *inputPrimitive = [element.data bytes];
size_t bpi = element.bytesPerIndex;
NSUInteger offset = index * vertexCount;
for(NSUInteger pIndex = 0; pIndex < primitiveCount; pIndex++, primitiveFillIndex+=3){
elementBuffer[primitiveFillIndex] = offset + _getIndex(inputPrimitive, bpi, pIndex*3);
elementBuffer[primitiveFillIndex+1] = offset + _getIndex(inputPrimitive, bpi, pIndex*3+1);
elementBuffer[primitiveFillIndex+2] = offset + _getIndex(inputPrimitive, bpi, pIndex*3+2);
}
}
}
NSArray *sources = @[[SCNGeometrySource geometrySourceWithVertices:newVertex count:newVertexCount],
[SCNGeometrySource geometrySourceWithNormals:newNormal count:newVertexCount]];
NSData *newElementData = [NSMutableData dataWithBytesNoCopy:elementBuffer length:elementBufferLength freeWhenDone:YES];
NSArray *elements = @[[SCNGeometryElement geometryElementWithData:newElementData
primitiveType:SCNGeometryPrimitiveTypeTriangles
primitiveCount:newPrimitiveCount bytesPerIndex:sizeof(int)]];
result.geometry = [SCNGeometry geometryWithSources:sources elements:elements];
//cleanup
free(newVertex);
free(newNormal);
}
return result;
}
//helpers:
GLKMatrix4 MyGLKMatrix4FromCATransform3D(SCNMatrix4 transform) {
GLKMatrix4 m = {{transform.m11, transform.m12, transform.m13, transform.m14,
transform.m21, transform.m22, transform.m23, transform.m24,
transform.m31, transform.m32, transform.m33, transform.m34,
transform.m41, transform.m42, transform.m43, transform.m44}};
return m;
}
GLKVector3 MySCNVector3ToGLKVector3(SCNVector3 vector) {
GLKVector3 v = {{vector.x, vector.y, vector.z}};
return v;
}
@end
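For the "inside my rectangle" test specifically, here is a rough sketch of what I have in mind (selectionMin / selectionMax are just placeholder names for the corners of an axis-aligned selection box, and the element/index buffer would still have to be rebuilt afterwards so that no triangle references a removed vertex):
// Rough sketch: collect the indices of the vertices that fall inside an
// axis-aligned selection box. Assumes float components, like the code above.
static NSIndexSet *VerticesInsideBox(SCNGeometrySource *vertexSource,
                                     SCNVector3 selectionMin,
                                     SCNVector3 selectionMax)
{
    NSMutableIndexSet *inside = [NSMutableIndexSet indexSet];
    const float *buffer = (const float *)[[vertexSource data] bytes];
    NSInteger stride = [vertexSource dataStride] / sizeof(float);
    NSInteger offset = [vertexSource dataOffset] / sizeof(float);
    for (NSInteger i = 0; i < [vertexSource vectorCount]; i++) {
        float x = buffer[i * stride + offset];
        float y = buffer[i * stride + offset + 1];
        float z = buffer[i * stride + offset + 2];
        if (x >= selectionMin.x && x <= selectionMax.x &&
            y >= selectionMin.y && y <= selectionMax.y &&
            z >= selectionMin.z && z <= selectionMax.z) {
            [inside addIndex:i];
        }
    }
    return inside;
}
Iterating 65K vertices like this on the CPU should be quick; the more involved part is rebuilding the triangle indices once the vertices are dropped.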
No. You'll want to use a 3D tool like Blender, Maya, 3ds Max, or Cheetah 3D.
Related
I'm very, very new to Objective-C (this is for a React Native app), and I am trying to pare down a list of colors and remove duplicates.
I've got this so far:
... code related to pulling pixels from an image
float flexibility = 2;
float range = 60;
NSMutableArray * colors = [NSMutableArray new];
float x = 0;
float y = 0;
for (int n = 0; n<(width*height); n++){
int index = (bytesPerRow * y) + x * bytesPerPixel;
int red = rawData[index];
int green = rawData[index + 1];
int blue = rawData[index + 2];
int alpha = rawData[index + 3];
NSArray * a = [NSArray arrayWithObjects:[NSString stringWithFormat:@"%i",red],[NSString stringWithFormat:@"%i",green],[NSString stringWithFormat:@"%i",blue],[NSString stringWithFormat:@"%i",alpha], nil];
[colors addObject:a];
y++;
if (y==height){
y=0;
x++;
}
}
free(rawData);
Now the script I am basing this off of has some code to pare down the list:
NSMutableDictionary * colourCounter = [NSMutableDictionary new];
//count the occurences in the array
NSCountedSet *countedSet = [[NSCountedSet alloc] initWithArray:colors];
for (NSString *item in countedSet) {
NSUInteger count = [countedSet countForObject:item];
[colourCounter setValue:[NSNumber numberWithInteger:count] forKey:item];
}
//sort keys highest occurrence to lowest
NSArray *orderedKeys = [colourCounter keysSortedByValueUsingComparator:^NSComparisonResult(id obj1, id obj2){
return [obj2 compare:obj1];
}];
//checks if the colour is similar to another one already included
NSMutableArray * ranges = [NSMutableArray new];
for (NSString * key in orderedKeys){
NSArray * rgb = [key componentsSeparatedByString:@","];
int r = [rgb[0] intValue];
int g = [rgb[1] intValue];
int b = [rgb[2] intValue];
bool exclude = false;
for (NSString * ranged_key in ranges){
NSArray * ranged_rgb = [ranged_key componentsSeparatedByString:@","];
int ranged_r = [ranged_rgb[0] intValue];
int ranged_g = [ranged_rgb[1] intValue];
int ranged_b = [ranged_rgb[2] intValue];
if (r>= ranged_r-range && r<= ranged_r+range){
if (g>= ranged_g-range && g<= ranged_g+range){
if (b>= ranged_b-range && b<= ranged_b+range){
exclude = true;
}
}
}
}
if (!exclude){ [ranges addObject:key]; }
}
//return ranges array here if you just want the ordered colours high to low
NSMutableArray * colourArray = [NSMutableArray new];
for (NSString * key in ranges){
NSArray * rgb = [key componentsSeparatedByString:@","];
float r = [rgb[0] floatValue];
float g = [rgb[1] floatValue];
float b = [rgb[2] floatValue];
UIColor * colour = [UIColor colorWithRed:(r/255.0f) green:(g/255.0f) blue:(b/255.0f) alpha:1.0f];
[colourArray addObject:colour];
}
//if you just want an array of images of most common to least, return here
//return [NSDictionary dictionaryWithObject:colourArray forKey:@"colours"];
I ultimately want to return this colourArray (the original library uses colour, I use color, so you'll sometimes see one or the other spelling - I'll probably standardize it to color when finished).
However, whenever I use this code, RN generates an error:
As far as I can tell, this is due to calling a function that doesn't exist. Is there something I am missing here, or something I am calling incorrectly?
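One likely culprit (my guess, not confirmed anywhere in the post): the first loop stores each colour as an NSArray, but the pare-down code then calls componentsSeparatedByString: on those keys, a method that only NSString responds to, so the call fails at runtime. Storing each colour as a comma-joined string, the format the later code expects, would avoid that; a sketch replacing the two lines that build and add the array a:
// Sketch (assumes the original script used comma-separated strings as keys):
NSString *colorKey = [NSString stringWithFormat:@"%i,%i,%i,%i", red, green, blue, alpha];
[colors addObject:colorKey];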
I am working on a project using the Neurosky MindWave to create an iOS app. I am using the ThinkGear SDK for iOS, available here: http://developer.neurosky.com/docs/doku.php?id=beta
The project reads values from the MindWave device and stores them in an NSDictionary in an Objective-C implementation file. I am trying to access these values from my Swift file to control 3D animations in SceneKit.
Here's the set up in the header file:
// the eSense values
typedef struct {
int attention;
int meditation;
} ESenseValues;
// the EEG power bands
typedef struct {
int delta;
int theta;
int lowAlpha;
int highAlpha;
int lowBeta;
int highBeta;
int lowGamma;
int highGamma;
} EEGValues;
@interface RootViewController : UITableViewController <TGAccessoryDelegate> {
short rawValue;
int rawCount;
int buffRawCount;
int blinkStrength;
int poorSignalValue;
int heartRate;
float respiration;
int heartRateAverage;
int heartRateAcceleration;
ESenseValues eSenseValues;
EEGValues eegValues;
bool logEnabled;
NSFileHandle * logFile;
NSString * output;
UIView * loadingScreen;
NSThread * updateThread;
}
// TGAccessoryDelegate protocol methods
- (void)accessoryDidConnect:(EAAccessory *)accessory;
- (void)accessoryDidDisconnect;
- (void)dataReceived:(NSDictionary *)data;
- (UIImage *)updateSignalStatus;
- (void)initLog;
- (void)writeLog;
@property (nonatomic, retain) IBOutlet UIView * loadingScreen;
Here's the Objective-C code that stores the values from the dictionary:
// This method gets called by the TGAccessoryManager when data is received from the
// ThinkGear-enabled device.
- (void)dataReceived:(NSDictionary *)data {
[data retain];
NSString * temp = [[NSString alloc] init];
NSDate * date = [NSDate date];
// check to see whether the eSense values are there. if so, we assume that
// all of the other data (aside from raw) is there. this is not necessarily
// a safe assumption.
if([data valueForKey:@"eSenseAttention"]){
eSenseValues.attention = [[data valueForKey:@"eSenseAttention"] intValue];
eSenseValues.meditation = [[data valueForKey:@"eSenseMeditation"] intValue];
temp = [temp stringByAppendingFormat:@"%f: Attention: %d\n", [date timeIntervalSince1970], eSenseValues.attention];
temp = [temp stringByAppendingFormat:@"%f: Meditation: %d\n", [date timeIntervalSince1970], eSenseValues.meditation];
eegValues.delta = [[data valueForKey:@"eegDelta"] intValue];
eegValues.theta = [[data valueForKey:@"eegTheta"] intValue];
eegValues.lowAlpha = [[data valueForKey:@"eegLowAlpha"] intValue];
eegValues.highAlpha = [[data valueForKey:@"eegHighAlpha"] intValue];
eegValues.lowBeta = [[data valueForKey:@"eegLowBeta"] intValue];
eegValues.highBeta = [[data valueForKey:@"eegHighBeta"] intValue];
eegValues.lowGamma = [[data valueForKey:@"eegLowGamma"] intValue];
eegValues.highGamma = [[data valueForKey:@"eegHighGamma"] intValue];
}
if(logEnabled) {
[output release];
output = [[NSString stringWithString:temp] retain];
[self performSelectorOnMainThread:@selector(writeLog) withObject:nil waitUntilDone:NO];
}
//[temp release];
// release the parameter
[data release];
}
Whenever I try to get any of these values I get "Use of unresolved identifier" or a similar error. I am trying to display them as 3D text in SceneKit:
override func viewDidLoad() {
super.viewDidLoad()
// Create Scene
let scene = SCNScene()
let myText = SCNText(string: attention , extrusionDepth: 5)
myText.font = UIFont(name: "Optima", size: 1)
let myTextNode = SCNNode(geometry: myText)
myTextNode.position = SCNVector3(x: -2, y: -1, z: -7)
myTextNode.orientation = SCNQuaternion(x: 0.1, y: 0, z: 0.5, w: 0)
scene.rootNode.addChildNode(myTextNode)
}
I'm very new to iOS development and have been working on this for nearly a week now. Any clues on how to access the values would be very much appreciated.
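One approach that should work (a sketch based on standard Objective-C/Swift interop, not on anything in the ThinkGear documentation) is to expose the struct fields as readonly properties on RootViewController; once RootViewController.h is imported in the project's bridging header, Swift sees them as ordinary Int values:
// RootViewController.h -- hypothetical additions
@property (nonatomic, readonly) int attention;
@property (nonatomic, readonly) int meditation;
// RootViewController.m -- back the properties with the existing struct ivars
- (int)attention  { return eSenseValues.attention; }
- (int)meditation { return eSenseValues.meditation; }
The Swift view controller could then build the SCNText string from something like String(controller.attention), assuming it holds a reference to the RootViewController instance.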
I'm developing a mobile application for iOS related to voice recording.
As part of that, I'm implementing several sound effects to modify the recorded voice, but I'm having trouble with some of them.
I'm trying to create an echo/delay effect, and I need to transform a byte array into a short array, but I have no idea how to do that in Objective-C.
Thanks.
This is my current source code for it, but because Byte is such a small type, applying the attenuation (which produces a float value) results in awful noise in my audio.
- (NSURL *)echo:(NSURL *)input output:(NSURL *)output{
int delay = 50000;
float attenuation = 0.5f;
NSMutableData *audioData = [NSMutableData dataWithContentsOfURL:input];
NSUInteger dataSize = [audioData length] - 44;
NSUInteger audioLength = [audioData length];
NSUInteger newAudioLength = audioLength + delay;
// Copy bytes
Byte *byteData = (Byte*)malloc(audioLength);
memcpy(byteData, [audioData bytes], audioLength);
short *shortData = (short*)malloc(audioLength/2);
// create a new array to store new modify data
Byte *newByteData = (Byte*)malloc(newAudioLength);
newByteData = byteData;
for (int i = 44; i < audioLength - delay; i++)
{
newByteData[i + delay] += byteData[i] * attenuation;
}
// Copy bytes in a new NSMutableData
NSMutableData *newAudioData = [NSMutableData dataWithBytes:newByteData length:newAudioLength];
// Store in a file
[newAudioData writeToFile:[output path] atomically:YES];
// Set WAV size
[[AudioUtils alloc] setAudioFileSize:output];
return output;
}
In the end, I was able to finish my echo effect by implementing these four methods. I hope they are useful for you.
Byte to short array
- (short *) byte2short:(Byte *)bytes size:(int)size resultSize:(int)resultSize{
short *shorts = (short *)malloc(sizeof(short)*resultSize);
for (int i=0; i < size/2; i++){
shorts[i] = (bytes[i*2+1] << 8) | bytes[i*2];
}
return shorts;
}
Short to byte array
- (Byte *) short2byte:(short *)shorts size:(int)size resultSize:(int)resultSize{
Byte *bytes = (Byte *)malloc(sizeof(Byte)*resultSize);
for (int i = 0; i < size; i++)
{
bytes[i * 2] = (Byte) (shorts[i] & 0x00FF);
bytes[(i * 2) + 1] = (Byte) (shorts[i] >> 8);
shorts[i] = 0;
}
return bytes;
}
Effect
- (NSMutableData *) effect:(NSMutableData *)data delay:(int)delay attenuation:(float)attenuation{
NSUInteger audioLength = [data length];
// Copy original data in a byte array
Byte *byteData = (Byte*)malloc(sizeof(Byte)*audioLength);
memcpy(byteData, [data bytes], audioLength);
short *shortData = (short*)malloc(sizeof(short)*(audioLength/2 + delay));
shortData = [self byte2short:byteData size:(int)audioLength resultSize:(int)audioLength/2 + delay];
// Array to store shorts
short *newShortData = shortData;
for (int i = 44; i < audioLength/2; i++)
{
newShortData[i + delay] += (short)((float)shortData[i] * attenuation);
}
Byte *newByteData = [self short2byte:newShortData size:(int)(audioLength/2 + delay) resultSize:(int)(audioLength + delay*2)];
// Copy bytes to a NSMutableData in order to create new file
NSMutableData *newAudioData = [NSMutableData dataWithBytes:newByteData length:(int)(audioLength + delay*2)];
return newAudioData;
}
Echo effect
- (NSURL *)echo:(NSURL *)input output:(NSURL *)output{
NSMutableData *audioData = [NSMutableData dataWithContentsOfURL:input];
// we call effect method that returns a NSMutableData and create a new file
[[self effect:audioData delay:6000 attenuation:0.5f] writeToFile:[output path] atomically:YES];
// We set file's size (is a method I have implemented)
[[AudioUtils alloc] setAudioFileSize:output];
return output;
}
There's no predefined function that will create a short array from a byte array, but it should be fairly simple to do with a for loop:
// create a short array and copy the bytes into it one by one
short *shortData = malloc(sizeof(short) * audioLength);
for (NSUInteger i = 0; i < audioLength; i++)
{
    shortData[i] = byteData[i];
}
The code is not rigorously correct (I didn't compile it, just wrote it on the fly), but it should give you an idea of how to do it.
Also be aware that storing audio data with two bytes per sample instead of one can give very different results on playback, but I'll assume you know how to handle the audio data for your specific purposes.
I am new to ImageMagick, and I want to apply a ShepardsDistortion effect to a source image. I have gone through many posts and sites, but I didn't find a way to implement "ShepardsDistortion" on iOS.
MagickWand *mw = NewMagickWand();
MagickSetFormat(mw, "png");
UIImage *sourceImage=[_sourceImgView image];
NSData *imgData=UIImagePNGRepresentation(sourceImage);
MagickReadImageBlob(mw, [imgData bytes], [imgData length]);
Image *image=GetImageFromMagickWand(mw);
DistortImage(image, ShepardsDistortion, , ,);
I have gotten this far, but I don't know what to pass as the arguments to DistortImage(). If anyone knows, please help.
EDIT:
-(void)distortImage{
MagickWandGenesis();
MagickWand * wand;
MagickBooleanType status;
wand = NewMagickWand();
MagickSetFormat(wand, "png");
status = MagickReadImage(wand,"chess.png");
// Arguments for Shepards
double points[8];
points[0] = 250; // First X point (starting)
points[1] = 250; // First Y point (starting)
points[2] = 50; // First X point (ending)
points[3] = 150; // First Y point (ending)
points[4] = 500; // Second X point (starting)
points[5] = 380; // Second Y point (starting)
points[6] = 600; // Second X point (ending)
points[7] = 460; // Second Y point (ending)
MagickDistortImage(wand,ShepardsDistortion,8,points,MagickFalse);
NSString * tempFilePath = [NSTemporaryDirectory() stringByAppendingPathComponent:@"out.png"];
MagickWriteImage(wand,[tempFilePath cStringUsingEncoding:NSASCIIStringEncoding]);
UIImage * imgObj = [UIImage imageWithContentsOfFile:tempFilePath];
_resultImgView.image=imgObj;
//
// unsigned char * cBlob;
// size_t data_size;
// cBlob = MagickGetImageBlob(wand, &data_size);
// NSData * nsBlob = [NSData dataWithBytes:cBlob length:data_size];
// UIImage *uiImage = [UIImage imageWithData:nsBlob];
// _resultImgView.image=uiImage;
MagickWriteImage(wand,"out.png");
wand=DestroyMagickWand(wand);
MagickWandTerminus();
}
This might help:
MagickWandGenesis();
magick_wand = NewMagickWand();
double points[24];
points[0] = 250;
points[1] = 250;
points[2] = 50;
points[3] = 150;
points[4] = 0;
points[5] = 0;
points[6] = 0;
points[7] = 0;
points[8] = self.frame.width;
points[9] = 0;
points[10] = self.frame.width;
points[11] = 0;
points[12] = self.frame.width;
points[13] = self.frame.height;
points[14] = self.frame.width;
points[15] = self.frame.height;
points[16] = self.frame.width;
points[17] = self.frame.height;
points[18] = self.frame.width;
points[19] = self.frame.height;
points[20] = 0;
points[21] = self.frame.height;
points[22] = 0;
points[23] = self.frame.height;
NSData * dataObject = UIImagePNGRepresentation([UIImage imageNamed:@"Imagemagick-logo.png"]);//UIImageJPEGRepresentation([imageViewButton imageForState:UIControlStateNormal], 90);
MagickBooleanType status;
status = MagickReadImageBlob(magick_wand, [dataObject bytes], [dataObject length]);
if (status == MagickFalse) {
ThrowWandException(magick_wand);
}
// posterize the image, this filter uses a configuration file, that means that everything in IM should be working great
status = MagickDistortImage(magick_wand,ShepardsDistortion,24,points,MagickFalse);
//status = MagickOrderedPosterizeImage(magick_wand, "h8x8o");
if (status == MagickFalse) {
ThrowWandException(magick_wand);
}
size_t my_size;
unsigned char * my_image = MagickGetImageBlob(magick_wand, &my_size);
NSData * data = [[NSData alloc] initWithBytes:my_image length:my_size];
free(my_image);
magick_wand = DestroyMagickWand(magick_wand);
MagickWandTerminus();
UIImage * image = [[UIImage alloc] initWithData:data];
[data release];
[imageViewButton setImage:image forState:UIControlStateNormal];
[image release];
Arguments are passed to DistortImage as a pointer to a list of doubles, plus the size of that list. Example:
size_t SizeOfPoints = 8;
double Points[SizeOfPoints];
DistortImage(image,
ShepardsDistortion,
SizeOfPoints,
Points,
MagickFalse,
NULL
);
In your example, you seem to be mixing MagickWand & MagickCore methods, which seems unnecessary and confusing. I would keep this distortion simple and only use MagickWand's MagickDistortImage method. Here's an example in C:
int main(int argc, const char **argv)
{
MagickWandGenesis();
MagickWand * wand;
MagickBooleanType status;
wand = NewMagickWand();
status = MagickReadImage(wand,"logo:");
// Arguments for Shepards
double points[8];
// 250x250 -> 50x150
points[0] = 250; // First X point (starting)
points[1] = 250; // First Y point (starting)
points[2] = 50; // First X point (ending)
points[3] = 150; // First Y point (ending)
// 500x380 -> 600x460
points[4] = 500; // Second X point (starting)
points[5] = 380; // Second Y point (starting)
points[6] = 600; // Second X point (ending)
points[7] = 460; // Second Y point (ending)
MagickDistortImage(wand,ShepardsDistortion,8,points,MagickFalse);
MagickWriteImage(wand,"out.png");
wand=DestroyMagickWand(wand);
MagickWandTerminus();
return 0;
}
Resulting in a distorted translated image (details)
Edit
For iOS, you can use NSTemporaryDirectory (like in this answer), or create an image dynamically using NSData (like in this question).
Example with temporary path:
NSString * tempFilePath = [NSTemporaryDirectory()
stringByAppendingPathComponent:@"out.png"];
MagickWriteImage(self.wand,
[tempFilePath cStringUsingEncoding:NSASCIIStringEncoding]);
UIImage * imgObj = [UIImage imageWithContentsOfFile:tempFilePath];
And an example with NSData + blob
unsigned char * cBlob;
size_t data_size;
cBlob = MagickGetImageBlob(wand, &data_size);
NSData * nsBlob = [NSData dataWithBytes:cBlob length:data_size];
UIImage * uiImage = [UIImage imageWithData:nsBlob];
I'm trying to build up an NSMutableDictionary that will be converted to JSON. One of its values is the bytes of a PNG representation of a picture. So I have something like:
NSMutableDictionary *jsonDict = [[NSMutableDictionary alloc] init];
....
if ([self hasPhoto])
{
result[#"photo"] = UIImagePNGRepresentation(self.photo);
}
This later blows up because NSJSONSerialization doesn't handle things like NSData objects, which is what UIImagePNGRepresentation returns. What is a good way to encode the data? Just interpreting it as UTF-8 would likely be bad. I'm not that familiar with the details of what makes a legal string representation in JSON.
Update:
I ended up finding this link about using Apple's built-in but unadvertised functions. The code is longer, but I needed two fewer files:
NSData *data = UIImageJPEGRepresentation(self.photo, 0.5);
NSString *base64 = nil;
NSUInteger sourceLength = data.length;
NSUInteger encodeBufferLength = ((sourceLength + 2) / 3) * 4 + 1;
char *encodeBuffer = malloc(encodeBufferLength);
int encodedRealLength = b64_ntop(data.bytes, sourceLength, encodeBuffer, encodeBufferLength);
if (encodedRealLength >= 0)
{
base64 = [[NSString alloc] initWithBytesNoCopy: encodeBuffer length: encodedRealLength + 1 encoding: NSASCIIStringEncoding freeWhenDone: YES];
}
else
{
free(encodeBuffer);
}
result[#"photo-jpeg"] = base64;
This also runs about 7X faster than the Base64 solution below. Not that speed mattered AT ALL in this particular case, but someone asked.
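Side note (not part of the original post): on iOS 7 and later, NSData also has a public Base64 encoder, so neither the undocumented b64_ntop call nor a hand-rolled category is strictly necessary:
// Built-in Base64 encoding (iOS 7+); produces an equivalent string.
NSData *jpegData = UIImageJPEGRepresentation(self.photo, 0.5);
result[@"photo-jpeg"] = [jpegData base64EncodedStringWithOptions:0];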
You need to convert your image to a base64 string so that you can transmit it using JSON. Here is the link to download the base64 class files.
Then initialize it in your view controller's .m file:
[Base64 initialize];
After that convert your image to NSData and use this code:
NSString *strEncoded = [Base64 encode:webDat];
where webDat is an NSData object.
Typically, images are encoded as a base64 string for JSON. There is no built-in way (prior to iOS 7) to encode NSData to a base64 string, but the algorithm to do so is fairly well known. Create an NSData category like so:
static char encodingTable[64] = {
'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P',
'Q','R','S','T','U','V','W','X','Y','Z','a','b','c','d','e','f',
'g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v',
'w','x','y','z','0','1','2','3','4','5','6','7','8','9','+','/' };
@implementation NSData (Base64)
- (NSString *) base64EncodingWithLineLength:(unsigned int) lineLength {
const unsigned char *bytes = [self bytes];
NSMutableString *result = [NSMutableString stringWithCapacity:[self length]];
unsigned long ixtext = 0;
unsigned long lentext = [self length];
long ctremaining = 0;
unsigned char inbuf[3], outbuf[4];
short i = 0;
short charsonline = 0, ctcopy = 0;
unsigned long ix = 0;
while( YES ) {
ctremaining = lentext - ixtext;
if( ctremaining <= 0 ) break;
for( i = 0; i < 3; i++ ) {
ix = ixtext + i;
if( ix < lentext ) inbuf[i] = bytes[ix];
else inbuf [i] = 0;
}
outbuf [0] = (inbuf [0] & 0xFC) >> 2;
outbuf [1] = ((inbuf [0] & 0x03) << 4) | ((inbuf [1] & 0xF0) >> 4);
outbuf [2] = ((inbuf [1] & 0x0F) << 2) | ((inbuf [2] & 0xC0) >> 6);
outbuf [3] = inbuf [2] & 0x3F;
ctcopy = 4;
switch( ctremaining ) {
case 1:
ctcopy = 2;
break;
case 2:
ctcopy = 3;
break;
}
for( i = 0; i < ctcopy; i++ )
[result appendFormat:#"%c", encodingTable[outbuf[i]]];
for( i = ctcopy; i < 4; i++ )
[result appendFormat:#"%c",'='];
ixtext += 3;
charsonline += 4;
if( lineLength > 0 ) {
if (charsonline >= lineLength) {
charsonline = 0;
[result appendString:@"\n"];
}
}
}
return result;
}
@end
Encoding for JSON then becomes trivial:
NSData *imageData = UIImagePNGRepresentation(self.photo);
NSString *base64String = [imageData base64EncodingWithLineLength:0];
result[#"photo] = base64String;
Note that images taken with iOS devices are quite large. The amount of data you are transmitting is roughly width x height x 3 bytes (before compression and Base64 overhead). For example, an iPhone 5 has an 8 MP camera, so 3264 x 2448 x 3 is about 23 MB of data. You almost never want to transmit that much data via JSON, so you'll want to crop the photo or resize it down to something more manageable before sending.
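A minimal resizing sketch (my addition, not from the original answer), assuming a hypothetical cap of 800 points on the longest side:
// Scale an image so its longest side is at most maxDimension.
static UIImage *ResizedImage(UIImage *image, CGFloat maxDimension)
{
    CGFloat scale = MIN(1.0, maxDimension / MAX(image.size.width, image.size.height));
    CGSize newSize = CGSizeMake(image.size.width * scale, image.size.height * scale);
    UIGraphicsBeginImageContextWithOptions(newSize, NO, 1.0);
    [image drawInRect:CGRectMake(0, 0, newSize.width, newSize.height)];
    UIImage *resized = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return resized;
}
The resized image, e.g. ResizedImage(self.photo, 800), can then go through UIImagePNGRepresentation (or UIImageJPEGRepresentation) and the Base64 step shown above.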