I am trying to write code which puts a sticker on the eyes; the code is based on SquareCam.
It detects faces well, but when I try to draw my image on the left eye, it always ends up at the wrong position, even though I used the same approach as for finding the face rect.
Here are the results on my phone.
And the code is here.
for ff in features as! [CIFaceFeature] {
// find the correct position for the square layer within the previewLayer
// the feature box originates in the bottom left of the video frame.
// (Bottom right if mirroring is turned on)
var faceRect = ff.bounds
let temp = faceRect.origin.x
faceRect.origin.x = faceRect.origin.y
faceRect.origin.y = temp
// scale coordinates so they fit in the preview box, which may be scaled
let widthScaleBy = previewBox.size.width / clap.size.height
let heightScaleBy = previewBox.size.height / clap.size.width
faceRect.size.width *= widthScaleBy
faceRect.size.height *= heightScaleBy
faceRect.origin.x *= widthScaleBy
faceRect.origin.y *= heightScaleBy
var eyeRect : CGRect
eyeRect = CGRect()
eyeRect.origin.x = ff.leftEyePosition.y
eyeRect.origin.y = ff.leftEyePosition.x
eyeRect.origin.x *= widthScaleBy
eyeRect.origin.y *= heightScaleBy
eyeRect.size.width = faceRect.size.width * 0.15
eyeRect.size.height = eyeRect.size.width
if isMirrored {
faceRect = faceRect.offsetBy(dx: previewBox.origin.x + previewBox.size.width - faceRect.size.width - (faceRect.origin.x * 2), dy: previewBox.origin.y)
eyeRect = eyeRect.offsetBy(dx:previewBox.origin.x + previewBox.size.width - eyeRect.size.width - (eyeRect.origin.x * 2),dy : previewBox.origin.y)
} else {
faceRect = faceRect.offsetBy(dx: previewBox.origin.x, dy: previewBox.origin.y)
eyeRect = eyeRect.offsetBy(dx: previewBox.origin.x, dy:previewBox.origin.y)
}
print(eyeRect)
print(faceRect)
var featureLayer: CALayer? = nil
var eyeLayer : CALayer? = nil
// re-use an existing layer if possible
while featureLayer == nil && (currentSublayer < sublayersCount) {
let currentLayer = sublayers[currentSublayer];currentSublayer += 1
if currentLayer.name == "FaceLayer" {
featureLayer = currentLayer
currentLayer.isHidden = false
eyeLayer = featureLayer?.sublayers?[0]
//eyeLayer?.isHidden = false
}
}
// create a new one if necessary
if featureLayer == nil {
featureLayer = CALayer()
featureLayer!.contents = square.cgImage
featureLayer!.name = "FaceLayer"
previewLayer?.addSublayer(featureLayer!)
eyeLayer = CALayer()
eyeLayer!.contents = eyes.cgImage
eyeLayer!.name = "EyeLayer"
featureLayer?.addSublayer(eyeLayer!)
}
featureLayer!.frame = faceRect
eyeLayer!.frame = eyeRect
(0, 0) is at the bottom left for the eye positions, so you have to compute eyePosition.y = image.size.height - eyePosition.y to get into the same coordinate system as the frames.
I have the following UIImage:
Using Objective-C, I want to be able to invert the white to black and vice versa.
Any help would be appreciated.
Swift
Using CIContext instead of -UIImage:CIImage (see https://stackoverflow.com/a/28386697/218152), and building upon @wtznc's response, here is a self-contained IBDesignable:
#IBDesignable
class InvertImage: UIImageView {
#IBInspectable var originalImage:UIImage? = nil
#IBInspectable var invert:Bool = false {
didSet {
var inverted = false
if let originalImage = self.originalImage {
if(invert) {
let image = CIImage(CGImage: originalImage.CGImage!)
if let filter = CIFilter(name: "CIColorInvert") {
filter.setDefaults()
filter.setValue(image, forKey: kCIInputImageKey)
let context = CIContext(options: nil)
let imageRef = context.createCGImage(filter.outputImage!, fromRect: image.extent)
self.image = UIImage(CGImage: imageRef)
inverted = true
}
}
}
if(!inverted) {
self.image = self.originalImage
}
}
}
}
To use it, set Original Image instead of Image since Image will be dynamically associated:
Swift3
extension UIImage {
func invertedImage() -> UIImage? {
guard let cgImage = self.cgImage else { return nil }
let ciImage = CoreImage.CIImage(cgImage: cgImage)
guard let filter = CIFilter(name: "CIColorInvert") else { return nil }
filter.setDefaults()
filter.setValue(ciImage, forKey: kCIInputImageKey)
let context = CIContext(options: nil)
guard let outputImage = filter.outputImage else { return nil }
guard let outputImageCopy = context.createCGImage(outputImage, from: outputImage.extent) else { return nil }
return UIImage(cgImage: outputImageCopy, scale: self.scale, orientation: .up)
}
}
- (UIImage *)negativeImage
{
// get width and height as integers, since we'll be using them as
// array subscripts, etc, and this'll save a whole lot of casting
CGSize size = self.size;
int width = size.width;
int height = size.height;
// Create a suitable RGB+alpha bitmap context in BGRA colour space
CGColorSpaceRef colourSpace = CGColorSpaceCreateDeviceRGB();
unsigned char *memoryPool = (unsigned char *)calloc(width*height*4, 1);
CGContextRef context = CGBitmapContextCreate(memoryPool, width, height, 8, width * 4, colourSpace, kCGBitmapByteOrder32Big | kCGImageAlphaPremultipliedLast);
CGColorSpaceRelease(colourSpace);
// draw the current image to the newly created context
CGContextDrawImage(context, CGRectMake(0, 0, width, height), [self CGImage]);
// run through every pixel, a scan line at a time...
for(int y = 0; y < height; y++)
{
// get a pointer to the start of this scan line
unsigned char *linePointer = &memoryPool[y * width * 4];
// step through the pixels one by one...
for(int x = 0; x < width; x++)
{
// get RGB values. We're dealing with premultiplied alpha
// here, so we need to divide by the alpha channel (if it
// isn't zero, of course) to get uninflected RGB. We
// multiply by 255 to keep precision while still using
// integers
int r, g, b;
if(linePointer[3])
{
r = linePointer[0] * 255 / linePointer[3];
g = linePointer[1] * 255 / linePointer[3];
b = linePointer[2] * 255 / linePointer[3];
}
else
r = g = b = 0;
// perform the colour inversion
r = 255 - r;
g = 255 - g;
b = 255 - b;
if ( (r+g+b) / (3*255) == 0 )
{
linePointer[0] = linePointer[1] = linePointer[2] = 0;
linePointer[3] = 0;
}
else
{
// multiply by alpha again, divide by 255 to undo the
// scaling before, store the new values and advance
// the pointer we're reading pixel data from
linePointer[0] = r * linePointer[3] / 255;
linePointer[1] = g * linePointer[3] / 255;
linePointer[2] = b * linePointer[3] / 255;
}
linePointer += 4;
}
}
// get a CG image from the context, wrap that into a
// UIImage
CGImageRef cgImage = CGBitmapContextCreateImage(context);
UIImage *returnImage = [UIImage imageWithCGImage:cgImage];
// clean up
CGImageRelease(cgImage);
CGContextRelease(context);
free(memoryPool);
// and return
return returnImage;
}
I added the above method inside a UIImage extension class.
Firstly, you have to add the Core Image framework to your project.
Project settings -> Targets "project name" -> Build phases -> Link Binary With Libraries -> Add items -> CoreImage.framework
Secondly, import the Core Image header to your implementation file.
#import <CoreImage/CoreImage.h>
Initialize an UIImage object to store the original file.
UIImage *inputImage = [UIImage imageNamed:@"imageNamed"];
Create a CIFilter to define how you want to modify your original UIImage object.
CIFilter* filter = [CIFilter filterWithName:@"CIColorInvert"];
[filter setDefaults];
[filter setValue:inputImage.CIImage forKey:@"inputImage"];
Create another UIImage object to keep modified image.
UIImage *outputImage = [[UIImage alloc] initWithCIImage:filter.outputImage];
Voilà! Hope it will help.
For Xamarin C# (iOS-specific; not a cross-platform solution):
public static UIImage InvertImageColors( UIImage original )
{
return ApplyFilter( original, new CIColorInvert() );
}
public static UIImage ApplyFilter( UIImage original, CIFilter filter )
{
UIImage result;
try {
CIImage coreImage = original.CGImage;
filter.SetValueForKey( coreImage, CIFilterInputKey.Image );
CIImage output = filter.OutputImage;
CIContext context = CIContext.FromOptions( null );
CGImage cgImage = context.CreateCGImage( output, output.Extent );
result = UIImage.FromImage( cgImage );
} catch (Exception ex) {
// .. Log error here ..
result = original;
}
return result;
}
NOTE #1: Adapted from someone else's answer; I've lost track of where the original is.
NOTE #2: Deliberately done with one small step per line, so you can see the intermediate types, and easily adapt to other situations.
Also see Xamarin docs - CIFilter class
I don't know how to make part of the text bold in a LabelField in BlackBerry.
Kindly refer the attached image,
This example should contain everything you'll need. Your code should look something like this:
String copyText = "Tap NEXT to proceed";
Font[] fonts = new Font[2];
fonts[0] = Font.getDefault();
fonts[1] = Font.getDefault().derive(Font.BOLD);
int[] offsets = new int[4];
offsets[0] = 0;
offsets[1] = "Tap ".length
offsets[2] = "Tap NEXT".length;
offsets[3] = copyText.length();
byte[] attributes = new byte[3];
attributes[0] = 0;
attributes[1] = 1;
attributes[2] = 0;
RichTextField textField = new RichTextField(copyText, offsets, attributes, fonts, NON_FOCUSABLE);
add(textField);
I have the following MonoTouch code which can change the Saturation , but I am trying to also change the Hue.
float hue = 0;
float saturation = 1;
if (colorCtrls == null)
colorCtrls = new CIColorControls() {
Image = CIImage.FromCGImage (originalImage.CGImage) };
else
colorCtrls.Image = CIImage.FromCGImage(originalImage.CGImage);
colorCtrls.Saturation = saturation;
var output = colorCtrls.OutputImage;
var context = CIContext.FromOptions(null);
var result = context.CreateCGImage(output, output.Extent);
return UIImage.FromImage(result);
It's part of a different filter so you'll need to use CIHueAdjust instead of CIColorControls to control the hue.
Here's what I ended up doing to add Hue:
var hueAdjust = new CIHueAdjust() {
Image = CIImage.FromCGImage(originalImage.CGImage),
Angle = hue // Default is 0
};
var output = hueAdjust.OutputImage;
var context = CIContext.FromOptions(null);
var cgimage = context.CreateCGImage(output, output.Extent);
return UIImage.FromImage(cgimage);
However, this does not work on Retina devices, the image returned is scaled incorrectly.
I would like to embed some text into a circle in ActionScript. I have three problems: I can't get the text to center in the circle, I can't get the text to be center aligned, and I can't get the font to be applied to the text. Regarding the font, I know that it is embedded correctly, as it works on TextFields I create on the stage.
[Embed(source="DAXCOMPL.TTF", fontName="DaxCompact-Light", mimeType='application/x-font', embedAsCFF='false')]
private var MyFont:Class;
public function Bubble(...) {
var myFont:Font = new MyFont();
var myFormat:TextFormat = new TextFormat();
myFormat.size = 20;
myFormat.align = TextFormatAlign.CENTER;
myFormat.font = myFont.fontName;
var circle:Sprite = new Sprite();
var r:int = 30;
var text:TextField = new TextField();
text.text = "Hello world!";
text.wordWrap = true;
text.defaultTextFormat = myFormat;
text.autoSize = TextFieldAutoSize.LEFT;
text.x = -30;
text.y = -30;
circle.graphics.lineStyle(2, 0x000000, 1.0);
circle.graphics.drawCircle(0,0,r);
circle.graphics.endFill();
circle.addChild(text);
circle.x = 75;
circle.y = 450;
addChild(circle);
}
Try to initialize the text field this way:
var text:TextField = new TextField();
text.embedFonts = true; // use embedded font
text.defaultTextFormat = myFormat; // use this command before setting text
text.text = "Hello world!";
text.wordWrap = true;
text.autoSize = TextFieldAutoSize.LEFT;
text.x = -text.textHeight*0.5; //center the textfield after setting text
text.y = -text.textWidth*0.5;