Flutter cannot register a texture on any iOS device - ios

I'm working on a small flutter app that requires texture rendering. My typical plugin looks like this:
.....
private var textures: FlutterTextureRegistry!
private var renderers = [Int64: FlutterRTCVideoRenderer]()

public static func register(with registrar: FlutterPluginRegistrar) {
    let channel = FlutterMethodChannel(name: "mypluginchannel", binaryMessenger: registrar.messenger())
    let instance = MyPlugin(registrar.textures(), registrar.messenger(), registrar)
    registrar.addMethodCallDelegate(instance, channel: channel)
}

init(_ textures: FlutterTextureRegistry, _ messenger: FlutterBinaryMessenger, _ registry: FlutterPluginRegistrar) {
    super.init()
    self.textures = textures
    self.messenger = messenger
    self.registry = registry
}
- (void)handleMethodCall:(FlutterMethodCall*)call result:(FlutterResult)result {
    if ([@"createTexture" isEqualToString:call.method]) {
        CGFloat width = [call.arguments[@"width"] floatValue];
        CGFloat height = [call.arguments[@"height"] floatValue];
        __block NSInteger textureId;
        __weak id<FlutterTextureRegistry> registry = self.textures;
        OpenGLRender *render = [[OpenGLRender alloc] initWithSize:CGSizeMake(width, height)
                                                           worker:[[SampleRenderWorker alloc] init]
                                                       onNewFrame:^{
                                                           [registry textureFrameAvailable:textureId];
                                                       }];
        textureId = [self.textures registerTexture:render]; // Always fails and returns 0
        self.renders[@(textureId)] = render;
        result(@(textureId));
    }
}
textureId is always 0, which (according to the documentation) means the texture was not registered.
I really have no idea how to fix this. Is there any logging mechanism or error code I can obtain from somewhere to know what's going on? Any ideas appreciated, thanks.
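For context, this is roughly the contract the texture registry expects: an NSObject conforming to FlutterTexture whose copyPixelBuffer() hands back a CVPixelBuffer. The sketch below uses hypothetical names and is not taken from the plugin above; it only illustrates the shape of a registrable texture.

import Flutter
import CoreVideo

// A minimal sketch of a FlutterTexture conformance (hypothetical names).
// Registering an object like this on a valid registry should return a non-zero id.
class SolidColorTexture: NSObject, FlutterTexture {
    private var pixelBuffer: CVPixelBuffer?

    override init() {
        super.init()
        CVPixelBufferCreate(kCFAllocatorDefault, 64, 64,
                            kCVPixelFormatType_32BGRA, nil, &pixelBuffer)
    }

    func copyPixelBuffer() -> Unmanaged<CVPixelBuffer>? {
        guard let buffer = pixelBuffer else { return nil }
        // The engine takes ownership of the +1 reference.
        return Unmanaged.passRetained(buffer)
    }
}

// Inside the plugin's method handler (sketch):
// let textureId = textures.register(SolidColorTexture())
// result(textureId) // 0 would indicate the registration failed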

Related

Has anyone tried the Vision API (VNHomographicImageRegistrationRequest) in iOS 11?

I am studying a currency recognition problem that relates to the Vision framework in iOS 11.
I'm having trouble with VNHomographicImageRegistrationRequest, which determines the perspective warp matrix needed to align the content of two images. I couldn't find how to pass the two image parameters into this API. Can anyone help me?
Apple's Vision framework flow is always the same: Request -> Handler -> Observation
Example:
// referenceAsset & asset2 can be:
// CGImage - CIImage - URL - Data - CVPixelBuffer
// Check the initializers for more info
let request = VNHomographicImageRegistrationRequest(targetedCGImage: asset2, options: [:])
let handler = VNSequenceRequestHandler()
try! handler.perform([request], on: referenceAsset)

if let results = request.results as? [VNImageHomographicAlignmentObservation] {
    print("Perspective warp found: \(results.count)")
    results.forEach { observation in
        // A matrix with 3 rows and 3 columns.
        print(observation.warpTransform)
    }
}
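As a rough sketch of what you can do with the result: the warpTransform is a 3x3 homography, so you can apply it to a point in homogeneous coordinates. The helper below is my own (not part of the Vision API), and whether the matrix maps target-to-reference or the reverse is an assumption worth verifying against your own images.

import simd
import CoreGraphics

// Apply a 3x3 homography to a 2D point (homogeneous coordinates).
// Assumption: the matrix maps points from the target image into the
// reference image's coordinate space; verify the direction in practice.
func apply(_ warp: matrix_float3x3, to point: CGPoint) -> CGPoint {
    let p = SIMD3<Float>(Float(point.x), Float(point.y), 1)
    let w = warp * p
    return CGPoint(x: CGFloat(w.x / w.z), y: CGFloat(w.y / w.z))
}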
- (matrix_float3x3)predictWithVisionFromImage:(UIImage *)imageTarget toReferenceImage:(UIImage *)imageRefer {
    UIImage *scaledImageTarget = [imageTarget scaleToSize:CGSizeMake(224, 224)];
    CVPixelBufferRef bufferTarget = [imageTarget pixelBufferFromCGImage:scaledImageTarget];
    UIImage *scaledImageRefer = [imageRefer scaleToSize:CGSizeMake(224, 224)];
    CVPixelBufferRef bufferRefer = [imageRefer pixelBufferFromCGImage:scaledImageRefer];

    VNHomographicImageRegistrationRequest *request = [[VNHomographicImageRegistrationRequest alloc] initWithTargetedCVPixelBuffer:bufferTarget completionHandler:nil];
    VNImageRequestHandler *handler = [[VNImageRequestHandler alloc] initWithCVPixelBuffer:bufferRefer options:@{}];
    [handler performRequests:@[request] error:nil];

    NSArray *resultsArr = request.results;
    VNImageHomographicAlignmentObservation *firstObservation = [resultsArr firstObject];
    return firstObservation.warpTransform;
}

SocketScan Getting the Battery Level in Swift

Whatever I try, I cannot currently get the battery level back from the iOS SocketScan API. I am using version 10.3.36; here is my code so far:
func onDeviceArrival(result: SKTRESULT, device deviceInfo: DeviceInfo!) {
    print("onDeviceArrival:\(deviceInfo.getName())")
    scanApiHelper.postGetBattery(deviceInfo, target: self, response: #selector(onGetBatteryInfo))
}

func onGetBatteryInfo(scanObj: ISktScanObject) {
    let result: SKTRESULT = scanObj.Msg().Result()
    print("GetBatteryInfo status:\(result)")
    if result == ESKT_NOERROR {
        let batteryLevel = scanObj.Property().getUlong()
        print("Battery is:\(batteryLevel)")
    } else {
        print("Error GetBatteryInfo status:\(result)")
    }
}
However, the values I get back are:
GetBatteryInfo status:0
Battery is:1677741312
If my code is correct, how do I turn the result I get back into something meaningful, like a percentage? And if I'm way off, how do I get back info like the battery level, firmware version, etc.?
Thanks
David
EDIT: SKTBATTERY_GETCURLEVEL isn't supported in Swift. However, the docs explain that the battery level response includes the min, current and max levels encoded in the first, second and third bytes, respectively.
The following is equivalent to using SKTBATTERY_GETCURLEVEL:
Swift
func onGetBatteryInfo(scanObj: ISktScanObject) {
    let result: SKTRESULT = scanObj.Msg().Result()
    if SKTSUCCESS(result) {
        let batteryInfo = scanObj.Property().getUlong()
        // min, current and max are packed into the first, second and third bytes (see above)
        let batteryMin = batteryInfo & 0xff
        let batteryCurrent = (batteryInfo >> 8) & 0xff
        let batteryMax = (batteryInfo >> 16) & 0xff
        let batteryPercentage = (batteryCurrent - batteryMin) * 100 / (batteryMax - batteryMin)
        print("Battery is:\(batteryPercentage)")
        self.setBatteryLevel = batteryPercentage
        self.tableView.reloadData()
    } else {
        print("Error GetBatteryInfo status:\(result)")
    }
}
Objective-C
- (void)onGetBatteryInfo:(ISktScanObject *)scanObj {
    SKTRESULT result = [[scanObj Msg] Result];
    if (SKTSUCCESS(result)) {
        long batteryLevel = SKTBATTERY_GETCURLEVEL([[scanObj Property] getUlong]);
        NSLog(@"BatteryInfo %ld", batteryLevel);
        [self setBatteryLevel:batteryLevel];
        [self.tableView reloadData];
    } else {
        NSLog(@"Error GetBatteryInfo status: %ld", result);
    }
}
Here's the code I use. There's a variable defined in the appDelegate for batteryPercentage, and that is read whenever the value is needed. The value is updated every 120 seconds by a timer; this way actions can occur as the level drops, etc.
func onBatteryLevel(scanObj: ISktScanObject) {
    let result: SKTRESULT = scanObj.Msg().Result()
    if SKTRESULT(result) > -1 {
        let property: ISktScanProperty = scanObj.Property()
        var batteryLevel = property.getUlong()
        #if arch(x86_64) || arch(arm64)
            batteryLevel = (batteryLevel << 48) >> 56
        #else
            batteryLevel = (batteryLevel << (48 - 32)) >> (56 - 32)
        #endif
        batteryPercentage = Int(batteryLevel)
    } else {
        debug("data error \(result)")
    }
}
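The shift pair above isolates the second byte of the packed value (the current level) while staying within 32-bit arithmetic on older devices. As a rough sketch under the byte layout described in the EDIT above (min, current, max in the first three bytes), the same unpacking with plain masks looks like this; the helper name is mine, not part of the SDK:

// Sketch only: unpack the battery value with masks instead of shift pairs.
// Assumes min = byte 0, current = byte 1, max = byte 2 (per the EDIT above).
func batteryPercentage(fromPacked packed: Int) -> Int {
    let minimum = packed & 0xff
    let current = (packed >> 8) & 0xff
    let maximum = (packed >> 16) & 0xff
    guard maximum > minimum else { return current }
    return (current - minimum) * 100 / (maximum - minimum)
}

// Example: min 0, current 55, max 100 packed as 0x64_37_00 -> 55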
For Swift 4 I just came across this problem and came up with the following solution.
var lastDeviceConnected: CaptureHelperDevice? {
    didSet {
        guard let lastDevice = self.lastDeviceConnected else { return }
        lastDevice.getBatteryLevelWithCompletionHandler { result, batteryLevel in
            guard result == SKTResult.E_NOERROR, let batteryLevel = batteryLevel else { return }
            let minimum = SKTHelper.getMinimumLevel(fromBatteryLevel: Int(batteryLevel))
            let maximum = SKTHelper.getMaximumLevel(fromBatteryLevel: Int(batteryLevel))
            let current = SKTHelper.getCurrentLevel(fromBatteryLevel: Int(batteryLevel))
            print("minimum: \(minimum)")
            print("maximum: \(maximum)")
            print("current: \(current)")
            // current is our battery level in %
            // minimum and maximum could, for example, be used for
            // a slider or something that visually presents
            // the battery status
        }
    }
}
In my example I'm not handling the cases where there is no device or the battery status couldn't be retrieved as expected; I simply guard and return. In your code you might want to handle those cases.

How to read QR code from static image

I know that you can use AVFoundation to scan a QR code using the device's camera. Now here comes the problem: how can I do this from a static UIImage object?
Swift 4 version of @Neimsz's answer
func detectQRCode(_ image: UIImage?) -> [CIFeature]? {
    if let image = image, let ciImage = CIImage(image: image) {
        var options: [String: Any]
        let context = CIContext()
        options = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
        let qrDetector = CIDetector(ofType: CIDetectorTypeQRCode, context: context, options: options)
        if ciImage.properties.keys.contains((kCGImagePropertyOrientation as String)) {
            options = [CIDetectorImageOrientation: ciImage.properties[(kCGImagePropertyOrientation as String)] ?? 1]
        } else {
            options = [CIDetectorImageOrientation: 1]
        }
        let features = qrDetector?.features(in: ciImage, options: options)
        return features
    }
    return nil
}
How to use
if let features = detectQRCode(#imageLiteral(resourceName: "qrcode")), !features.isEmpty {
    for case let row as CIQRCodeFeature in features {
        print(row.messageString ?? "nope")
    }
}
And during execution this doesn't produce the "Finalizing CVPixelBuffer 0x170133e20 while lock count is 1" warning.
I used the following QR code image (QR code = https://jingged.com)
(Tested on iPhone 6 simulator with iOS version 11.2)
Output:
2018-03-14 15:31:13.159400+0530 TestProject[25889:233062] [MC] Lazy loading NSBundle MobileCoreServices.framework
2018-03-14 15:31:13.160302+0530 TestProject[25889:233062] [MC] Loaded MobileCoreServices.framework
https://jingged.com
The iOS API provides the CIDetector class from the Core Image framework.
CIDetector lets you find specific patterns in images, like faces, smiles, eyes, or in our case: QR codes.
Here is the code to detect a QR code from a UIImage in Objective-C:
- (NSArray *)detectQRCode:(UIImage *)image
{
    @autoreleasepool {
        NSLog(@"%@ :: %@", NSStringFromClass([self class]), NSStringFromSelector(_cmd));
        NSCAssert(image != nil, @"**Assertion Error** detectQRCode : image is nil");

        CIImage *ciImage = image.CIImage; // assuming the underlying data is a CIImage
        //CIImage *ciImage = [[CIImage alloc] initWithCGImage:image.CGImage];
        // to use if the underlying data is a CGImage

        NSDictionary *options;
        CIContext *context = [CIContext context];
        options = @{ CIDetectorAccuracy : CIDetectorAccuracyHigh }; // Slow but thorough
        //options = @{ CIDetectorAccuracy : CIDetectorAccuracyLow }; // Fast but superficial

        CIDetector *qrDetector = [CIDetector detectorOfType:CIDetectorTypeQRCode
                                                    context:context
                                                    options:options];
        if ([[ciImage properties] valueForKey:(NSString *)kCGImagePropertyOrientation] == nil) {
            options = @{ CIDetectorImageOrientation : @1 };
        } else {
            options = @{ CIDetectorImageOrientation : [[ciImage properties] valueForKey:(NSString *)kCGImagePropertyOrientation] };
        }

        NSArray *features = [qrDetector featuresInImage:ciImage
                                                options:options];
        return features;
    }
}
The returned NSArray * will contain CIFeature * objects if a QR code is present and detected. If there was no QR code, the NSArray * will be nil. If the QR code decoding fails, the NSArray * will have no elements.
To obtain the encoded string:
if (features != nil && features.count > 0) {
    for (CIQRCodeFeature *qrFeature in features) {
        NSLog(@"QRFeature.messageString : %@", qrFeature.messageString);
    }
}
As in @Duncan-C's answer, you can then extract the QR code corners and draw an enclosing bounding box around the QR code on the image.
Note:
Under iOS 10.0 beta 6, the call to [qrDetector featuresInImage:ciImage options:options], when using images coming from the camera sample buffer, logs this internal warning (it runs smoothly but spams the console with this message, and I could not find a way to get rid of it for now):
Finalizing CVPixelBuffer 0x170133e20 while lock count is 1.
Source :
Apple Dev API Reference - CIDetector
Apple Dev API Programming guide - Face detection
None of the answers here were extremely straightforward with regard to returning the decoded message strings. I made a tiny extension that works well for me:
https://gist.github.com/freak4pc/3f7ae2801dd8b7a068daa957463ac645
extension UIImage {
    func parseQR() -> [String] {
        guard let image = CIImage(image: self) else {
            return []
        }

        let detector = CIDetector(ofType: CIDetectorTypeQRCode,
                                  context: nil,
                                  options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])

        let features = detector?.features(in: image) ?? []

        return features.compactMap { feature in
            return (feature as? CIQRCodeFeature)?.messageString
        }
    }
}
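A quick usage sketch (the asset name here is just an example, not from the original answer):

// Assuming "qrcode" is an image in the asset catalog.
let messages = UIImage(named: "qrcode")?.parseQR() ?? []
print(messages) // e.g. ["https://example.com"]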
Core Image has the CIDetector class, with CIDetectorTypeQRCode for detecting QR codes. You can feed a Core Image detector either a still image or video.
That should meet your needs. See the Xcode docs for more info.
The GitHub repo iOS8-day-by-day from ShinobiControls includes a project, LiveDetection, that shows how to use CIDetectorTypeQRCode both from a video feed and from a still image. It looks like it hasn't been updated for Swift 2.0, and I wasn't able to get it to compile under Xcode 7.2.1, but the function performQRCodeDetection in the project DOES compile. (The compile problems are with code that handles all the horrible type-casting you have to deal with for CVPixelBuffers in Swift, which doesn't matter if all you want to do is find QR codes in static images.)
EDIT:
Here is the key method from that site (in Swift)
func performQRCodeDetection(image: CIImage) -> (outImage: CIImage?, decode: String) {
    var resultImage: CIImage?
    var decode = ""
    if let detector = detector {
        let features = detector.featuresInImage(image)
        for feature in features as! [CIQRCodeFeature] {
            resultImage = drawHighlightOverlayForPoints(image,
                                                        topLeft: feature.topLeft,
                                                        topRight: feature.topRight,
                                                        bottomLeft: feature.bottomLeft,
                                                        bottomRight: feature.bottomRight)
            decode = feature.messageString
        }
    }
    return (resultImage, decode)
}
If you just need a string, you can use code like this:
class QRToString {
    func string(from image: UIImage) -> String {
        var qrAsString = ""
        guard let detector = CIDetector(ofType: CIDetectorTypeQRCode,
                                        context: nil,
                                        options: [CIDetectorAccuracy: CIDetectorAccuracyHigh]),
            let ciImage = CIImage(image: image),
            let features = detector.features(in: ciImage) as? [CIQRCodeFeature] else {
                return qrAsString
        }

        for feature in features {
            guard let indeedMessageString = feature.messageString else {
                continue
            }
            qrAsString += indeedMessageString
        }

        return qrAsString
    }
}
Use the ZBar SDK to read a QR code from a static image.
ZBar-SDK-iOS
Please check this tutorial on integrating the ZBar SDK:
ZBar SDK Integration Tutorial
Then try to scan the static image. Use the ZBar scanner class to scan your image. Here is the documentation:
ZBarImageScanner
Just as an example, here is how to use the ZBar scanner class:
ZBarImageScanner *scandoc = [[ZBarImageScanner alloc] init];
NSInteger resultsnumber = [scandoc scanImage:yourUIImage];
if (resultsnumber > 0) {
    ZBarSymbolSet *results = scandoc.results;
    // Here you will get the result.
}
The link below will help you:
scaning-static-uiimage-using-ios-zbar-sdk
Objective-C
- (NSString *)readQR {
    CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeQRCode
                                              context:nil
                                              options:@{ CIDetectorAccuracy : CIDetectorAccuracyHigh }];
    /// in here you can replace `self` with your image value
    CIImage *ciImage = [[CIImage alloc] initWithImage:self];
    NSArray *features = [detector featuresInImage:ciImage];
    if ([features count] == 0) {
        return nil;
    }

    __block NSString *qrString = @"";
    [features enumerateObjectsUsingBlock:^(CIQRCodeFeature * _Nonnull obj, NSUInteger idx, BOOL * _Nonnull stop) {
        qrString = obj.messageString;
    }];

    return qrString;
}

CIDetector.RectDetector bounds to view bounds coordinates

So, I am trying to display a rectangle around a detected document (A4).
I am using an AVCaptureSession for the feed along with an AVCaptureStillImageOutput output.
NSError Error = null;
Session = new AVCaptureSession();
AVCaptureDevice Device = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video);
AVCaptureDeviceInput DeviceInput = AVCaptureDeviceInput.FromDevice(Device, out Error);
Session.AddInput(DeviceInput);
AVCaptureStillImageOutput CaptureOutput = new AVCaptureStillImageOutput();
CaptureOutput.OutputSettings = new NSDictionary(AVVideo.CodecKey, AVVideo.CodecJPEG) ;
Session.AddOutput(CaptureOutput);
I have a timer that takes the output and passes that to my handler
NSTimer.CreateRepeatingScheduledTimer(TimeSpan.Parse("00:00:02"), delegate
{
CaptureImageWithMetadata(CaptureOutput,CaptureOutput.Connections[0]);
});
I also have an AVCaptureVideoPreviewLayer with its bounds set to full screen (iPad Mini, portrait):
PreviewLayer = new AVCaptureVideoPreviewLayer(Session);
PreviewLayer.Frame = this.View.Frame;
PreviewLayer.VideoGravity = AVLayerVideoGravity.ResizeAspectFill;
this.View.Layer.AddSublayer(PreviewLayer);
PreviewLayer.ZPosition = (PreviewLayer.ZPosition - 1);
Below is the handler
private async void CaptureImageWithMetadata(AVCaptureStillImageOutput output, AVCaptureConnection connection)
{
    var sampleBuffer = await output.CaptureStillImageTaskAsync(connection);
    var imageData = AVCaptureStillImageOutput.JpegStillToNSData(sampleBuffer);
    var image = CIImage.FromData(imageData);
    var metadata = image.Properties.Dictionary.MutableCopy() as NSMutableDictionary;

    CIContext CT = CIContext.FromOptions(null);
    CIDetectorOptions OP = new CIDetectorOptions();
    OP.Accuracy = FaceDetectorAccuracy.High;
    OP.AspectRatio = 1.41f;

    CIDetector CI = CIDetector.CreateRectangleDetector(CT, OP);
    CIFeature[] HH = CI.FeaturesInImage(image, CIImageOrientation.BottomRight);

    CGAffineTransform Transfer = CGAffineTransform.MakeScale(1, -1);
    Transfer = CGAffineTransform.Translate(Transfer, 0, -this.View.Bounds.Size.Height);

    if (HH.Length > 0)
    {
        CGRect RECT = CGAffineTransform.CGRectApplyAffineTransform(HH[0].Bounds, Transfer);
        Console.WriteLine("start");
        Console.WriteLine("IMAGE : " + HH[0].Bounds.ToString());
        Console.WriteLine("SCREEN :" + RECT.ToString());
        Console.WriteLine("end");
        BB.Frame = RECT;
        BB.Hidden = false;
    }
}
However, despite following a guide that suggested I need to convert the coordinates, my highlighter (green) does not surround the document, and I can't figure out why.
I am using CIImageOrientation.BottomRight just as a test, but no matter what I put here I always get the same result. See the images.
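As a hedged sketch of the conversion that is usually needed (written in Swift for brevity rather than Xamarin C#): CIDetector returns bounds in image pixels with a bottom-left origin, so the rect has to be scaled from image space into view space and then flipped vertically. With ResizeAspectFill you would additionally have to account for the cropped region; the helper and its names below are illustrative, not part of any API.

import UIKit
import CoreImage

// Map a CIDetector feature rect (image pixels, origin bottom-left) into a
// view's coordinate space (points, origin top-left). Assumes the preview
// fills the view without aspect-fill cropping.
func convertToViewCoordinates(featureBounds: CGRect,
                              imageExtent: CGRect,
                              viewBounds: CGRect) -> CGRect {
    let scaleX = viewBounds.width / imageExtent.width
    let scaleY = viewBounds.height / imageExtent.height

    // Scale from image pixels to view points.
    var rect = featureBounds.applying(CGAffineTransform(scaleX: scaleX, y: scaleY))

    // Flip the Y axis: Core Image uses a bottom-left origin, UIKit a top-left one.
    rect.origin.y = viewBounds.height - rect.origin.y - rect.height
    return rect
}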

No head tracking in demo scene

I have downloaded the SDK and tried to load up the demo scene, but there is no head tracking when I export an APK. Any suggestions?
On installing the APK, I get the stereo scene and the cube is rotating, but no head tracking. I can turn the distortion on and off, but I have not found where I can enable head tracking.
Any help would be much appreciated.
1.) Create a new C# script in Unity3D with this content:
using UnityEngine;
using System.Collections;

public class VROneRotateAround : MonoBehaviour {

    public Transform target;
    public float distance = 5f;
    public Vector3 offset = Vector3.zero;

    public bool useAngleX = true;
    public bool useAngleY = true;
    public bool useAngleZ = true;

    public bool useRealHorizontalAngle = true;
    public bool resetViewOnTouch = true;

    private Quaternion initialRotation = Quaternion.identity;
    private Quaternion currentRotation;

    private static Vector3 gyroAngles; // original angles from gyro
    private static Vector3 usedAngles; // converted into unity world coordinates

    private int userSleepTimeOut; // original device SleepTimeOut setting
    private bool gyroAvail = false;

    void Start() {
        Input.compensateSensors = true;
        transform.position = (target.position + offset) - transform.forward * distance;
    }

    void FixedUpdate() {
        if (gyroAvail == false) {
            if (Input.gyro.attitude.eulerAngles != Vector3.zero && Time.frameCount > 30) {
                gyroAvail = true;
                initialRotation = Input.gyro.attitude;
                target.gameObject.SendMessage("StartFlight");
            }
            return; // early out
        }

        // reset origin on touch or not yet set origin
        if (resetViewOnTouch && (Input.touchCount > 0))
            initialRotation = Input.gyro.attitude;

        // new rotation
        currentRotation = Quaternion.Inverse(initialRotation) * Input.gyro.attitude;
        gyroAngles = currentRotation.eulerAngles;

        //usedAngles = Quaternion.Inverse(currentRotation).eulerAngles;
        usedAngles = gyroAngles;

        // reset single angle values
        if (useAngleX == false)
            usedAngles.x = 0f;
        if (useAngleY == false)
            usedAngles.y = 0f;
        if (useAngleZ == false)
            usedAngles.z = 0f;

        if (useRealHorizontalAngle)
            usedAngles.y *= -1;

        transform.localRotation = Quaternion.Euler(new Vector3(-usedAngles.x, usedAngles.y, usedAngles.z));
        transform.position = (target.position + offset) - transform.forward * distance;
    }

    public static Vector3 GetUsedAngles() {
        return usedAngles;
    }

    public static Vector3 GetGyroAngles() {
        return gyroAngles;
    }

    public void ResetView() {
        initialRotation = Input.gyro.attitude;
    }

    void OnEnable() {
        // sensor on
        Input.gyro.enabled = true;
        initialRotation = Quaternion.identity;
        gyroAvail = false;

        // store device sleep timeout setting
        userSleepTimeOut = Screen.sleepTimeout;
        // disable sleep timeout when app is running
        Screen.sleepTimeout = SleepTimeout.NeverSleep;
    }

    void OnDisable() {
        // restore original sleep timeout
        Screen.sleepTimeout = userSleepTimeOut;
        // sensor off
        Input.gyro.enabled = false;
    }
}
2.) Open the demo scene and attach this script to the VROneSDK prefab.
3.) In the property editor, select Cube as Target and enter 6 for Distance.
4.) Build the app and test it on a device, or use Unity Remote to test the behaviour in the editor.
The SDK seems to run only on the supported devices; you need to get an S5 to get it working. You also need the Android Pro plugin for Unity.
We actually did get the distortion going, but the screen is cropped by about 10 pixels on either side. That can't be right.
Same here: I see the demo scene (rotating cube on a grid backdrop and three light sources) in a left/right split-screen, but there is no head tracking. No pre-distortion either.
This is on a Samsung Galaxy S3 (Android 4.3) with Unity 4.5.3f3.
