App Crashes when accessing component's setter (iOS 13) - ios

I wrote this code based on Apple's example, but the app crashes whenever the setter of the custom entity component is accessed (in the reveal() method).
I tried initializing the component explicitly to see what would happen, but then it crashed at anchor.addChild(card) (ViewController line 89) instead.
It runs without any problem on iPadOS 14, which makes it even stranger.
Does anyone know why this happens and how to fix it?
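For reference, RealityKit requires custom components to be registered with registerComponent() before they are used. A minimal sketch of registering CardComponent as early as possible, e.g. in the app delegate (this placement is only an assumption, not a confirmed fix for the iOS 13 crash):
import UIKit
import RealityKit

@UIApplicationMain
class AppDelegate: UIResponder, UIApplicationDelegate {
    var window: UIWindow?

    func application(_ application: UIApplication,
                     didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]? = nil) -> Bool {
        // Register the custom component before any CardEntity is created
        // or its card setter is touched.
        CardComponent.registerComponent()
        return true
    }
}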
My code:
// CardEntity.swift
// Card-Game
//
// Created by Ian Liu on 2020/7/21.
//
import Foundation
import RealityKit
class CardEntity: Entity, HasModel, HasCollision {
required init() {
super.init()
self.components[ModelComponent] = ModelComponent(
mesh: .generateBox(width: 0.09, height: 0.005, depth: 0.09),
materials: [SimpleMaterial(
color: .orange,
isMetallic: false)
]
)
}
var card: CardComponent {
get { components[CardComponent] ?? CardComponent() }
set { components[CardComponent] = newValue }
}
}
extension CardEntity {
func reveal() {
card.revealed = true
var transform = self.transform
transform.rotation = simd_quatf(angle: 0, axis: [1, 0, 0])
move(to: transform, relativeTo: parent, duration: 0.25, timingFunction: .easeInOut)
}
func hide() {
card.revealed = false
var transform = self.transform
transform.rotation = simd_quatf(angle: .pi, axis: [1, 0, 0])
move(to: transform, relativeTo: parent, duration: 0.25, timingFunction: .easeInOut)
}
}
// CardComponent.swift
// Card-Game
//
// Created by Ian Liu on 2020/7/21.
//
import Foundation
import RealityKit
struct CardComponent: Component, Codable {
var revealed = false
var id = -1
init() {
self.revealed = false
self.id = -1
}
}
//
// ViewController.swift
// Card-Game
//
// Created by 劉學逸 on 7/16/20.
//
import UIKit
import ARKit
import RealityKit
import Combine
class ViewController: UIViewController, ARSessionDelegate {
@IBOutlet var arView: ARView!
let cardCount = 16
var prevCard: CardEntity? = nil
override func viewDidLoad() {
super.viewDidLoad()
arView.center = self.view.center
arView.centerXAnchor.constraint(equalTo: self.view.centerXAnchor).isActive = true
arView.centerYAnchor.constraint(equalTo: self.view.centerYAnchor).isActive = true
arView.session.delegate = self
let config = ARWorldTrackingConfiguration()
if ARWorldTrackingConfiguration.supportsFrameSemantics(.personSegmentation) {
config.frameSemantics.insert(.personSegmentationWithDepth)
}
config.planeDetection = [.horizontal]
arView.session.run(config)
addCoachingOverlay()
let anchor = AnchorEntity(plane: .horizontal, minimumBounds: [0.2, 0.2])
arView.scene.addAnchor(anchor)
CardComponent.registerComponent()
var cards: [CardEntity] = []
var models: [Entity] = []
for index in 1...cardCount {
let cardModel = CardEntity()
cardModel.name = "card_\((index+1)/2)"
cardModel.card.id = (index+1)/2
cards.append(cardModel)
}
var cancellable: Cancellable? = nil
// loading all 8 models takes a long time
cancellable = ModelEntity.loadModelAsync(named: "memory_card_1")
.append(ModelEntity.loadModelAsync(named: "memory_card_2"))
.append(ModelEntity.loadModelAsync(named: "memory_card_3"))
.append(ModelEntity.loadModelAsync(named: "memory_card_4"))
.append(ModelEntity.loadModelAsync(named: "memory_card_5"))
.append(ModelEntity.loadModelAsync(named: "memory_card_6"))
.append(ModelEntity.loadModelAsync(named: "memory_card_7"))
.append(ModelEntity.loadModelAsync(named: "memory_card_8"))
.collect().sink(receiveCompletion: {error in
print("error: \(error)")
cancellable?.cancel()},
receiveValue: { entities in
for (index, entity) in entities.enumerated() {
entity.setScale(SIMD3<Float>(0.0025,0.0025,0.0025), relativeTo: anchor)
entity.position = entity.position + SIMD3<Float>(0, 0.0025, 0)
entity.name = "memory_card_\(index)"
for _ in 1...2 {
models.append(entity.clone(recursive: true))
}
}
for (index, card) in cards.enumerated() {
card.addChild(models[index])
}
cards.shuffle()
for card in cards {
var flipTransform = card.transform
flipTransform.rotation = simd_quatf(angle: .pi, axis: [1, 0, 0])
card.move(to: flipTransform, relativeTo: card.parent)
}
//attach cards to the anchor
for (index, card) in cards.enumerated() {
card.generateCollisionShapes(recursive: true)
let x = Float(index % Int(sqrt(Double(self.cardCount)))) - 1.5
let z = Float(index / Int(sqrt(Double(self.cardCount)))) - 1.5
card.position = [x * 0.1, 0, z * 0.1]
anchor.addChild(card)
}
cancellable?.cancel()
})
// Adding Occlusion Box
// Create box mesh, 0.7 meters on all sides
let boxSize: Float = 0.7
let boxMesh = MeshResource.generateBox(size: boxSize)
// Create Occlusion Material
let material = OcclusionMaterial()
// Create ModelEntity using mesh and materials
let occlusionBox = ModelEntity(mesh: boxMesh, materials: [material])
// Position box with top slightly below game board
occlusionBox.position.y = -boxSize / 2 - 0.001
// Add to anchor
anchor.addChild(occlusionBox)
}
override func viewWillTransition(to size: CGSize, with coordinator: UIViewControllerTransitionCoordinator) {
super.viewWillTransition(to: size, with: coordinator)
arView.center = view.center
centerCoachingView()
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
guard let configuration = arView.session.configuration else { return }
arView.session.run(configuration)
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
arView.session.pause()
}
@IBAction func onTap(_ sender: UITapGestureRecognizer) {
let tapLocation = sender.location(in: arView)
if let entity = arView.entity(at: tapLocation) {
var card = entity
if entity.name.hasPrefix("memory_card") {
card = entity.parent!
}
guard let cardEntity = card as? CardEntity else { return }
print("tapped card id: \(cardEntity.card.id)")
if cardEntity.card.revealed {
return
} else {
cardEntity.reveal()
}
if let lastCard = prevCard {
print("last card id: \(lastCard.card.id)")
if lastCard.card.id != cardEntity.card.id {
DispatchQueue.main.asyncAfter(deadline: .now() + 1) {
cardEntity.hide()
lastCard.hide()
}
}
self.prevCard = nil
} else {
print("last card id: N\\A")
prevCard = cardEntity
}
}
}
}
extension ViewController: ARCoachingOverlayViewDelegate {
func addCoachingOverlay() {
let coachingOverlay = ARCoachingOverlayView()
coachingOverlay.autoresizingMask = [.flexibleHeight, .flexibleWidth]
coachingOverlay.center = view.center
coachingOverlay.delegate = self
coachingOverlay.session = arView.session
coachingOverlay.goal = .horizontalPlane
arView.addSubview(coachingOverlay)
NSLayoutConstraint.activate([
coachingOverlay.centerXAnchor.constraint(equalTo: view.centerXAnchor),
coachingOverlay.centerYAnchor.constraint(equalTo: view.centerYAnchor),
coachingOverlay.widthAnchor.constraint(equalTo: view.widthAnchor),
coachingOverlay.heightAnchor.constraint(equalTo: view.heightAnchor)
])
coachingOverlay.activatesAutomatically = true
}
// TODO: Fix coachingView not centering on the first orientation change
func centerCoachingView() {
for subview in arView.subviews {
if let coachingView = subview as? ARCoachingOverlayView {
coachingView.center = self.view.center
}
}
}
}

Related

Extracting an image from AVPlayer video

I have an application where, when a button is pressed, the player is paused and I want to extract the image currently shown on screen, so that I can run some processing on it and display the result.
This was my first attempt: Extracting bitmap from AVPlayer is very uncertain
There is some problem there and I can't get the correct image back at the moment, so I thought a different approach might be to create a bitmap from the view that contains the video.
import Foundation
import UIKit
import AVKit
import TensorFlowLite
class VideoPlayerController : UIViewController {
@IBOutlet weak var videoPlayerView: UIView!
@IBOutlet weak var playButton: UIButton!
@IBOutlet weak var forwardButton: UIButton!
@IBOutlet weak var rewindButton: UIButton!
@IBOutlet weak var playSlowButton: UIButton!
var playbackSlider: UISlider?
var videoDuration: CMTime?
var videoDurationSeconds: Float64?
var movieName: String?
var player: AVPlayer?
var isPlaying = false
var lines = [UIView]()
var playerViewController: AVPlayerViewController?
let delta = 0.02
let slowRate: Float = 0.03
var currentRate: Float = 1.0
var times = [NSValue]()
var timeObserverToken: Any?
func setTimeObserverToken() {
timeObserverToken = player!.addBoundaryTimeObserver(forTimes: times, queue: .main) {
let time = CMTimeGetSeconds(self.player!.currentTime())
self.updateSlider(time: Float(time))
}
}
func addBoundaryTimeObserver() {
// Divide the asset's duration into quarters.
let interval = CMTimeMultiplyByFloat64(videoDuration!, multiplier: 0.0001)
var currentTime = CMTime.zero
// Calculate boundary times
while currentTime < videoDuration! {
currentTime = currentTime + interval
times.append(NSValue(time:currentTime))
}
setTimeObserverToken()
}
func updateSlider(time: Float) {
playbackSlider?.setValue(time, animated: true)
}
func removeBoundaryTimeObserver() {
if let timeObserverToken = timeObserverToken {
player!.removeTimeObserver(timeObserverToken)
self.timeObserverToken = nil
}
}
func setupPlayer(movieName: String) {
let movieUrl = fileURL(for: movieName)
player = AVPlayer(url: movieUrl)
let playerFrame = CGRect(x: 0, y: 0, width: videoPlayerView.frame.width, height: videoPlayerView.frame.height)
playerViewController = AVPlayerViewController()
playerViewController!.player = player
playerViewController?.videoGravity = .resizeAspectFill
playerViewController!.view.frame = playerFrame
playerViewController!.showsPlaybackControls = false
addChild(playerViewController!)
videoPlayerView.addSubview(playerViewController!.view)
playerViewController!.didMove(toParent: self)
}
func setupPlaybackSlider() {
let sliderWidth = 300
let sliderHeight = 20
let sliderX = Int((self.view.frame.width - CGFloat(sliderWidth)) / 2.0)
let sliderY = Int(self.view.frame.height - 2.5 * self.playButton.frame.height)
playbackSlider = UISlider(frame:CGRect(x:sliderX, y: sliderY, width:sliderWidth, height:sliderHeight))
playbackSlider!.minimumValue = 0
videoDuration = (player!.currentItem?.asset.duration)!
videoDurationSeconds = CMTimeGetSeconds(videoDuration!)
playbackSlider!.maximumValue = Float(videoDurationSeconds!)
playbackSlider!.isContinuous = true
playbackSlider!.tintColor = UIColor.green
playbackSlider!.addTarget(self, action: #selector(self.playbackSliderValueChanged(_:)), for: .valueChanged)
self.view.addSubview(playbackSlider!)
addBoundaryTimeObserver()
}
override func viewDidLoad() {
super.viewDidLoad()
if let movieName = movieName {
setupPlayer(movieName: movieName)
setupPlaybackSlider();
}
let rewindText = "- \(delta) s"
let forwardText = "+ \(delta) s"
rewindButton.setTitle(rewindText, for: .normal)
forwardButton.setTitle(forwardText, for: .normal)
}
@objc func playbackSliderValueChanged(_ playbackSlider: UISlider)
{
if (isPlaying) {
pausePlayer()
}
let seconds : Int64 = Int64(playbackSlider.value)
let targetTime:CMTime = CMTimeMake(value: seconds, timescale: 1)
player!.seek(to: targetTime)
}
func getNewLineViews(lines: [LineSegment], color: UIColor) -> [UIView] {
let widthScale = videoPlayerView.frame.width / CGFloat(inputWidth)
let heightScale = videoPlayerView.frame.height / CGFloat(inputHeight)
var lineViews = [UIView]()
for line in lines {
let view = UIView()
let path = UIBezierPath()
let start = CGPoint(x: Double(widthScale) * line.start.x,
y: Double(heightScale) * line.start.y)
let end = CGPoint(x: Double(widthScale) * line.end.x,
y: Double(heightScale) * line.end.y)
path.move(to: start)
path.addLine(to: end)
let shapeLayer = CAShapeLayer()
shapeLayer.path = path.cgPath
shapeLayer.strokeColor = color.cgColor
shapeLayer.lineWidth = 2.0
view.layer.addSublayer(shapeLayer)
lineViews.append(view)
}
return lineViews
}
func getLines(lines: [LineSegment]) {
let widthScale = videoPlayerView.frame.width / CGFloat(inputWidth)
let heightScale = videoPlayerView.frame.height / CGFloat(inputHeight)
for line in lines {
let view = UIView()
let path = UIBezierPath()
let start = CGPoint(x: Double(widthScale) * line.start.x,
y: Double(heightScale) * line.start.y)
let end = CGPoint(x: Double(widthScale) * line.end.x,
y: Double(heightScale) * line.end.y)
path.move(to: start)
path.addLine(to: end)
let shapeLayer = CAShapeLayer()
shapeLayer.path = path.cgPath
shapeLayer.strokeColor = UIColor.green.cgColor
shapeLayer.lineWidth = 2.0
view.layer.addSublayer(shapeLayer)
self.lines.append(view)
}
}
func getValidRectangles(rectangles: [LineSegment?]) -> [LineSegment] {
var result = [LineSegment]()
for rectangle in rectangles {
if let rectangle = rectangle {
result.append(rectangle)
}
}
return result
}
func drawNewResults(result: Result) {
for view in self.lines {
view.removeFromSuperview()
}
let lines = getValidRectangles(rectangles: result.inferences)
self.lines = getNewLineViews(lines: lines, color: UIColor.red)
for lineView in self.lines {
videoPlayerView!.addSubview(lineView)
}
}
func pausePlayer() {
player?.pause()
playButton.setTitle("Play", for: .normal)
isPlaying = false
}
func startPlayer() {
setTimeObserverToken()
player!.play()
player!.rate = currentRate
playButton.setTitle("Pause", for: .normal)
isPlaying = true
}
@IBAction func playVideo(_ sender: Any) {
if (!isPlaying) {
startPlayer()
} else {
pausePlayer()
}
let currentTime = player!.currentTime()
updateSlider(time: Float(CMTimeGetSeconds(currentTime)))
}
@IBAction func playSlow(_ sender: Any) {
if (currentRate > 0.99) {
currentRate = slowRate
playSlowButton.setTitle("Normal", for: .normal)
} else {
currentRate = 1.0
playSlowButton.setTitle("Slow", for: .normal)
}
if (isPlaying) {
player!.rate = currentRate
}
}
@IBAction func rewindPlayer(_ sender: Any) {
if (isPlaying) {
pausePlayer()
}
let currentTime = player!.currentTime()
let zeroTime = CMTime(seconds: 0, preferredTimescale: currentTime.timescale)
let deltaTime = CMTime(seconds: delta, preferredTimescale: currentTime.timescale)
let seekTime = max(zeroTime, (currentTime - deltaTime))
player!.seek(to: seekTime, toleranceBefore: deltaTime, toleranceAfter: zeroTime)
updateSlider(time: Float(CMTimeGetSeconds(seekTime)))
}
@IBAction func forwardPlayer(_ sender: Any) {
if (isPlaying) {
pausePlayer()
}
let currentTime = player!.currentTime()
let endTime = player!.currentItem?.duration
let deltaTime = CMTime(seconds: delta, preferredTimescale: currentTime.timescale)
let seekTime = min(endTime!, (currentTime + deltaTime))
let zeroTime = CMTime(seconds: 0, preferredTimescale: currentTime.timescale)
player!.seek(to: seekTime, toleranceBefore: zeroTime, toleranceAfter: deltaTime)
updateSlider(time: Float(CMTimeGetSeconds(seekTime)))
}
override func viewWillDisappear(_ animated: Bool) {
removeBoundaryTimeObserver()
}
@IBAction func analyzeImage(_ sender: Any) {
pausePlayer()
let time = player!.currentTime()
imageFromVideo(url: fileURL(for: movieName!), at: time.seconds) { image in
let result = self.detectLines(image: image!)
if let result = result {
// Display results by handing off to the InferenceViewController.
DispatchQueue.main.async {
self.drawNewResults(result: result)
}
}
}
}
func detectLines(image: UIImage) -> Result?{
let newImage = videoPlayerView!.asImage()
let outputTensor: Tensor
guard let rgbData = newImage.scaledData(
with: CGSize(width: inputWidth, height: inputHeight),
byteCount: inputWidth * inputHeight * inputChannels
* batchSize,
isQuantized: false
)
else {
print("Failed to convert the image buffer to RGB data.")
return nil
}
do {
let interpreter = getInterpreter()!
try interpreter.copy(rgbData, toInputAt: 0)
try interpreter.invoke()
outputTensor = try interpreter.output(at: 0)
} catch {
print("Failed to invoke the interpreter with error: \(error.localizedDescription)")
return nil
}
let outputArray = outputTensor.data.toArray(type: Float32.self)
return extractLinesFromPoints(output: outputArray, outputShape: outputTensor.shape.dimensions)
}
func getInterpreter() -> Interpreter? {
var interpreter: Interpreter
let modelFilename = MobileNet.modelInfo.name
guard let modelPath = Bundle.main.path(
forResource: modelFilename,
ofType: MobileNet.modelInfo.extension
) else {
print("Failed to load the model with name: \(modelFilename).")
return nil
}
let options = Interpreter.Options()
do {
interpreter = try Interpreter(modelPath: modelPath, options: options)
try interpreter.allocateTensors()
} catch {
print("Failed to create the interpreter with error: \(error.localizedDescription)")
return nil
}
return interpreter
}
}
extension UIView {
// Using a function since `var image` might conflict with an existing variable
// (like on `UIImageView`)
func asImage() -> UIImage {
if #available(iOS 10.0, *) {
let renderer = UIGraphicsImageRenderer(bounds: bounds)
return renderer.image { rendererContext in
layer.render(in: rendererContext.cgContext)
}
} else {
UIGraphicsBeginImageContext(self.frame.size)
self.layer.render(in:UIGraphicsGetCurrentContext()!)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return UIImage(cgImage: image!.cgImage!)
}
}
}
I have tried this and some other things, but all I get is a black image.
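For reference, a common way to grab a frame at a given time straight from the asset, without rendering the player's view, is AVAssetImageGenerator. A minimal sketch of what a helper like the imageFromVideo(url:at:) call in the code above could look like (its real implementation isn't shown in the question, so this is only an assumption):
import AVFoundation
import UIKit

/// Extracts a single frame from the video at `url` at the given time (in seconds),
/// generating it on a background queue and calling back on the main queue.
func imageFromVideo(url: URL, at seconds: Double, completion: @escaping (UIImage?) -> Void) {
    DispatchQueue.global(qos: .userInitiated).async {
        let asset = AVURLAsset(url: url)
        let generator = AVAssetImageGenerator(asset: asset)
        generator.appliesPreferredTrackTransform = true
        // Ask for the exact frame rather than the nearest keyframe.
        generator.requestedTimeToleranceBefore = .zero
        generator.requestedTimeToleranceAfter = .zero
        let time = CMTime(seconds: seconds, preferredTimescale: 600)
        var image: UIImage?
        if let cgImage = try? generator.copyCGImage(at: time, actualTime: nil) {
            image = UIImage(cgImage: cgImage)
        }
        DispatchQueue.main.async { completion(image) }
    }
}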

Live Face Detection - Capture only the rectangle part as an image

I want to detect and track faces from the selfie cam feed in real time. I got that working based on this source: https://developer.apple.com/documentation/vision/tracking_the_user_s_face_in_real_time. The following image shows the rectangle placed over the face.
As you can see from the red rectangular part of the screen, I need to capture and save only the part inside the rectangle as an image, not the full screen. How can I do that?
I have tried some other sources, but they only give me the full screen as an image, not the rectangular part.
The source code for the live face detection is:
import UIKit
import AVKit
import Vision
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
// Main view for showing camera content.
@IBOutlet weak var previewView: UIView?
// AVCapture variables to hold sequence data
var session: AVCaptureSession?
var previewLayer: AVCaptureVideoPreviewLayer?
var videoDataOutput: AVCaptureVideoDataOutput?
var videoDataOutputQueue: DispatchQueue?
var captureDevice: AVCaptureDevice?
var captureDeviceResolution: CGSize = CGSize()
// Layer UI for drawing Vision results
var rootLayer: CALayer?
var detectionOverlayLayer: CALayer?
var detectedFaceRectangleShapeLayer: CAShapeLayer?
var detectedFaceLandmarksShapeLayer: CAShapeLayer?
// Vision requests
private var detectionRequests: [VNDetectFaceRectanglesRequest]?
private var trackingRequests: [VNTrackObjectRequest]?
lazy var sequenceRequestHandler = VNSequenceRequestHandler()
// MARK: UIViewController overrides
override func viewDidLoad() {
super.viewDidLoad()
self.session = self.setupAVCaptureSession()
self.prepareVisionRequest()
self.session?.startRunning()
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}
// Ensure that the interface stays locked in Portrait.
override var supportedInterfaceOrientations: UIInterfaceOrientationMask {
return .portrait
}
// Ensure that the interface stays locked in Portrait.
override var preferredInterfaceOrientationForPresentation: UIInterfaceOrientation {
return .portrait
}
// MARK: AVCapture Setup
/// - Tag: CreateCaptureSession
fileprivate func setupAVCaptureSession() -> AVCaptureSession? {
let captureSession = AVCaptureSession()
do {
let inputDevice = try self.configureFrontCamera(for: captureSession)
self.configureVideoDataOutput(for: inputDevice.device, resolution: inputDevice.resolution, captureSession: captureSession)
self.designatePreviewLayer(for: captureSession)
return captureSession
} catch let executionError as NSError {
self.presentError(executionError)
} catch {
self.presentErrorAlert(message: "An unexpected failure has occured")
}
self.teardownAVCapture()
return nil
}
/// - Tag: ConfigureDeviceResolution
fileprivate func highestResolution420Format(for device: AVCaptureDevice) -> (format: AVCaptureDevice.Format, resolution: CGSize)? {
var highestResolutionFormat: AVCaptureDevice.Format? = nil
var highestResolutionDimensions = CMVideoDimensions(width: 0, height: 0)
for format in device.formats {
let deviceFormat = format as AVCaptureDevice.Format
let deviceFormatDescription = deviceFormat.formatDescription
if CMFormatDescriptionGetMediaSubType(deviceFormatDescription) == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange {
let candidateDimensions = CMVideoFormatDescriptionGetDimensions(deviceFormatDescription)
if (highestResolutionFormat == nil) || (candidateDimensions.width > highestResolutionDimensions.width) {
highestResolutionFormat = deviceFormat
highestResolutionDimensions = candidateDimensions
}
}
}
if highestResolutionFormat != nil {
let resolution = CGSize(width: CGFloat(highestResolutionDimensions.width), height: CGFloat(highestResolutionDimensions.height))
return (highestResolutionFormat!, resolution)
}
return nil
}
fileprivate func configureFrontCamera(for captureSession: AVCaptureSession) throws -> (device: AVCaptureDevice, resolution: CGSize) {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: .video, position: .front)
if let device = deviceDiscoverySession.devices.first {
if let deviceInput = try? AVCaptureDeviceInput(device: device) {
if captureSession.canAddInput(deviceInput) {
captureSession.addInput(deviceInput)
}
if let highestResolution = self.highestResolution420Format(for: device) {
try device.lockForConfiguration()
device.activeFormat = highestResolution.format
device.unlockForConfiguration()
return (device, highestResolution.resolution)
}
}
}
throw NSError(domain: "ViewController", code: 1, userInfo: nil)
}
/// - Tag: CreateSerialDispatchQueue
fileprivate func configureVideoDataOutput(for inputDevice: AVCaptureDevice, resolution: CGSize, captureSession: AVCaptureSession) {
let videoDataOutput = AVCaptureVideoDataOutput()
videoDataOutput.alwaysDiscardsLateVideoFrames = true
// Create a serial dispatch queue used for the sample buffer delegate as well as when a still image is captured.
// A serial dispatch queue must be used to guarantee that video frames will be delivered in order.
let videoDataOutputQueue = DispatchQueue(label: "com.example.apple-samplecode.VisionFaceTrack")
videoDataOutput.setSampleBufferDelegate(self, queue: videoDataOutputQueue)
if captureSession.canAddOutput(videoDataOutput) {
captureSession.addOutput(videoDataOutput)
}
videoDataOutput.connection(with: .video)?.isEnabled = true
if let captureConnection = videoDataOutput.connection(with: AVMediaType.video) {
if captureConnection.isCameraIntrinsicMatrixDeliverySupported {
captureConnection.isCameraIntrinsicMatrixDeliveryEnabled = true
}
}
self.videoDataOutput = videoDataOutput
self.videoDataOutputQueue = videoDataOutputQueue
self.captureDevice = inputDevice
self.captureDeviceResolution = resolution
}
/// - Tag: DesignatePreviewLayer
fileprivate func designatePreviewLayer(for captureSession: AVCaptureSession) {
let videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.previewLayer = videoPreviewLayer
videoPreviewLayer.name = "CameraPreview"
videoPreviewLayer.backgroundColor = UIColor.black.cgColor
videoPreviewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
if let previewRootLayer = self.previewView?.layer {
self.rootLayer = previewRootLayer
previewRootLayer.masksToBounds = true
videoPreviewLayer.frame = previewRootLayer.bounds
previewRootLayer.addSublayer(videoPreviewLayer)
}
}
// Removes infrastructure for AVCapture as part of cleanup.
fileprivate func teardownAVCapture() {
self.videoDataOutput = nil
self.videoDataOutputQueue = nil
if let previewLayer = self.previewLayer {
previewLayer.removeFromSuperlayer()
self.previewLayer = nil
}
}
// MARK: Helper Methods for Error Presentation
fileprivate func presentErrorAlert(withTitle title: String = "Unexpected Failure", message: String) {
let alertController = UIAlertController(title: title, message: message, preferredStyle: .alert)
self.present(alertController, animated: true)
}
fileprivate func presentError(_ error: NSError) {
self.presentErrorAlert(withTitle: "Failed with error \(error.code)", message: error.localizedDescription)
}
// MARK: Helper Methods for Handling Device Orientation & EXIF
fileprivate func radiansForDegrees(_ degrees: CGFloat) -> CGFloat {
return CGFloat(Double(degrees) * Double.pi / 180.0)
}
func exifOrientationForDeviceOrientation(_ deviceOrientation: UIDeviceOrientation) -> CGImagePropertyOrientation {
switch deviceOrientation {
case .portraitUpsideDown:
return .rightMirrored
case .landscapeLeft:
return .downMirrored
case .landscapeRight:
return .upMirrored
default:
return .leftMirrored
}
}
func exifOrientationForCurrentDeviceOrientation() -> CGImagePropertyOrientation {
return exifOrientationForDeviceOrientation(UIDevice.current.orientation)
}
// MARK: Performing Vision Requests
/// - Tag: WriteCompletionHandler
fileprivate func prepareVisionRequest() {
//self.trackingRequests = []
var requests = [VNTrackObjectRequest]()
let faceDetectionRequest = VNDetectFaceRectanglesRequest(completionHandler: { (request, error) in
if error != nil {
print("FaceDetection error: \(String(describing: error)).")
}
guard let faceDetectionRequest = request as? VNDetectFaceRectanglesRequest,
let results = faceDetectionRequest.results as? [VNFaceObservation] else {
return
}
DispatchQueue.main.async {
// Add the observations to the tracking list
for observation in results {
let faceTrackingRequest = VNTrackObjectRequest(detectedObjectObservation: observation)
requests.append(faceTrackingRequest)
}
self.trackingRequests = requests
}
})
// Start with detection. Find face, then track it.
self.detectionRequests = [faceDetectionRequest]
self.sequenceRequestHandler = VNSequenceRequestHandler()
self.setupVisionDrawingLayers()
}
// MARK: Drawing Vision Observations
fileprivate func setupVisionDrawingLayers() {
let captureDeviceResolution = self.captureDeviceResolution
let captureDeviceBounds = CGRect(x: 0,
y: 0,
width: captureDeviceResolution.width,
height: captureDeviceResolution.height)
let captureDeviceBoundsCenterPoint = CGPoint(x: captureDeviceBounds.midX,
y: captureDeviceBounds.midY)
let normalizedCenterPoint = CGPoint(x: 0.5, y: 0.5)
guard let rootLayer = self.rootLayer else {
self.presentErrorAlert(message: "view was not property initialized")
return
}
let overlayLayer = CALayer()
overlayLayer.name = "DetectionOverlay"
overlayLayer.masksToBounds = true
overlayLayer.anchorPoint = normalizedCenterPoint
overlayLayer.bounds = captureDeviceBounds
overlayLayer.position = CGPoint(x: rootLayer.bounds.midX, y: rootLayer.bounds.midY)
let faceRectangleShapeLayer = CAShapeLayer()
faceRectangleShapeLayer.name = "RectangleOutlineLayer"
faceRectangleShapeLayer.bounds = captureDeviceBounds
faceRectangleShapeLayer.anchorPoint = normalizedCenterPoint
faceRectangleShapeLayer.position = captureDeviceBoundsCenterPoint
faceRectangleShapeLayer.fillColor = nil
faceRectangleShapeLayer.strokeColor = UIColor.green.withAlphaComponent(0.7).cgColor
faceRectangleShapeLayer.lineWidth = 5
faceRectangleShapeLayer.shadowOpacity = 0.7
faceRectangleShapeLayer.shadowRadius = 5
let faceLandmarksShapeLayer = CAShapeLayer()
faceLandmarksShapeLayer.name = "FaceLandmarksLayer"
faceLandmarksShapeLayer.bounds = captureDeviceBounds
faceLandmarksShapeLayer.anchorPoint = normalizedCenterPoint
faceLandmarksShapeLayer.position = captureDeviceBoundsCenterPoint
faceLandmarksShapeLayer.fillColor = nil
faceLandmarksShapeLayer.strokeColor = UIColor.yellow.withAlphaComponent(0.7).cgColor
faceLandmarksShapeLayer.lineWidth = 3
faceLandmarksShapeLayer.shadowOpacity = 0.7
faceLandmarksShapeLayer.shadowRadius = 5
overlayLayer.addSublayer(faceRectangleShapeLayer)
faceRectangleShapeLayer.addSublayer(faceLandmarksShapeLayer)
rootLayer.addSublayer(overlayLayer)
self.detectionOverlayLayer = overlayLayer
self.detectedFaceRectangleShapeLayer = faceRectangleShapeLayer
self.detectedFaceLandmarksShapeLayer = faceLandmarksShapeLayer
self.updateLayerGeometry()
}
fileprivate func updateLayerGeometry() {
guard let overlayLayer = self.detectionOverlayLayer,
let rootLayer = self.rootLayer,
let previewLayer = self.previewLayer
else {
return
}
CATransaction.setValue(NSNumber(value: true), forKey: kCATransactionDisableActions)
let videoPreviewRect = previewLayer.layerRectConverted(fromMetadataOutputRect: CGRect(x: 0, y: 0, width: 1, height: 1))
var rotation: CGFloat
var scaleX: CGFloat
var scaleY: CGFloat
// Rotate the layer into screen orientation.
switch UIDevice.current.orientation {
case .portraitUpsideDown:
rotation = 180
scaleX = videoPreviewRect.width / captureDeviceResolution.width
scaleY = videoPreviewRect.height / captureDeviceResolution.height
case .landscapeLeft:
rotation = 90
scaleX = videoPreviewRect.height / captureDeviceResolution.width
scaleY = scaleX
case .landscapeRight:
rotation = -90
scaleX = videoPreviewRect.height / captureDeviceResolution.width
scaleY = scaleX
default:
rotation = 0
scaleX = videoPreviewRect.width / captureDeviceResolution.width
scaleY = videoPreviewRect.height / captureDeviceResolution.height
}
// Scale and mirror the image to ensure upright presentation.
let affineTransform = CGAffineTransform(rotationAngle: radiansForDegrees(rotation))
.scaledBy(x: scaleX, y: -scaleY)
overlayLayer.setAffineTransform(affineTransform)
// Cover entire screen UI.
let rootLayerBounds = rootLayer.bounds
overlayLayer.position = CGPoint(x: rootLayerBounds.midX, y: rootLayerBounds.midY)
}
fileprivate func addPoints(in landmarkRegion: VNFaceLandmarkRegion2D, to path: CGMutablePath, applying affineTransform: CGAffineTransform, closingWhenComplete closePath: Bool) {
let pointCount = landmarkRegion.pointCount
if pointCount > 1 {
let points: [CGPoint] = landmarkRegion.normalizedPoints
path.move(to: points[0], transform: affineTransform)
path.addLines(between: points, transform: affineTransform)
if closePath {
path.addLine(to: points[0], transform: affineTransform)
path.closeSubpath()
}
}
}
fileprivate func addIndicators(to faceRectanglePath: CGMutablePath, faceLandmarksPath: CGMutablePath, for faceObservation: VNFaceObservation) {
let displaySize = self.captureDeviceResolution
let faceBounds = VNImageRectForNormalizedRect(faceObservation.boundingBox, Int(displaySize.width), Int(displaySize.height))
faceRectanglePath.addRect(faceBounds)
if let landmarks = faceObservation.landmarks {
// Landmarks are relative to -- and normalized within --- face bounds
let affineTransform = CGAffineTransform(translationX: faceBounds.origin.x, y: faceBounds.origin.y)
.scaledBy(x: faceBounds.size.width, y: faceBounds.size.height)
// Treat eyebrows and lines as open-ended regions when drawing paths.
let openLandmarkRegions: [VNFaceLandmarkRegion2D?] = [
landmarks.leftEyebrow,
landmarks.rightEyebrow,
landmarks.faceContour,
landmarks.noseCrest,
landmarks.medianLine
]
for openLandmarkRegion in openLandmarkRegions where openLandmarkRegion != nil {
self.addPoints(in: openLandmarkRegion!, to: faceLandmarksPath, applying: affineTransform, closingWhenComplete: false)
}
// Draw eyes, lips, and nose as closed regions.
let closedLandmarkRegions: [VNFaceLandmarkRegion2D?] = [
landmarks.leftEye,
landmarks.rightEye,
landmarks.outerLips,
landmarks.innerLips,
landmarks.nose
]
for closedLandmarkRegion in closedLandmarkRegions where closedLandmarkRegion != nil {
self.addPoints(in: closedLandmarkRegion!, to: faceLandmarksPath, applying: affineTransform, closingWhenComplete: true)
}
}
}
/// - Tag: DrawPaths
fileprivate func drawFaceObservations(_ faceObservations: [VNFaceObservation]) {
guard let faceRectangleShapeLayer = self.detectedFaceRectangleShapeLayer,
let faceLandmarksShapeLayer = self.detectedFaceLandmarksShapeLayer
else {
return
}
CATransaction.begin()
CATransaction.setValue(NSNumber(value: true), forKey: kCATransactionDisableActions)
let faceRectanglePath = CGMutablePath()
let faceLandmarksPath = CGMutablePath()
for faceObservation in faceObservations {
self.addIndicators(to: faceRectanglePath,
faceLandmarksPath: faceLandmarksPath,
for: faceObservation)
}
faceRectangleShapeLayer.path = faceRectanglePath
faceLandmarksShapeLayer.path = faceLandmarksPath
self.updateLayerGeometry()
CATransaction.commit()
}
// MARK: AVCaptureVideoDataOutputSampleBufferDelegate
/// - Tag: PerformRequests
// Handle delegate method callback on receiving a sample buffer.
public func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
var requestHandlerOptions: [VNImageOption: AnyObject] = [:]
let cameraIntrinsicData = CMGetAttachment(sampleBuffer, key: kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix, attachmentModeOut: nil)
if cameraIntrinsicData != nil {
requestHandlerOptions[VNImageOption.cameraIntrinsics] = cameraIntrinsicData
}
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
print("Failed to obtain a CVPixelBuffer for the current output frame.")
return
}
let exifOrientation = self.exifOrientationForCurrentDeviceOrientation()
guard let requests = self.trackingRequests, !requests.isEmpty else {
// No tracking object detected, so perform initial detection
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer,
orientation: exifOrientation,
options: requestHandlerOptions)
do {
guard let detectRequests = self.detectionRequests else {
return
}
try imageRequestHandler.perform(detectRequests)
} catch let error as NSError {
NSLog("Failed to perform FaceRectangleRequest: %#", error)
}
return
}
do {
try self.sequenceRequestHandler.perform(requests,
on: pixelBuffer,
orientation: exifOrientation)
} catch let error as NSError {
NSLog("Failed to perform SequenceRequest: %#", error)
}
// Setup the next round of tracking.
var newTrackingRequests = [VNTrackObjectRequest]()
for trackingRequest in requests {
guard let results = trackingRequest.results else {
return
}
guard let observation = results[0] as? VNDetectedObjectObservation else {
return
}
if !trackingRequest.isLastFrame {
if observation.confidence > 0.3 {
trackingRequest.inputObservation = observation
} else {
trackingRequest.isLastFrame = true
}
newTrackingRequests.append(trackingRequest)
}
}
self.trackingRequests = newTrackingRequests
if newTrackingRequests.isEmpty {
// Nothing to track, so abort.
return
}
// Perform face landmark tracking on detected faces.
var faceLandmarkRequests = [VNDetectFaceLandmarksRequest]()
// Perform landmark detection on tracked faces.
for trackingRequest in newTrackingRequests {
let faceLandmarksRequest = VNDetectFaceLandmarksRequest(completionHandler: { (request, error) in
if error != nil {
print("FaceLandmarks error: \(String(describing: error)).")
}
guard let landmarksRequest = request as? VNDetectFaceLandmarksRequest,
let results = landmarksRequest.results as? [VNFaceObservation] else {
return
}
// Perform all UI updates (drawing) on the main queue, not the background queue on which this handler is being called.
DispatchQueue.main.async {
self.drawFaceObservations(results)
}
})
guard let trackingResults = trackingRequest.results else {
return
}
guard let observation = trackingResults[0] as? VNDetectedObjectObservation else {
return
}
let faceObservation = VNFaceObservation(boundingBox: observation.boundingBox)
faceLandmarksRequest.inputFaceObservations = [faceObservation]
// Continue to track detected facial landmarks.
faceLandmarkRequests.append(faceLandmarksRequest)
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer,
orientation: exifOrientation,
options: requestHandlerOptions)
do {
try imageRequestHandler.perform(faceLandmarkRequests)
} catch let error as NSError {
NSLog("Failed to perform FaceLandmarkRequest: %#", error)
}
}
}
}
If you want to save only the rectangular part of the image and you know that rectangle's frame (CGRect), you can crop the original image to that frame instead of saving the full screen:
let rectFrame = CGRect() // the rectangle's frame on the image
let image = UIImage() // the captured image
let croppedCGImage = image.cgImage?.cropping(to: rectFrame)
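Expanding on that a little: cropping(to:) works in the pixel space of the CGImage, so the on-screen rectangle usually has to be scaled from view points into image pixels first. A rough sketch, assuming the captured image fills the view with the same aspect mapping (a real capture pipeline would normally convert via the preview layer instead):
import UIKit

func cropFace(from image: UIImage, viewRect: CGRect, in view: UIView) -> UIImage? {
    guard let cgImage = image.cgImage else { return nil }
    // Scale the rectangle from view points into image pixels.
    let scaleX = CGFloat(cgImage.width) / view.bounds.width
    let scaleY = CGFloat(cgImage.height) / view.bounds.height
    let pixelRect = CGRect(x: viewRect.origin.x * scaleX,
                           y: viewRect.origin.y * scaleY,
                           width: viewRect.width * scaleX,
                           height: viewRect.height * scaleY)
    guard let cropped = cgImage.cropping(to: pixelRect) else { return nil }
    return UIImage(cgImage: cropped, scale: image.scale, orientation: image.imageOrientation)
}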

How to change the size of a video view in Swift

I am new to Swift and trying to implement some projects. I got this code from GitHub and it is working fine.
When you open the app, it starts a video feed on the iPhone screen and detects letters and characters using TesseractOCR.
The problem is that the video covers the whole screen, so I am not able to add any buttons; if I add a button, it disappears under the video.
I tried adding session.sessionPreset = .photo to crop the video at the top and bottom, but it did not work. I also tried preview.sessionPreset = .photo, but that did not work either.
Note: Main.storyboard is empty.
Here is the code:
import AVFoundation
import UIKit
import Vision
import TesseractOCR
class ViewController: UIViewController, G8TesseractDelegate {
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
tesseract?.pageSegmentationMode = .sparseText
// Recognize only these characters
// tesseract?.charWhitelist = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890()-+*!/?.,##$%&"
tesseract?.charWhitelist = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890"
if isAuthorized() {
configureTextDetection()
configureCamera()
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
private func configureTextDetection() {
textDetectionRequest = VNDetectTextRectanglesRequest(completionHandler: handleDetection)
textDetectionRequest?.reportCharacterBoxes = true
}
private func configureCamera() {
preview.session = session
let cameraDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back)
var cameraDevice: AVCaptureDevice?
for device in cameraDevices.devices {
if device.position == .back {
cameraDevice = device
break
}
}
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: cameraDevice!)
if session.canAddInput(captureDeviceInput) {
session.addInput(captureDeviceInput)
}
}
catch {
print("Error occured \(error)")
return
}
session.sessionPreset = .photo // It was .high
let videoDataOutput = AVCaptureVideoDataOutput()
videoDataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "Buffer Queue", qos: .userInteractive, attributes: .concurrent, autoreleaseFrequency: .inherit, target: nil))
if session.canAddOutput(videoDataOutput) {
session.addOutput(videoDataOutput)
}
preview.videoPreviewLayer.videoGravity = .resize
session.startRunning()
}
private func handleDetection(request: VNRequest, error: Error?) {
guard let detectionResults = request.results else {
print("No detection results")
return
}
let textResults = detectionResults.map() {
return $0 as? VNTextObservation
}
if textResults.isEmpty {
return
}
textObservations = textResults as! [VNTextObservation]
DispatchQueue.main.async {
guard let sublayers = self.view.layer.sublayers else {
return
}
for layer in sublayers[1...] {
if (layer as? CATextLayer) == nil {
layer.removeFromSuperlayer()
}
}
let viewWidth = self.view.frame.size.width
let viewHeight = self.view.frame.size.height
for result in textResults {
if let textResult = result {
let layer = CALayer()
var rect = textResult.boundingBox
rect.origin.x *= viewWidth
rect.size.height *= viewHeight
rect.origin.y = ((1 - rect.origin.y) * viewHeight) - rect.size.height
rect.size.width *= viewWidth
layer.frame = rect
layer.borderWidth = 2
layer.borderColor = UIColor.red.cgColor
self.view.layer.addSublayer(layer)
}
}
}
}
private var preview: PreviewView {
return view as! PreviewView
}
// private var cameraView: CameraView {
// return view as! CameraView
// }
private func isAuthorized() -> Bool {
let authorizationStatus = AVCaptureDevice.authorizationStatus(for: AVMediaType.video)
switch authorizationStatus {
case .notDetermined:
AVCaptureDevice.requestAccess(for: AVMediaType.video,
completionHandler: { (granted:Bool) -> Void in
if granted {
DispatchQueue.main.async {
self.configureTextDetection()
self.configureCamera()
}
}
})
return true
case .authorized:
return true
case .denied, .restricted: return false
}
}
private var textDetectionRequest: VNDetectTextRectanglesRequest?
private let session = AVCaptureSession()
private var textObservations = [VNTextObservation]()
private var tesseract = G8Tesseract(language: "eng", engineMode: .tesseractOnly)
private var font = CTFontCreateWithName("Helvetica" as CFString, 18, nil)
}
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
// MARK: - Camera Delegate and Setup
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return
}
var imageRequestOptions = [VNImageOption: Any]()
if let cameraData = CMGetAttachment(sampleBuffer, key: kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix, attachmentModeOut: nil) {
imageRequestOptions[.cameraIntrinsics] = cameraData
}
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: CGImagePropertyOrientation(rawValue: 6)!, options: imageRequestOptions)
do {
try imageRequestHandler.perform([textDetectionRequest!])
}
catch {
print("Error occured \(error)")
}
var ciImage = CIImage(cvPixelBuffer: pixelBuffer)
let transform = ciImage.orientationTransform(for: CGImagePropertyOrientation(rawValue: 6)!)
ciImage = ciImage.transformed(by: transform)
let size = ciImage.extent.size
var recognizedTextPositionTuples = [(rect: CGRect, text: String)]()
for textObservation in textObservations {
guard let rects = textObservation.characterBoxes else {
continue
}
var xMin = CGFloat.greatestFiniteMagnitude
var xMax: CGFloat = 0
var yMin = CGFloat.greatestFiniteMagnitude
var yMax: CGFloat = 0
for rect in rects {
xMin = min(xMin, rect.bottomLeft.x)
xMax = max(xMax, rect.bottomRight.x)
yMin = min(yMin, rect.bottomRight.y)
yMax = max(yMax, rect.topRight.y)
}
let imageRect = CGRect(x: xMin * size.width, y: yMin * size.height, width: (xMax - xMin) * size.width, height: (yMax - yMin) * size.height)
let context = CIContext(options: nil)
guard let cgImage = context.createCGImage(ciImage, from: imageRect) else {
continue
}
let uiImage = UIImage(cgImage: cgImage)
tesseract?.image = uiImage
tesseract?.recognize()
guard var text = tesseract?.recognizedText else {
continue
}
text = text.trimmingCharacters(in: CharacterSet.newlines)
if !text.isEmpty {
let x = xMin
let y = 1 - yMax
let width = xMax - xMin
let height = yMax - yMin
recognizedTextPositionTuples.append((rect: CGRect(x: x, y: y, width: width, height: height), text: text))
}
}
textObservations.removeAll()
DispatchQueue.main.async {
let viewWidth = self.view.frame.size.width
let viewHeight = self.view.frame.size.height
guard let sublayers = self.view.layer.sublayers else {
return
}
for layer in sublayers[1...] {
if let _ = layer as? CATextLayer {
layer.removeFromSuperlayer()
}
}
for tuple in recognizedTextPositionTuples {
let textLayer = CATextLayer()
textLayer.backgroundColor = UIColor.clear.cgColor
textLayer.font = self.font
var rect = tuple.rect
rect.origin.x *= viewWidth
rect.size.width *= viewWidth
rect.origin.y *= viewHeight
rect.size.height *= viewHeight
// Increase the size of text layer to show text of large lengths
rect.size.width += 100
rect.size.height += 100
textLayer.frame = rect
textLayer.string = tuple.text
textLayer.foregroundColor = UIColor.green.cgColor
self.view.layer.addSublayer(textLayer)
}
}
}
}
Basically, the CameraView is being set as the root view of the ViewController, which is why you cannot change its size. You need to make the CameraView a child view of the ViewController's root view in order to change its size.
Something like this:
1. Select ViewController.swift.
2. Remove the following:
private var cameraView: CameraView {
return view as! CameraView
}
3. Replace all cameraView with self.cameraView.
4. Add the following line:
@IBOutlet var cameraView: CameraView!
5. Replace all self.view with self.cameraView.
6. Select Main.storyboard.
7. Select Camera View in the Document Outline.
8. Go to the Identity Inspector (⌥⌘3) and clear Class, which should contain CameraView. After you clear it, it should show UIView.
9. Open the Library (⇧⌘L) and add a new View inside the original Camera View (feel free to adjust the size of this new view).
10. Select this new view, go to the Identity Inspector (⌥⌘3), and change the Class to CameraView.
11. Select View Controller in the storyboard and go to the Connections Inspector (⌥⌘6).
12. Connect the cameraView outlet.
And if you don't want text going out of the bounds of cameraView, you can simply add the following in viewDidLoad:
self.cameraView.clipsToBounds = true
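To illustrate the layout idea (a sketch that assumes the cameraView outlet created in the steps above): once CameraView is a child view rather than the root view, a button added as a sibling of cameraView sits above the video instead of disappearing under it.
override func viewDidLoad() {
    super.viewDidLoad()
    self.cameraView.clipsToBounds = true

    // Hypothetical button added as a sibling of cameraView,
    // so the video no longer covers it.
    let captureButton = UIButton(type: .system)
    captureButton.setTitle("Capture", for: .normal)
    captureButton.translatesAutoresizingMaskIntoConstraints = false
    view.addSubview(captureButton)
    NSLayoutConstraint.activate([
        captureButton.centerXAnchor.constraint(equalTo: view.centerXAnchor),
        captureButton.bottomAnchor.constraint(equalTo: view.safeAreaLayoutGuide.bottomAnchor, constant: -16)
    ])

    // ...the existing Tesseract / camera setup from the original viewDidLoad goes here...
}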

How to get the right position of a view for animations?

I want to create a Set game app. For that, I have a deck of cards from which I want the cards to move (with an animation) onto the board.
My problem is that when I do this:
private func createDeck() {
allCards.forEach{ card in
card.isFaceUp = false
card.frame = Deck.frame
GameBoard.allCardsInDeck.append(card)
}
}
the cards appear from a different position than the deck.
Doesn't Deck.frame give me the current position of the view in its superview? My cards appear from the top, but my deck view is at the bottom.
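(For reference, a view's frame is expressed in its superview's coordinate system, so if Deck and GameBoard have different superviews the raw frame won't line up on the board. Converting the deck's rectangle into GameBoard's coordinate space would look roughly like this; just an illustration of UIView.convert, not a confirmed fix:)
// Convert the deck view's rectangle into GameBoard's coordinate space
// before using it as the card's starting frame.
card.frame = GameBoard.convert(Deck.bounds, from: Deck)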
This is my ViewController code:
import UIKit
class ViewController: UIViewController {
@IBOutlet weak var informationLabel: UILabel!
@IBOutlet weak var ScoreLabel: UILabel!
@IBOutlet weak var Deck: Deck!
@IBOutlet weak var GameBoard: CardBoardView! {
didSet {
let swipeGesture = UISwipeGestureRecognizer(target: self, action: #selector(swipe))
swipeGesture.direction = .down
GameBoard.addGestureRecognizer(swipeGesture)
let rotationGesture = UIRotationGestureRecognizer(target: self, action: #selector(rotation))
GameBoard.addGestureRecognizer(rotationGesture)
}
}
private var gameIsStarted = false
private var game: Game = Game() {
didSet {
updateScoreLabel()
}
}
private var selectedCards = [CardSubview] ()
private var indexOfAllCards: Int = 0
private lazy var allCards = getAllCards()
private var cardsBackgroundColor = #colorLiteral(red: 1, green: 1, blue: 1, alpha: 1)
@objc func rotation(recognizer: UIRotationGestureRecognizer){
switch recognizer.state {
case .ended: GameBoard.mixCards()
default: break
}
}
@objc private func swipe(recognizer: UISwipeGestureRecognizer) {
switch recognizer.state {
case .ended: add3Cards()
default: break
}
}
private func getAllCards () -> [CardSubview] {
var cards = [CardSubview]()
for index in 0..<game.cards.count {
cards.append(CardSubview())
cards[index].fill = game.cards[index].strokeIdentifier
cards[index].color = game.cards[index].colorIdentifier
cards[index].form = game.cards[index].form
cards[index].occurence = game.cards[index].occurenceOfForm
cards[index].addGestureRecognizer(UITapGestureRecognizer(target: self, action: #selector(tab)))
cards[index].backgroundColor = cardsBackgroundColor
cards[index].indexNr = index
}
return cards
}
@objc private func tab (recognizer: UITapGestureRecognizer) {
switch recognizer.state {
case .ended:
if let card = recognizer.view as? CardSubview, card.isFaceUp == true {
card.isSelected = !card.isSelected
if card.isSelected {
selectedCards.append(card)
if selectedCards.count > 2 {
let isSet = game.isSet(arrayOfIndexes: [selectedCards[0].indexNr,selectedCards[1].indexNr, selectedCards[2].indexNr]) == true
if isSet {
game.score += 1
selectedCards.forEach { card in
GameBoard.doWhenMatched(card: card)
card.isSelected = false
selectedCards.removeAll()
}
findSet()
} else if isSet == false {
game.score -= 1
selectedCards.forEach{ card in
card.isSelected = false
}
selectedCards.removeAll()
}
}
} else if card.isSelected == false, selectedCards.contains(card) {
selectedCards.remove(at: selectedCards.index(of: card)!)
}
}
default: break
}
}
private func findSet() {
if GameBoard.allOpenCards.count > 0 {
var setInOpenCards = Bool()
var allSetsInOpenCards = Int()
for index1 in 0..<GameBoard.allOpenCards.count-2 {
for index2 in (index1+1)..<GameBoard.allOpenCards.count-1 {
for index3 in (index2+1)..<GameBoard.allOpenCards.count {
setInOpenCards = game.isSet(arrayOfIndexes: [allCards[index1].indexNr, allCards[index2].indexNr, allCards[index3].indexNr])
if (setInOpenCards == true){
allSetsInOpenCards+=1
}
}
}
}
informationLabel.text = "Sets available: \(allSetsInOpenCards)"
} else {
informationLabel.text = "Sets available: \(0)"
}
}
@IBAction private func newGame() {
game = Game()
GameBoard.removeSubviewsFromDeck()
GameBoard.allCardsInDeck.removeAll()
createDeck()
GameBoard.backgroundColor = UIColor.black
GameBoard.removeSubviews()
selectedCards.removeAll()
gameIsStarted = true
allCards = getAllCards()
indexOfAllCards = 0
GameBoard.allOpenCards.removeAll()
add12Cards()
}
private func updateScoreLabel () {
if game.score >= 0 {
ScoreLabel.backgroundColor = #colorLiteral(red: 0, green: 0.9768045545, blue: 0, alpha: 0.5)
} else if game.score < 0 {
ScoreLabel.backgroundColor = #colorLiteral(red: 1, green: 0.1491314173, blue: 0, alpha: 0.5)
}
ScoreLabel.text = "Score: \(game.score)"
}
private func add3Cards() {
if indexOfAllCards < allCards.count, gameIsStarted == true {
for _ in 0...2 {
GameBoard.allOpenCards.append(GameBoard.allCardsInDeck[indexOfAllCards])
indexOfAllCards+=1
}
}
findSet()
}
private func add12Cards() {
let numbersOfFirst12Cards = 12
if gameIsStarted == true {
for _ in 0..<numbersOfFirst12Cards {
GameBoard.allOpenCards.append(GameBoard.allCardsInDeck[indexOfAllCards])
indexOfAllCards += 1
}
}
findSet()
}
private func createDeck() {
allCards.forEach{ card in
card.isFaceUp = false
card.frame = Deck.frame
GameBoard.allCardsInDeck.append(card)
}
}
}
This is my CardBoardView, where all my animations happen:
import UIKit
@IBDesignable
class CardBoardView: UIView {
let durationForDisappearingCards = 2.0
let delayForDisappearingCards = 0.0
lazy var animator = UIDynamicAnimator(referenceView: self)
lazy var cardBehavior = CardBehavior(in: animator)
var allOpenCards = [CardSubview]() {
didSet { addSubview(); setNeedsLayout();}
}
var allCardsInDeck = [CardSubview] () {
didSet{ setNeedsLayout()}
}
struct Layout {
static let ratio:CGFloat = 2.0
static let insetByX:CGFloat = 2.0
static let insetByY:CGFloat = 2.0
}
private func addSubview() {
for card in allOpenCards {
addSubview(card)
}
}
public func removeSubviews() {
for card in allOpenCards {
card.removeFromSuperview()
}
}
public func removeSubviewsFromDeck() {
allCardsInDeck.forEach{ card in
card.removeFromSuperview()
}
}
public func mixCards() {
var mixCards = [CardSubview]()
for _ in 0..<allOpenCards.count {
let random = allOpenCards.count.arc4random
let card = allOpenCards[random]
mixCards.append(card)
allOpenCards.remove(at: random)
}
allOpenCards = mixCards
}
public func doWhenMatched(card: CardSubview) {
// cardBehavior.addItem(card)
UIView.animate(
withDuration: self.durationForDisappearingCards,
delay: self.delayForDisappearingCards,
animations: {
card.frame = CGRect(x: (self.superview?.frame.midX)!-card.bounds.width/2, y: (self.superview?.frame.minY)!-card.bounds.height, width: card.bounds.width , height: card.bounds.height)
}, completion: { finish in
card.removeFromSuperview()
self.allOpenCards.remove(at: self.allOpenCards.index(of: card)!)
}
)
}
// let newPosition = CGRect(x: self.frame.midX-card.bounds.width/2 , y: self.frame.minY, width: card.bounds.width, height: card.bounds.height)
//
//
//
// UIView.animate(withDuration: durationForDisappearingCards, delay: delayForDisappearingCards, options: [.allowUserInteraction], animations: {card.alpha=0.0})
override func layoutSubviews() {
super.layoutSubviews()
var grid = Grid(layout: .aspectRatio(Layout.ratio), frame: self.bounds)
grid.cellCount = allOpenCards.count
var secondsForDelay:CGFloat = 0
let secondsForFlipCard:CGFloat = 0.5
for index in allOpenCards.indices {
if self.allOpenCards[index].frame != (grid[index]?.insetBy(dx: Layout.insetByX, dy: Layout.insetByY)) ?? CGRect.zero {
UIView.animate(
withDuration: 0.5,
delay: TimeInterval(secondsForDelay) ,
animations: {self.allOpenCards[index].frame = (grid[index]?.insetBy(dx: Layout.insetByX, dy: Layout.insetByY)) ?? CGRect.zero},
completion: {
finish in
if self.allOpenCards[index].isFaceUp != true {
UIView.transition(with: self.allOpenCards[index],
duration: TimeInterval(secondsForFlipCard),
options: [.transitionFlipFromLeft],
animations: {
self.allOpenCards[index].isFaceUp = true
}
)
}
}
)
secondsForDelay+=0.02
}
}
}
}
private extension Int {
var arc4random: Int {
if self > 0 {
return Int(arc4random_uniform(UInt32(self)))
} else if self < 0 {
return -Int(arc4random_uniform(UInt32(self)))
} else {
return 0
}
}
}
private extension CGFloat {
var arc4random: CGFloat {
if self > 0 {
return CGFloat(arc4random_uniform(UInt32(self)))
} else if self < 0 {
return -CGFloat(arc4random_uniform(UInt32(self)))
} else {
return 0
}
}
}

How can I add text on a PDF?

I found some code that shows a PDF file and has a pencil tool to draw on the PDF, but I want to change the pencil to text: when I tap a button, I want to be able to write text and move it anywhere on the view.
I want to add text, change its size, and move it as I like.
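For reference, PDFKit has a free-text annotation subtype that places text directly on a page. A minimal sketch of adding one at a given page position (the helper name and the hard-coded sizing are assumptions, not part of the code below):
import PDFKit
import UIKit

func addTextAnnotation(_ text: String, to page: PDFPage, at point: CGPoint) {
    // Bounds are in PDF page coordinates; sized roughly here for illustration.
    let bounds = CGRect(x: point.x, y: point.y, width: 200, height: 30)
    let annotation = PDFAnnotation(bounds: bounds, forType: .freeText, withProperties: nil)
    annotation.contents = text
    annotation.font = UIFont.systemFont(ofSize: 16)
    annotation.fontColor = .black
    annotation.color = .clear // no background fill behind the text
    page.addAnnotation(annotation)
}
Moving or resizing the text afterwards then comes down to updating the annotation's bounds, for example from a pan gesture.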
//
// ViewController.swift
// PDFTest
//
// Created by Prashoor Chitnis on 22/11/17.
// Copyright © 2017 InfoBeans LLC. All rights reserved.
//
import UIKit
import PDFKit
struct PAIRAnn {
var drawing: PDFAnnotation;
var button: PDFAnnotation;
var currentPage: PDFPage;
}
class ViewController: UIViewController, PDFViewDelegate, DrawEventListener {
@IBOutlet weak var closeBtn: UIBarButtonItem!
@IBOutlet weak var saveBtn: UIBarButtonItem!
@IBOutlet weak var undoBtn: UIBarButtonItem!
@IBOutlet weak var pencil: UIBarButtonItem!
var selectedAnnotation = [PAIRAnn]()
var position: CGPoint?
var currentPage : PDFPage?
var pView: PDFView?
var pdfView : PDFView {
get {
if pView == nil {
pView = PDFView(frame: self.view.frame)
pView?.delegate = self
}
return pView!
}
}
var fadeView: UIView?
var messageView : UIView {
get {
if fadeView == nil {
fadeView = UIView(frame: CGRect(x: (self.view.frame.size.width - 300)/2, y: 30, width: 300, height: 55))
var frm = (fadeView?.bounds)!
frm.origin.y = 15
frm.size.height = 25
let text = UILabel(frame: frm)
text.textAlignment = .center
text.text = "Double-Click on any spot to begin"
text.font = UIFont.systemFont(ofSize: 16)
fadeView?.addSubview(text)
fadeView?.layer.cornerRadius = 27.5
fadeView?.layer.borderWidth = 1.5
fadeView?.backgroundColor = UIColor.white.withAlphaComponent(0.8)
}
return fadeView!
}
}
var drawVw: DrawScreen?
var drawView : DrawScreen {
get {
if drawVw == nil {
drawVw = DrawScreen(frame: self.view.frame)
drawVw?.listener = self
}
return drawVw!
}
}
private var gesture: UITapGestureRecognizer?
private var gestureRecognizer: UITapGestureRecognizer {
get {
if gesture == nil {
gesture = UITapGestureRecognizer.init(target: self, action: #selector(addDrawing(sender:)))
gesture?.numberOfTapsRequired = 2
}
return gesture!
}
}
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
self.closeBtn.isEnabled = false
self.saveBtn.isEnabled = false
self.undoBtn.isEnabled = false
self.pencil.isEnabled = true
self.view.insertSubview(self.pdfView, at: 0)
let url = Bundle.main.url(forResource: "EN8", withExtension: "pdf")
self.pdfView.document = PDFDocument.init(url: url!)
}
@IBAction func addPencil(_ sender: UIBarButtonItem) {
self.pencil.isEnabled = false
self.view.insertSubview(self.messageView, aboveSubview: self.pdfView)
self.pdfView.addGestureRecognizer(self.gestureRecognizer)
}
func removeMessage() {
if self.messageView.superview == nil {
return
}
UIView.animate(withDuration: 0.4, animations: {
self.messageView.transform = CGAffineTransform.init(translationX: 0, y: -70)
}) { (success) in
self.messageView.removeFromSuperview()
self.messageView.transform = CGAffineTransform.identity
}
}
@objc func addDrawing(sender: UITapGestureRecognizer) {
self.removeMessage()
self.position = sender.location(in: self.view)
self.view.insertSubview(self.drawView, at: 1)
currentPage = self.pdfView.currentPage
self.pdfView.go(to: currentPage!)
self.pencil.isEnabled = false
self.undoBtn.isEnabled = false
self.saveBtn.isEnabled = false
self.closeBtn.isEnabled = true
}
func pdfViewWillClick(onLink sender: PDFView, with url: URL) {
var checkVal = url.absoluteString
checkVal = checkVal.replacingOccurrences(of: "path://", with: "")
var draw: PDFAnnotation?
for ann in self.allLinks() {
let value = "\(ann.value(forAnnotationKey: PDFAnnotationKey(rawValue: "Link"))!)"
if checkVal == value {
draw = ann
break
}
}
var currpage: PDFPage? = nil
var button: PDFAnnotation?
for (btn, page) in self.allButtons() {
let value = "\(btn.value(forAnnotationKey: PDFAnnotationKey(rawValue: "_Path"))!)"
if checkVal == value {
currpage = page
button = btn
break
}
}
if draw != nil && button != nil && currpage != nil {
let color = draw?.value(forAnnotationKey: PDFAnnotationKey(rawValue: "_Color")) as! String
switch (color) {
case "red":
draw?.color = UIColor.red
break
default:
break
}
currpage?.removeAnnotation(button!)
self.editButtonItem.isEnabled = false
self.undoBtn.isEnabled = true
self.pencil.isEnabled = false
self.selectedAnnotation.append(PAIRAnn.init(drawing: draw!, button: button!, currentPage: currpage!))
}
}
@IBAction func undoAction(_ sender: Any) {
self.removeMessage()
if self.selectedAnnotation.count > 0 {
for pairs in self.selectedAnnotation {
pairs.drawing.color = UIColor.clear
pairs.currentPage.addAnnotation(pairs.button)
}
self.selectedAnnotation = []
self.closeBtn.isEnabled = false
self.saveBtn.isEnabled = false
self.undoBtn.isEnabled = false
self.pencil.isEnabled = true
}
else {
self.drawView.undo()
}
}
@IBAction func removeDrawing(sender: Any) {
self.removeMessage()
self.pdfView.removeGestureRecognizer(self.gestureRecognizer)
self.drawView.removeFromSuperview()
self.closeBtn.isEnabled = false
self.saveBtn.isEnabled = false
self.undoBtn.isEnabled = false
self.pencil.isEnabled = true
}
func drawDidBegin() {
self.undoBtn.isEnabled = true
self.saveBtn.isEnabled = true
}
func drawingGotEmpty() {
self.undoBtn.isEnabled = false
self.saveBtn.isEnabled = false
}
@IBAction func saveDrawing(_ sender: Any) {
self.pdfView.removeGestureRecognizer(self.gestureRecognizer)
self.currentPage = self.pdfView.page(for: self.drawView.touches[0].path.first!, nearest: true)
var arrayPath = [UIBezierPath]()
for touches in self.drawView.touches {
let bPth = UIBezierPath.init()
let point = self.pdfView.convert(touches.path.first!, to: self.currentPage!)
bPth.move(to: point)
if touches.path.count > 1 {
for i in 1...touches.path.count-1 {
let tch = self.pdfView.convert(touches.path[i], to: self.currentPage!)
bPth.addLine(to: tch)
}
}
arrayPath.append(bPth)
}
let ann = PDFAnnotation.init(bounds: (self.currentPage?.bounds(for: self.pdfView.displayBox))!, forType: PDFAnnotationSubtype.ink, withProperties: nil)
ann.shouldDisplay = false
for bPth in arrayPath {
ann.add(bPth)
}
ann.color = UIColor.clear
let stamp = "PAGE_\(Date.init().timeIntervalSince1970)"
ann.setValue(stamp, forAnnotationKey: PDFAnnotationKey.init(rawValue: "Link"))
ann.setValue("red", forAnnotationKey: PDFAnnotationKey.init(rawValue: "_Color"))
self.currentPage?.addAnnotation(ann)
let linkPoint = self.pdfView.convert(self.position!, to: self.currentPage!)
let linkAnn = PDFAnnotation.init(bounds: CGRect(x: linkPoint.x - 20, y: linkPoint.y - 12, width: 40, height: 24), forType: PDFAnnotationSubtype.widget, withProperties: nil)
linkAnn.widgetFieldType = .button
linkAnn.widgetDefaultStringValue = "Open"
linkAnn.widgetControlType = .pushButtonControl
linkAnn.action = PDFActionURL(url: URL(string: "path://\(stamp)")!)
linkAnn.setValue(stamp, forAnnotationKey: PDFAnnotationKey.init(rawValue: "_Path"))
linkAnn.backgroundColor = UIColor.yellow
self.currentPage?.addAnnotation(linkAnn)
self.drawView.removeFromSuperview()
self.closeBtn.isEnabled = false
self.saveBtn.isEnabled = false
self.undoBtn.isEnabled = false
self.pencil.isEnabled = true
}
func allButtons() -> [(PDFAnnotation,PDFPage)]{
var list = [(PDFAnnotation,PDFPage)]()
let checkType = PDFAnnotationSubtype.widget.rawValue.replacingOccurrences(of: "/", with: "");
for page in self.pdfView.visiblePages() {
for ann in page.annotations {
if ann.type! == checkType && ann.widgetFieldType == .button {
list.append((ann, page))
}
}
}
return list
}
func allLinks() -> [PDFAnnotation]{
var list = [PDFAnnotation]()
let checkType = PDFAnnotationSubtype.ink.rawValue.replacingOccurrences(of: "/", with: "");
for page in self.pdfView.visiblePages() {
for ann in page.annotations {
if ann.type! == checkType {
list.append(ann)
}
}
}
return list
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
}
