I check if the image is vertical or horizontal. If it is horizontal, I rotate it:
@IBOutlet private weak var img: UIImageView!
img.image = file.image
let imageSize = file.image?.size
let imgWidth = imageSize?.width ?? 0
let imgHeight = imageSize?.height ?? 0
if imgWidth > imgHeight {
print("IMG HORIZONTAL")
img.transform = img.transform.rotated(by: .pi / 2)
} else {
print("IMG VERTICAL")
}
But this leaves space around the image. I would like the image to fill the UIImageView at its maximum size.
Try this. Declare your image view:
let YourImageView: UIImageView = {
let iv = UIImageView()
iv.image = UIImage(named: "myImage")?.withRenderingMode(.alwaysOriginal)
iv.backgroundColor = .clear
iv.contentMode = .scaleAspectFit
iv.isUserInteractionEnabled = true
iv.clipsToBounds = true
iv.translatesAutoresizingMaskIntoConstraints = false
return iv
}()
In viewDidLoad, add the if statement and call setupConstraints():
override func viewDidLoad() {
super.viewDidLoad()
view.backgroundColor = .white
guard let imgWidth = YourImageView.image?.size.width else { return }
guard let imgHeight = YourImageView.image?.size.height else { return }
if imgWidth > imgHeight {
print("IMG HORIZONTAL")
guard let image = YourImageView.image else { return }
let newImage = image.rotate(radians: .pi / 2) // image rotation
YourImageView.image = newImage
} else {
print("IMG VERTICAL")
}
setupConstraints()
}
Set up the constraints:
fileprivate func setupConstraints() {
view.addSubview(YourImageView)
YourImageView.centerXAnchor.constraint(equalTo: view.centerXAnchor).isActive = true
YourImageView.centerYAnchor.constraint(equalTo: view.centerYAnchor).isActive = true
YourImageView.leadingAnchor.constraint(equalTo: view.leadingAnchor).isActive = true
YourImageView.trailingAnchor.constraint(equalTo: view.trailingAnchor).isActive = true
}
Write the UIImage extension:
extension UIImage {
func rotate(radians: Float) -> UIImage? {
var newSize = CGRect(origin: CGPoint.zero, size: self.size).applying(CGAffineTransform(rotationAngle: CGFloat(radians))).size
// Trim off the extremely small float value to prevent core graphics from rounding it up
newSize.width = floor(newSize.width)
newSize.height = floor(newSize.height)
UIGraphicsBeginImageContextWithOptions(newSize, false, self.scale)
guard let context = UIGraphicsGetCurrentContext() else { return nil }
// Move origin to middle
context.translateBy(x: newSize.width/2, y: newSize.height/2)
// Rotate around middle
context.rotate(by: CGFloat(radians))
// Draw the image at its center
self.draw(in: CGRect(x: -self.size.width/2, y: -self.size.height/2, width: self.size.width, height: self.size.height))
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage
}
}
Here is the result, normal (without the rotate extension call) and rotated (with it):
Need to mark the recognition boxes around strings and then get the string after tapping
import UIKit
import Vision
class ViewController: UIViewController, ImageGet {
//MARK: OUTLETS
@IBOutlet weak var selectButton: UIButton!
//MARK: VARIABLES
var objU = UtilityClass()
var image: UIImage?
var str: String?
var uiButton: UIButton?
var arrayString = [String]()
var imageView: UIImageView = UIImageView()
//MARK: DELEGATE FUNCTION
func img(image: UIImage) {
self.image = image
imageView.image = image
setUp()
}
override func viewDidLoad() {
super.viewDidLoad()
imageView.isUserInteractionEnabled = true
// Do any additional setup after loading the view.
}
//MARK: SETUPUI
func setUp() {
let realImg = resizeImage(image: imageView.image!, targetSize: CGSize(width: view.frame.width, height: view.frame.height))
self.image = realImg
self.imageView.image = self.image
imageView.isUserInteractionEnabled = true
self.imageView.frame = CGRect(x: 0, y: 0, width: realImg.size.width, height: realImg.size.height)
view.addSubview(imageView)
guard let cgimg = realImg.cgImage else {return}
let requestHandler = VNImageRequestHandler(cgImage: cgimg)
let req = VNRecognizeTextRequest(completionHandler: recognizeTextHandler)
req.recognitionLevel = .accurate
do {
try requestHandler.perform([req])
} catch {
print("Unable to perform the request: \(error)")
}
}
//MARK: SELECT THE IMAGE
@IBAction func selectButtontapped(_ sender: Any) {
objU.delegate = self
objU.obj = self
objU.ImageGet()
}
func recognizeTextHandler(request: VNRequest, error: Error?) {
guard let observation = request.results as? [VNRecognizedTextObservation], error == nil else {
return
}
_ = observation.compactMap({
$0.topCandidates(1).first?.string
}).joined(separator: "\n")
for subView in imageView.subviews {
subView.removeFromSuperview()
}
let boundingRect: [CGRect] = observation.compactMap { observation in
guard let candidate = observation.topCandidates(1).first else {return .zero}
//find the bounding box observation
let stringRange = candidate.string.startIndex..<candidate.string.endIndex
let boxObservation = try? candidate.boundingBox(for: stringRange)
let boundingBox = boxObservation?.boundingBox ?? .zero
str = candidate.string
self.arrayString.append(str!)
let rectInImg = VNImageRectForNormalizedRect(boundingBox, Int((imageView.frame.size.width)), Int((imageView.frame.size.height)))
let convertedRect = self.getConvertedRect(boundingBox: observation.boundingBox, inImage:image!.size , containedIn: (imageView.bounds.size))
drawBoundBox(rect: convertedRect)
return rectInImg
}
print(arrayString)
print(boundingRect)
}
func drawBoundBox(rect: CGRect) {
uiButton = UIButton(type: .custom)
uiButton?.frame = rect
uiButton?.layer.borderColor = UIColor.systemPink.cgColor
uiButton?.setTitle("", for: .normal)
uiButton?.layer.borderWidth = 2
uiButton?.tag = arrayString.count
imageView.addSubview(uiButton ?? UIButton())
uiButton?.addTarget(self, action: #selector(pressed(_:)), for: .touchUpInside)
}
@objc func pressed(_ sender: UIButton) {
alert(key: arrayString[sender.tag - 1])
}
//MARK: CONVERT THE NORMALISED BOUNDING RECT
func getConvertedRect(boundingBox: CGRect, inImage imageSize: CGSize, containedIn containerSize: CGSize) -> CGRect {
let rectOfImage: CGRect
let imageAspect = imageSize.width / imageSize.height
let containerAspect = containerSize.width / containerSize.height
if imageAspect > containerAspect { /// image extends left and right
let newImageWidth = containerSize.height * imageAspect /// the width of the overflowing image
let newX = -(newImageWidth - containerSize.width) / 2
rectOfImage = CGRect(x: newX, y: 0, width: newImageWidth, height: containerSize.height)
} else { /// image extends top and bottom
let newImageHeight = containerSize.width * (1 / imageAspect) /// the height of the overflowing image
let newY = -(newImageHeight - containerSize.height) / 2
rectOfImage = CGRect(x: 0, y: newY, width: containerSize.width, height: newImageHeight)
}
let newOriginBoundingBox = CGRect(
x: boundingBox.origin.x,
y: 1 - boundingBox.origin.y - boundingBox.height,
width: boundingBox.width,
height: boundingBox.height
)
var convertedRect = VNImageRectForNormalizedRect(newOriginBoundingBox, Int(rectOfImage.width), Int(rectOfImage.height))
/// add the margins
convertedRect.origin.x += rectOfImage.origin.x
convertedRect.origin.y += rectOfImage.origin.y
return convertedRect
}
//MARK: RESIZE THE IMAGE ACCORDING TO THE DEVICE
func resizeImage(image: UIImage, targetSize: CGSize) -> UIImage {
let size = image.size
let widthRatio = targetSize.width / image.size.width
let heightRatio = targetSize.height / image.size.height
// Figure out what our orientation is, and use that to form the rectangle
var newSize: CGSize
if widthRatio > heightRatio {
newSize = CGSize(width: size.width * heightRatio, height: size.height * heightRatio)
} else {
newSize = CGSize(width: size.width * widthRatio, height: size.height * widthRatio)
}
// This is the rect that we've calculated out and this is what is actually used below
let rect = CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height)
// Actually do the resizing to the rect using the ImageContext stuff
UIGraphicsBeginImageContextWithOptions(newSize, false, 1.0)
image.draw(in: rect)
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage!
}
//MARK: POPPING ALERT WITH STRING
func alert(key: String) {
let alertController = UIAlertController(title: "String", message: key, preferredStyle: .alert)
let OKAction = UIAlertAction(title: "OK", style: .default) {
(action: UIAlertAction!) in
// Code in this block will trigger when OK button tapped.
}
let copyAction = UIAlertAction(title: "Copy", style: .default) {
(action: UIAlertAction!) in
UIPasteboard.general.string = key
}
alertController.addAction(copyAction)
alertController.addAction(OKAction)
self.present(alertController, animated: true, completion: nil)
}
}
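For reference, ImageGet and UtilityClass are not shown above. A minimal sketch of what they might look like (hypothetical, inferred only from the call sites in this code) is a delegate protocol plus a thin UIImagePickerController wrapper:
// Hypothetical delegate protocol inferred from `func img(image:)` above.
protocol ImageGet: AnyObject {
    func img(image: UIImage)
}

// Hypothetical picker wrapper; the method name matches the `objU.ImageGet()` call site.
class UtilityClass: NSObject, UIImagePickerControllerDelegate, UINavigationControllerDelegate {
    weak var delegate: ImageGet?
    weak var obj: UIViewController?

    func ImageGet() {
        let picker = UIImagePickerController()
        picker.delegate = self
        picker.sourceType = .photoLibrary
        obj?.present(picker, animated: true)
    }

    func imagePickerController(_ picker: UIImagePickerController,
                               didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey: Any]) {
        picker.dismiss(animated: true)
        if let image = info[.originalImage] as? UIImage {
            delegate?.img(image: image)
        }
    }
}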
I am currently trying to implement something similar to Instagram's story feature, where you take a picture or a video and swipe left or right to change the current filter over the content. (Here is an example of what I managed to do in my app for images: https://imgur.com/a/pYKrPkA)
As you can see in the example, I got it done for images, but now I am trying to make it work for videos as well, and I am a bit lost as to where to start.
final class Filter: NSObject {
var isEnabled: Bool = true
var overlayImage: CIImage?
var ciFilter: CIFilter?
init(ciFilter: CIFilter?) {
self.ciFilter = ciFilter
super.init()
}
/// Empty filter for the original photo
static func emptyFilter() -> Filter {
return Filter(ciFilter: nil)
}
func imageByProcessingImage(_ image: CIImage, at time: CFTimeInterval) -> CIImage? {
guard isEnabled else { return image }
var image = image
if let overlayImage = overlayImage {
image = overlayImage.composited(over: image)
}
guard let ciFilter = ciFilter else {
return image
}
ciFilter.setValue(image, forKey: kCIInputImageKey)
return ciFilter.value(forKey: kCIOutputImageKey) as? CIImage
}
}
class StoriesImageView: UIView {
private var metalView: MTKView?
private var ciImage: CIImage?
private var preferredCIImageTransform: CGAffineTransform?
private let device = MTLCreateSystemDefaultDevice()
private var commandQueue: MTLCommandQueue?
private var context: CIContext?
override func layoutSubviews() {
super.layoutSubviews()
metalView?.frame = bounds
}
override func setNeedsDisplay() {
super.setNeedsDisplay()
metalView?.setNeedsDisplay()
}
func setImage(with image: UIImage) {
preferredCIImageTransform = preferredCIImageTransform(from: image)
if let cgImage = image.cgImage {
ciImage = CIImage(cgImage: cgImage)
loadContextIfNeeded()
}
setNeedsDisplay()
}
/// Returns the image fitted to 1080x1920.
func renderedUIImage() -> UIImage? {
return renderedUIImage(in: CGRect(origin: .zero, size: CGSize(width: 1080, height: 1920)))
}
/// Returns a CIImage fitted to the main screen bounds.
func renderedCIImage() -> CIImage? {
// Note: CGRect(rect:contentScale:) is a custom convenience initializer, not a UIKit API.
return renderedCIImage(in: CGRect(rect: bounds, contentScale: UIScreen.main.scale))
}
func renderedUIImage(in rect: CGRect) -> UIImage? {
if let image = renderedCIImage(in: rect), let context = context {
if let imageRef = context.createCGImage(image, from: image.extent) {
return UIImage(cgImage: imageRef)
}
}
return nil
}
func renderedCIImage(in rect: CGRect) -> CIImage? {
if var image = ciImage, let transform = preferredCIImageTransform {
image = image.transformed(by: transform)
return scaleAndResize(image, for: rect)
}
return nil
}
private func cleanupContext() {
metalView?.removeFromSuperview()
metalView?.releaseDrawables()
metalView = nil
}
private func loadContextIfNeeded() {
setContext()
}
private func setContext() {
let mView = MTKView(frame: bounds, device: device)
mView.clearColor = MTLClearColor(red: 0, green: 0, blue: 0, alpha: 0)
mView.framebufferOnly = false
mView.enableSetNeedsDisplay = true
mView.contentScaleFactor = contentScaleFactor
mView.delegate = self
metalView = mView
commandQueue = device?.makeCommandQueue()
context = CIContext(mtlDevice: device!)
insertSubview(metalView!, at: 0)
}
private func scaleAndResize(_ image: CIImage, for rect: CGRect) -> CIImage {
let imageSize = image.extent.size
let horizontalScale = rect.size.width / imageSize.width
let verticalScale = rect.size.height / imageSize.height
let scale = min(horizontalScale, verticalScale)
return image.transformed(by: CGAffineTransform(scaleX: scale, y: scale))
}
private func preferredCIImageTransform(from image: UIImage) -> CGAffineTransform {
if image.imageOrientation == .up {
return .identity
}
var transform: CGAffineTransform = .identity
switch image.imageOrientation {
case .down, .downMirrored:
transform = transform.translatedBy(x: image.size.width, y: image.size.height)
transform = transform.rotated(by: .pi)
case .left, .leftMirrored:
transform = transform.translatedBy(x: image.size.width, y: 0)
transform = transform.rotated(by: .pi / 2)
case .right, .rightMirrored:
transform = transform.translatedBy(x: 0, y: image.size.height)
transform = transform.rotated(by: .pi / -2)
case .up, .upMirrored: break
@unknown default: fatalError("Unknown image orientation")
}
switch image.imageOrientation {
case .upMirrored, .downMirrored:
transform = transform.translatedBy(x: image.size.width, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
case .leftMirrored, .rightMirrored:
transform = transform.translatedBy(x: image.size.height, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
case .up, .down, .left, .right: break
@unknown default: fatalError("Unknown image orientation")
}
return transform
}
}
extension StoriesImageView: MTKViewDelegate {
func draw(in view: MTKView) {
autoreleasepool {
let rect = CGRect(rect: view.bounds, contentScale: UIScreen.main.scale)
if let image = renderedCIImage(in: rect) {
let commandBuffer = commandQueue?.makeCommandBuffer()
guard let drawable = view.currentDrawable else {
return
}
let heightDifference = (view.drawableSize.height - image.extent.size.height) / 2
let destination = CIRenderDestination(width: Int(view.drawableSize.width),
height: Int(view.drawableSize.height - heightDifference),
pixelFormat: view.colorPixelFormat,
commandBuffer: commandBuffer,
mtlTextureProvider: { () -> MTLTexture in
return drawable.texture
})
_ = try? context?.startTask(toRender: image, to: destination)
commandBuffer?.present(drawable)
commandBuffer?.commit()
}
}
}
func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {}
}
final class StoriesSwipeableImageView: StoriesImageView {
private let scrollView: UIScrollView = UIScrollView()
private let preprocessingFilter: Filter? = nil
var isRefreshingAutomaticallyWhenScrolling: Bool = true
var filters: [Filter]? {
didSet {
updateScrollViewContentSize()
updateCurrentSelected(notify: true)
}
}
var selectedFilter: Filter? {
didSet {
if selectedFilter != oldValue {
setNeedsLayout()
}
}
}
override init(frame: CGRect) {
super.init(frame: frame)
setup()
}
required init?(coder: NSCoder) {
super.init(coder: coder)
setup()
}
override func layoutSubviews() {
super.layoutSubviews()
scrollView.frame = bounds
updateScrollViewContentSize()
}
private func setup() {
scrollView.delegate = self
scrollView.isPagingEnabled = true
scrollView.showsHorizontalScrollIndicator = false
scrollView.showsVerticalScrollIndicator = false
scrollView.bounces = true
scrollView.alwaysBounceVertical = true
scrollView.alwaysBounceHorizontal = true
scrollView.backgroundColor = .clear
addSubview(scrollView)
}
private func updateScrollViewContentSize() {
let filterCount = filters?.count ?? 0
scrollView.contentSize = CGSize(width: filterCount * Int(frame.size.width) * 3,
height: Int(frame.size.height))
if let selectedFilter = selectedFilter {
scroll(to: selectedFilter, animated: false)
}
}
private func scroll(to filter: Filter, animated: Bool) {
if let index = filters?.firstIndex(where: { $0 === filter }) {
let contentOffset = CGPoint(x: scrollView.contentSize.width / 3 + scrollView.frame.size.width * CGFloat(index), y: 0)
scrollView.setContentOffset(contentOffset, animated: animated)
updateCurrentSelected(notify: false)
} else {
fatalError("Filter is not available in filters collection")
}
}
private func updateCurrentSelected(notify: Bool) {
guard frame.size.width != 0 else { return }
let filterCount = filters?.count ?? 0
let selectedIndex = Int(scrollView.contentOffset.x + scrollView.frame.size.width / 2) / Int(scrollView.frame.size.width) % filterCount
var newFilterGroup: Filter?
if selectedIndex >= 0 && selectedIndex < filterCount {
newFilterGroup = filters?[selectedIndex]
} else {
fatalError("Invalid contentOffset")
}
if selectedFilter != newFilterGroup {
selectedFilter = newFilterGroup
if notify {
// Notify delegate?
}
}
}
override func renderedCIImage(in rect: CGRect) -> CIImage? {
guard var image = super.renderedCIImage(in: rect) else {
print("Failed to render image")
return nil
}
let timeinterval: CFTimeInterval = 0
if let preprocessingFilter = self.preprocessingFilter {
image = preprocessingFilter.imageByProcessingImage(image, at: timeinterval)!
}
let extent = image.extent
let contentSize = scrollView.bounds.size
if contentSize.width == 0 {
return image
}
let filtersCount = filters?.count ?? 0
if filtersCount == 0 {
return image
}
let ratio = scrollView.contentOffset.x / contentSize.width
var index = Int(ratio)
let upIndex = Int(ceil(ratio))
let remaningRatio = ratio - CGFloat(index)
var xImage = extent.size.width * -remaningRatio
var outputImage: CIImage? = CIImage(color: CIColor(red: 0, green: 0, blue: 0))
while index <= upIndex {
let currentIndex = index % filtersCount
let filter = filters?[currentIndex]
var filteredImage = filter?.imageByProcessingImage(image, at: timeinterval)
filteredImage = filteredImage?.cropped(to:
CGRect(x: extent.origin.x + xImage,
y: extent.origin.y,
width: extent.size.width,
height: extent.size.height)
)
guard let output = outputImage else { return nil }
outputImage = filteredImage?.composited(over: output)
xImage += extent.size.width
index += 1
}
outputImage = outputImage?.cropped(to: extent)
return outputImage
}
}
extension StoriesSwipeableImageView: UIScrollViewDelegate {
func scrollViewDidScroll(_ scrollView: UIScrollView) {
let width = scrollView.frame.size.width
let contentOffsetX = scrollView.contentOffset.x
let contentSizeWidth = scrollView.contentSize.width
let normalWidth = CGFloat(filters?.count ?? 0) * width
if width > 0 && contentSizeWidth > 0 {
if contentOffsetX <= 0 {
scrollView.contentOffset = CGPoint(x: contentOffsetX + normalWidth, y: scrollView.contentOffset.y)
} else if contentOffsetX + width >= contentSizeWidth {
scrollView.contentOffset = CGPoint(x: contentOffsetX - normalWidth, y: scrollView.contentOffset.y)
}
}
if isRefreshingAutomaticallyWhenScrolling {
setNeedsDisplay()
}
}
func scrollViewDidScrollToTop(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndScrollingAnimation(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndDecelerating(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndDragging(_ scrollView: UIScrollView, willDecelerate decelerate: Bool) {
if !decelerate {
updateCurrentSelected(notify: true)
}
}
}
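For context, here is roughly how I wire these up; a minimal sketch (someUIImage and the filter names are just illustrative):
let storiesView = StoriesSwipeableImageView(frame: view.bounds)
storiesView.filters = [
    Filter.emptyFilter(), // original, unfiltered
    Filter(ciFilter: CIFilter(name: "CIPhotoEffectNoir")),
    Filter(ciFilter: CIFilter(name: "CISepiaTone"))
]
storiesView.setImage(with: someUIImage)
view.addSubview(storiesView)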
These three classes do the magic for the image part. Does anyone have a suggestion or a starting point for the video part? I tried looking over https://github.com/rFlex/SCRecorder but I get a bit lost in Obj-C.
In iOS 9 / OS X 10.11 / tvOS, there's a convenience method for applying CIFilters to video. It works on an AVVideoComposition, so you can use it both for playback and for file-to-file import/export. See AVVideoComposition.init(asset:applyingCIFiltersWithHandler:) for the method docs.
There's an example in Apple's Core Image Programming Guide, too:
let filter = CIFilter(name: "CIGaussianBlur")!
let composition = AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
// Clamp to avoid blurring transparent pixels at the image edges
let source = request.sourceImage.clampingToExtent()
filter.setValue(source, forKey: kCIInputImageKey)
// Vary filter parameters based on video timing
let seconds = CMTimeGetSeconds(request.compositionTime)
filter.setValue(seconds * 10.0, forKey: kCIInputRadiusKey)
// Crop the blurred output to the bounds of the original image
let output = filter.outputImage!.cropped(to: request.sourceImage.extent)
// Provide the filter output to the composition
request.finish(with: output, context: nil)
})
That part sets up the composition. After you've done that, you can either play it by assigning it to an AVPlayer or write it to a file with AVAssetExportSession. Since you're after the latter, here's an example of that:
let export = AVAssetExportSession(asset: asset, presetName: AVAssetExportPreset1920x1080)!
export.outputFileType = .mov
export.outputURL = outURL
export.videoComposition = composition
export.exportAsynchronously { /*...*/ }
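If you want the playback route instead, a minimal sketch (same asset and composition as above):
// Attach the filtered composition to a player item and play it.
let item = AVPlayerItem(asset: asset)
item.videoComposition = composition
let player = AVPlayer(playerItem: item)
player.play()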
There's a bit more about this in the WWDC15 session on Core Image, starting around 20 minutes in.
I have a UIImageView that an image picker loads with a photo (roughly 3000x4000), filled into the view using aspect-fit mode. I then pan and zoom the image with gesture recognizers, which adjust the transform scale and translation of the view. This works well; here is the code:
@objc private func startZooming(_ sender: UIPinchGestureRecognizer) {
let scaleResult = sender.view?.transform.scaledBy(x: sender.scale, y: sender.scale)
guard let scale = scaleResult, scale.a > 1, scale.d > 1 else { return }
sender.view?.transform = scale
sender.scale = 1
}
@objc private func startPanning(_ sender: UIPanGestureRecognizer) {
let translate = sender.translation(in: mainImageView)
let center = mainImageView.center
mainImageView.center = CGPoint(x: center.x + translate.x, y: center.y + translate.y)
self.currImageTranslation.x += translate.x
self.currImageTranslation.y += translate.y
sender.setTranslation(CGPoint(x: 0, y: 0), in: mainImageView)
}
My problem is that I need the visual result of the transform as a CGImage to feed into a visual inference engine. When I get the image from the UIImageView:
let image = myUIImageView.image // I get the original untransformed image
When I try:
UIGraphicsBeginImageContextWithOptions(image!.size, false, 0.0)
myImageView.layer.render(in: UIGraphicsGetCurrentContext()!)
let newImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
newImage is also just the original image (untransformed).
When I try:
let rect = mainImageView.bounds
let scale = UIScreen.main.scale
var t = self.currImageTranslation
let tScale = mainImageView.transform.a
let zoomOffsetFactorX = ((t.x / rect.size.width) * tScale)
let zoomOffsetFactorY = ((t.y / rect.size.height) * tScale)
t.x = t.x - zoomOffsetFactorX
t.y = (t.y + 20.0) + zoomOffsetFactorY
// create a context
UIGraphicsBeginImageContextWithOptions(rect.size, false, 0.0)
let context = UIGraphicsGetCurrentContext()!
let transform = mainImageView.transform
let imrect = CGRect(origin: t, size: rect.size)
context.concatenate(transform)
let tempImage = mainImageView.image
tempImage!.draw(in: imrect)
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
the newImage translates properly, but only at scale = 1.0; zooming, with or without scaling, creates fractional images, i.e., clipped to the (0, 0) origin.
I would like to generate a UIImage of just what the user sees on the screen in that ImageView. Can anyone help?
With @DonMag's generous suggestions, I tried the following:
func enableZoom() {
let pinchGesture = UIPinchGestureRecognizer(target: self,
action: #selector(startZooming(_:)))
let panGesture = UIPanGestureRecognizer(target: self,
action: #selector(startPanning(_:)))
containerView.isUserInteractionEnabled = true
containerView.addGestureRecognizer(pinchGesture)
containerView.addGestureRecognizer(panGesture)
}
@objc private func startZooming(_ sender: UIPinchGestureRecognizer) {
let scaleResult = sender.view?.transform.scaledBy(x: sender.scale, y: sender.scale)
guard let scale = scaleResult, scale.a > 1, scale.d > 1 else { return }
sender.view?.transform = scale
newImage()
sender.scale = 1
}
@objc private func startPanning(_ sender: UIPanGestureRecognizer) {
let translate = sender.translation(in: mainImageView)
let center = mainImageView.center
mainImageView.center = CGPoint(x: center.x + translate.x, y: center.y + translate.y)
self.currImageTranslation.x += translate.x
self.currImageTranslation.y += translate.y
sender.setTranslation(CGPoint(x: 0, y: 0), in: mainImageView)
newImage()
}
//MARK:- create cropped/panned image and infer
func newImage() -> Void {
let format = UIGraphicsImageRendererFormat()
// we want a 1:1 points-to-pixels output
format.scale = 1
let renderer = UIGraphicsImageRenderer(size: containerView.bounds.size, format: format)
let image = renderer.image { ctx in
containerView.drawHierarchy(in: containerView.bounds, afterScreenUpdates: true)
}
self.currCGImage = image.cgImage
//repeatInference() // runs only once every 0.3 s
return
}
Now the "newImage" correctly reflects the translations created by the panGesture, but seemingly ignores the pinchGesture changes in scale. The drawHierarchy seems to be ignoring the imposed transforms in the UIView. Where am I going wrong?
I'm not entirely sure what you're doing with your transform code, but this may work for you -- and is much simpler...
If I understand correctly, you have a UIImageView with .scaleAspectFit in a UIView "container", and you apply a scale and translate transform to the image view. In that case, rendering the container's view hierarchy captures exactly what's visible:
let format = UIGraphicsImageRendererFormat()
// we want a 1:1 points-to-pixels output
format.scale = 1
let renderer = UIGraphicsImageRenderer(size: containerView.bounds.size, format: format)
let image = renderer.image { ctx in
containerView.drawHierarchy(in: containerView.bounds, afterScreenUpdates: true)
}
Here is a complete example implementation:
class TransImageViewController: UIViewController {
let containerView = UIView()
let imageView = UIImageView()
override func viewDidLoad() {
super.viewDidLoad()
guard let img = UIImage(named: "bkg_2400x1600") else {
fatalError("Could not load the image!!!")
}
let stack = UIStackView()
stack.spacing = 20
stack.distribution = .fillEqually
let b1 = UIButton()
let b2 = UIButton()
[b1, b2].forEach { b in
b.backgroundColor = .red
b.setTitleColor(.white, for: .normal)
b.setTitleColor(.gray, for: .highlighted)
}
b1.setTitle("Transform", for: [])
b2.setTitle("Capture", for: [])
stack.addArrangedSubview(b1)
stack.addArrangedSubview(b2)
[stack, containerView, imageView].forEach {
$0.translatesAutoresizingMaskIntoConstraints = false
}
view.addSubview(stack)
view.addSubview(containerView)
containerView.addSubview(imageView)
let g = view.safeAreaLayoutGuide
NSLayoutConstraint.activate([
// buttons stack at top
stack.topAnchor.constraint(equalTo: g.topAnchor, constant: 20.0),
stack.leadingAnchor.constraint(equalTo: g.leadingAnchor, constant: 40.0),
stack.trailingAnchor.constraint(equalTo: g.trailingAnchor, constant: -40.0),
// container 400x500 centered
containerView.widthAnchor.constraint(equalToConstant: 400),
containerView.heightAnchor.constraint(equalToConstant: 500),
containerView.centerXAnchor.constraint(equalTo: g.centerXAnchor),
containerView.centerYAnchor.constraint(equalTo: g.centerYAnchor),
// imageView constrained all 4 sides to container
imageView.topAnchor.constraint(equalTo: containerView.topAnchor),
imageView.leadingAnchor.constraint(equalTo: containerView.leadingAnchor),
imageView.trailingAnchor.constraint(equalTo: containerView.trailingAnchor),
imageView.bottomAnchor.constraint(equalTo: containerView.bottomAnchor),
])
containerView.clipsToBounds = true
imageView.contentMode = .scaleAspectFit
imageView.image = img
view.backgroundColor = .blue
containerView.backgroundColor = .yellow
imageView.backgroundColor = .orange
b1.addTarget(self, action: #selector(self.doTransform), for: .touchUpInside)
b2.addTarget(self, action: #selector(self.grabImage), for: .touchUpInside)
}
@objc func doTransform() {
var t = CGAffineTransform.identity
t = t.scaledBy(x: 4.0, y: 4.0)
t = t.translatedBy(x: -120, y: 40)
imageView.transform = t
}
@objc func grabImage() {
let format = UIGraphicsImageRendererFormat()
// we want a 1:1 points-to-pixels output
format.scale = 1
let renderer = UIGraphicsImageRenderer(size: containerView.bounds.size, format: format)
let image = renderer.image { ctx in
containerView.drawHierarchy(in: containerView.bounds, afterScreenUpdates: true)
}
// do what you want with the resulting image
print("Resulting image size:", image.size)
}
}
Using this image as my "bkg_2400x1600" image asset:
The above code starts like this (my "container" view is 400x500 pts):
Tapping the "Transform" button applies .scaledBy(x: 4.0, y: 4.0) and .translatedBy(x: -120, y: 40):
and then tapping "Capture" gives me this 400x500 pixel image:
@DonMag put me on the right track. However, to get the correct scaling in the grabbed image, I needed to apply the scaledBy transform to the UIImageView, not the container view. Also, I needed to set the format scale to the current scale factor from the pinch gesture. Here is the working version:
func enableZoom() {
let pinchGesture = UIPinchGestureRecognizer(target: self, action: #selector(startZooming(_:)))
let panGesture = UIPanGestureRecognizer(target: self, action: #selector(startPanning(_:)))
containerView.isUserInteractionEnabled = true
containerView.addGestureRecognizer(pinchGesture)
containerView.addGestureRecognizer(panGesture)
}
@objc private func startZooming(_ sender: UIPinchGestureRecognizer) {
// let scaleResult = sender.view?.transform.scaledBy(x: sender.scale, y: sender.scale)
// guard let scale = scaleResult, scale.a > 1, scale.d > 1 else { return }
// sender.view?.transform = scale
let scaleResult = mainImageView?.transform.scaledBy(x: sender.scale, y: sender.scale)
guard let scale = scaleResult, scale.a > 1, scale.d > 1 else { return }
mainImageView.transform = scale
currImageScale = sender.scale
newImage()
sender.scale = 1
}
@objc private func startPanning(_ sender: UIPanGestureRecognizer) {
let translate = sender.translation(in: mainImageView)
let center = mainImageView.center
mainImageView.center = CGPoint(x: center.x + translate.x, y: center.y + translate.y)
self.currImageTranslation.x += translate.x
self.currImageTranslation.y += translate.y
sender.setTranslation(CGPoint(x: 0, y: 0), in: mainImageView)
newImage()
}
//MARK:- create cropped/panned image and infer
func newImage() -> Void {
let format = UIGraphicsImageRendererFormat()
// we want a 1:1 points-to-pixels output
format.scale = currImageScale
let renderer = UIGraphicsImageRenderer(size: containerView.bounds.size, format: format)
let image = renderer.image { ctx in
containerView.drawHierarchy(in: containerView.bounds, afterScreenUpdates: true)
}
self.currCGImage = image.cgImage
repeatInference() // runs only once every 0.3 s
return
}
I'm making a custom UISlider with a thumb like this:
But here is what I get:
I want my thumb to show at the full height of the grey container; my image is 160x160 for @2x. But when it's set on the UISlider it doesn't act like that. Here is the view debugging:
I'm using a custom class for this UISlider; what's wrong in the code?
class CustomSlider: UISlider {
@IBInspectable var trackHeight: CGFloat = 0.0001
// @IBInspectable var thumbRadius: CGFloat = 20
// Custom thumb view which will be converted to UIImage
// and set as thumb. You can customize it's colors, border, etc.
private lazy var thumbView: UIView = {
let thumb = UIImageView()
// thumb.backgroundColor = .white
// thumb.layer.borderWidth = 0.4
// thumb.layer.borderColor = UIColor.white.cgColor
thumb.image = UIImage(named: "icon_slider")
return thumb
}()
override func awakeFromNib() {
super.awakeFromNib()
let thumb = thumbImage()
setThumbImage(thumb, for: .normal)
}
private func thumbImage() -> UIImage {
thumbView.frame = CGRect(x: 0, y: 50/2, width: 50, height: 50)
// thumbView.dropShadow(color: .red, opacity: 1, offSet: CGSize(width: -1, height: 1), radius: 3, scale: true)
thumbView.layer.cornerRadius = 4
thumbView.layer.masksToBounds = true
// Convert thumbView to UIImage
if #available(iOS 10.0, *) {
let renderer = UIGraphicsImageRenderer(bounds: thumbView.bounds)
return renderer.image { rendererContext in
thumbView.layer.render(in: rendererContext.cgContext)
}
} else {
UIGraphicsBeginImageContext(thumbView.frame.size)
thumbView.layer.render(in: UIGraphicsGetCurrentContext()!)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return UIImage(cgImage: image!.cgImage!)
}
}
override func trackRect(forBounds bounds: CGRect) -> CGRect {
// Set custom track height
// As seen here: https://stackoverflow.com/a/49428606/7235585
var newRect = super.trackRect(forBounds: bounds)
newRect.size.height = trackHeight
return newRect
}
}
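For what it's worth, a hedged guess at the issue: setThumbImage(_:for:) sizes the thumb to exactly the image you pass it, and thumbImage() above renders the thumb view at a fixed 50x50 points. Rendering at the height you actually want is the first thing to try; a sketch, assuming 80 pt is the target size (160 px at @2x) and iOS 10+:
// Hypothetical adjustment: render the thumb at the intended on-screen size.
private func thumbImage(side: CGFloat = 80) -> UIImage {
    thumbView.frame = CGRect(x: 0, y: 0, width: side, height: side)
    thumbView.layer.cornerRadius = 4
    thumbView.layer.masksToBounds = true
    let renderer = UIGraphicsImageRenderer(bounds: thumbView.bounds)
    return renderer.image { ctx in
        thumbView.layer.render(in: ctx.cgContext)
    }
}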
I referred to this question, and added an extension to UIView:
extension UIView {
// Using a function since `var image` might conflict with an existing variable
// (like on `UIImageView`)
func asImage() -> UIImage {
if #available(iOS 10.0, *) {
let renderer = UIGraphicsImageRenderer(bounds: bounds)
return renderer.image { rendererContext in
layer.render(in: rendererContext.cgContext)
}
} else {
UIGraphicsBeginImageContext(self.frame.size)
self.layer.render(in: UIGraphicsGetCurrentContext()!)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return UIImage(cgImage: image!.cgImage!)
}
}
}
Then I created a very simple test view:
private class FakeTestView: BaseView {
override func prepare() {
backgroundColor = .blue
setup()
}
private func setup(){
let lbl = LabelSL.regular()
lbl.text = "LabelSL.regular()"
addSubview(lbl)
lbl.centerXAnchor.constraint(equalTo: centerXAnchor).isActive = true
lbl.centerYAnchor.constraint(equalTo: centerYAnchor).isActive = true
}
}
That view shows correctly when treated as a UIView.
Finally, I tried:
let newSlideFrame = CGRect(x: CGFloat(i) * event.expectedWidth(),
y: 0,
width: event.expectedWidth(),
height: frame.size.height)
let imgView = UIImageView()
imgView.contentMode = .scaleAspectFit
imgView.frame = newSlideFrame
imgView.image = FakeTestView().asImage()
scroll.addSubview(imgView)
But nothing shows. The code above works when I try to add a UIView, or a UIImageView with sample images.
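A hedged guess at why, based only on the code above: FakeTestView() is created with a zero-sized frame, so its layer has nothing to render when asImage() runs. Giving the view a concrete frame and forcing a layout pass before snapshotting should produce a visible image; a sketch (assuming BaseView supports init(frame:)):
// Hypothetical fix: the view needs a nonzero frame and a layout pass
// before its layer can render its subviews.
let testView = FakeTestView(frame: newSlideFrame)
testView.setNeedsLayout()
testView.layoutIfNeeded()
imgView.image = testView.asImage()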