Is there a way to hide the content overflow the canvas size? - android-jetpack-compose

I set a size for the canvas and used drawCircle. Since the circle is larger than the canvas, the circle will appear to be out of bounds.
// Question code: a 300.dp Canvas centered inside a full-screen black Box.
// The circle's radius intentionally exceeds the canvas bounds to demonstrate
// the overflow the question asks about.
class MainActivity : ComponentActivity() {
    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContent {
            MyApplicationTheme {
                // A surface container using the 'background' color from the theme
                Box(
                    contentAlignment = Alignment.Center,
                    modifier = Modifier
                        .fillMaxSize()
                        .background(Color.Black)
                ) {
                    Canvas(modifier = Modifier.size(300.dp).background(Color.Blue)){
                        // Radius is minDimension / 2 plus 50 px, so the circle is
                        // larger than the canvas and spills outside its bounds.
                        drawCircle(
                            color = Color.White,
                            center = Offset(x = size.width / 2f, y = size.height / 2f),
                            radius = (size.minDimension / 2f)+50
                        )
                    }
                }
            }
        }
    }
}
enter image description here
I expect the overflowing part to be hidden, just like this:
enter image description here
what should I do?

Modifier.clipToBounds() clips any drawing that falls outside of the Composable's bounds.
/**
 * Clip the content to the bounds of a layer defined at this modifier.
 */
@Stable
fun Modifier.clipToBounds() = graphicsLayer(clip = true)
which is Modifier.graphicsLayer{clip = true} under the hood that you can use as
Canvas(modifier = Modifier
    .clipToBounds()
    .size(300.dp).background(Color.Blue)){
    // The graphics layer introduced by clipToBounds() clips this draw call,
    // so the overflowing part of the circle is hidden.
    drawCircle(
        color = Color.White,
        center = Offset(x = size.width / 2f, y = size.height / 2f),
        radius = (size.minDimension / 2f)+50
    )
}

You can use something like this:
// A surface container using the 'background' color from the theme
Box(
    contentAlignment = Alignment.Center,
    modifier = Modifier
        .fillMaxSize()
        .background(Color.Black)
) {
    Canvas(
        modifier = Modifier
            .size(300.dp)
            .background(Color.Blue)
    ){
        // clipRect with no arguments clips the draw commands inside its block
        // to the canvas bounds, hiding the overflowing part of the circle.
        clipRect {
            drawCircle(
                color = Color.White,
                center = Offset(x = size.width / 2f, y = size.height / 2f),
                radius = (size.minDimension / 2f)+50
            )
        }
    }
}

Related

Rounded corners on inside in Jetpack Compose

Is there an options to round the corners on inside like in the picture? I have tried with CutCornerShape but it cuts them straight.
Try something like this:
// Draws a ticket-shaped black background with a red dashed stroke overlaid
// slightly inset on top of it.
// NOTE(review): relies on ticketPath(...) and canvasModifier defined elsewhere
// in the answer's source — confirm their signatures when reusing this snippet.
@Composable
private fun DrawTicketPathWithArc() {
    Canvas(modifier = canvasModifier) {
        val canvasWidth = size.width
        val canvasHeight = size.height
        // Black background: ticket occupies 80% of the canvas, centered.
        val ticketBackgroundWidth = canvasWidth * .8f
        val horizontalSpace = (canvasWidth - ticketBackgroundWidth) / 2
        val ticketBackgroundHeight = canvasHeight * .8f
        val verticalSpace = (canvasHeight - ticketBackgroundHeight) / 2
        // Get ticket path for background
        val path1 = ticketPath(
            topLeft = Offset(horizontalSpace, verticalSpace),
            size = Size(ticketBackgroundWidth, ticketBackgroundHeight),
            cornerRadius = 20.dp.toPx()
        )
        drawPath(path1, color = Color.Black)
        // Dashed path in foreground, slightly inset from the background.
        val ticketForegroundWidth = ticketBackgroundWidth * .95f
        val horizontalSpace2 = (canvasWidth - ticketForegroundWidth) / 2
        val ticketForegroundHeight = ticketBackgroundHeight * .9f
        val verticalSpace2 = (canvasHeight - ticketForegroundHeight) / 2
        // Get ticket path for the foreground stroke
        val path2 = ticketPath(
            topLeft = Offset(horizontalSpace2, verticalSpace2),
            size = Size(ticketForegroundWidth, ticketForegroundHeight),
            cornerRadius = 20.dp.toPx()
        )
        drawPath(
            path2,
            color = Color.Red,
            style = Stroke(
                width = 2.dp.toPx(),
                pathEffect = PathEffect.dashPathEffect(
                    floatArrayOf(20f, 20f)
                )
            )
        )
    }
}
check this page
I found a solution which looks like that
// Draws a dashed horizontal line across the full width with a solid circle
// anchored at each end (the "ticket notch" look from the question).
@Composable
private fun DrawDashLine() {
    // 12 px dash followed by a 10 px gap, starting at phase 0.
    val pathEffect = PathEffect.dashPathEffect(floatArrayOf(12f, 10f), 0f)
    Canvas(modifier = Modifier.fillMaxWidth()) {
        drawLine(
            color = Color.Black,
            strokeWidth = 3f,
            start = Offset(0f, 0f),
            end = Offset(size.width, 0f),
            pathEffect = pathEffect
        )
        // Left end cap.
        drawCircle(
            color = Color.Black,
            center = Offset(x = 1f, y = 2f),
            radius = 20f
        )
        // Right end cap.
        drawCircle(
            color = Color.Black,
            center = Offset(x = size.width, y = 2f),
            radius = 20f
        )
    }
}
The background must be black, the foreground is white, and the round circles sit on top of the white area.

How to correctly drag the view around the circle?

I want the green dot to follow the touch point in a circular path, but it doesn't seem to be doing it right.
It seems like there is an unwanted offset somewhere but I can't find it on my own for quite some time.
Here is my code:
// Question code: drags a green dot around a circular track. The drag delta is
// accumulated into touchPoint and converted to polar coordinates inside the
// layout block. (The question reports an unwanted offset — the logic is left
// exactly as asked; only the scrape-mangled '#' annotations are repaired.)
@Preview
@Composable
fun Test() {
    val touchPoint = remember { mutableStateOf(Offset.Zero) }
    Scaffold {
        Column() {
            Box(Modifier.height(100.dp).fillMaxWidth().background(Color.Blue))
            Layout(
                modifier = Modifier.aspectRatio(1f).fillMaxSize(),
                content = {
                    Box(
                        Modifier
                            .size(48.dp)
                            .clip(CircleShape)
                            .background(Color.Green)
                            .pointerInput(Unit) {
                                detectDragGestures(
                                    onDrag = { change, dragAmount ->
                                        change.consumeAllChanges()
                                        // Accumulates raw pixel deltas; this is the
                                        // origin of the offset the question asks about.
                                        touchPoint.value += dragAmount
                                    }
                                )
                            }
                    )
                }
            ) { measurables, constraints ->
                val dot = measurables.first().measure(constraints.copy(minHeight = 0, minWidth = 0))
                val width = constraints.maxWidth
                val height = constraints.maxHeight
                val centerX = width / 2
                val centerY = height / 2
                val lengthFromCenter = width / 2 - dot.width / 2
                val touchX = touchPoint.value.x
                val touchY = touchPoint.value.y
                layout(width, height) {
                    // I planned to achieve the desired behaviour with the following steps:
                    // 1. Convert cartesian coordinates to polar ones
                    val r = sqrt(touchX.pow(2) + touchY.pow(2))
                    val angle = atan2(touchY.toDouble(), touchX.toDouble())
                    // 2. Use fixed polar radius
                    val rFixed = lengthFromCenter
                    // 3. Convert it back to cartesian coordinates
                    val x = rFixed * cos(angle)
                    val y = rFixed * sin(angle)
                    // 4. Layout on screen
                    dot.place(
                        x = (x + centerX - dot.width / 2).roundToInt(),
                        y = (y + centerY - dot.height / 2).roundToInt()
                    )
                }
            }
            Box(Modifier.fillMaxSize().background(Color.Blue))
        }
    }
}
I'm definitely missing something but don't know what exactly. What am I doing wrong?
touchPoint.value += dragAmount
Is in pixel values, and you're updating the position of the dot with pixel values, where it requires dp values. If you update that with
// Converts a raw pixel value to density-independent Dp using the display density.
private fun Float.pxToDp(context: Context): Dp = // Float or Int, depends on the value you have, or Double
    (this / context.resources.displayMetrics.density).dp
The amount with which it will be moved, will be smaller and reflect the dragging made by the user
You can easily achieve this by using some math:
// Lays out the [content] children evenly around a circle and lets the user
// rotate the whole ring by dragging. A Canvas draws the red guide circle.
@Composable
fun CircularView(
    content: @Composable () -> Unit
) {
    // Center of the ring, reported back from the layout pass.
    var middle by remember {
        mutableStateOf(Offset.Zero)
    }
    // Diameter of the ring, reported back from the layout pass.
    var size by remember {
        mutableStateOf(0.dp)
    }
    // Accumulated rotation from drag gestures.
    var dragAngle by remember {
        mutableStateOf(0f)
    }
    Canvas(modifier = Modifier.size(size)) {
        drawCircle(
            color = Color.Red,
            center = middle,
            style = Stroke(1.dp.toPx())
        )
    }
    Layout(
        content = content,
        modifier = Modifier.pointerInput(true) {
            detectDragGestures(
                onDrag = { change, _ ->
                    change.consumeAllChanges()
                    // Rotate by the angular difference between the current and
                    // previous pointer positions, measured from the ring center.
                    val positionOfDrag = change.position
                    val previousPosition = change.previousPosition
                    dragAngle += atan2(
                        positionOfDrag.x - middle.x,
                        positionOfDrag.y - middle.y
                    ) - atan2(
                        previousPosition.x - middle.x,
                        previousPosition.y - middle.y
                    )
                }
            )
        }
    ) { measurables, constraints ->
        val placeables = measurables.map { it.measure(constraints) }
        val layoutWidth = constraints.maxWidth
        val layoutHeight = constraints.maxHeight
        layout(layoutWidth, layoutHeight) {
            val childCount = placeables.size
            if (childCount == 0) return@layout
            val middleX = layoutWidth / 2f
            val middleY = layoutHeight / 2f
            middle = Offset(middleX, middleY)
            // Children are spaced at equal angular intervals around the circle.
            val angleBetween = 2 * PI / childCount
            // Radius shrinks to keep the largest child fully inside the layout.
            val radius =
                min(
                    layoutWidth - (placeables.maxByOrNull { it.width }?.width ?: 0),
                    layoutHeight - (placeables.maxByOrNull { it.height }?.height ?: 0)
                ) / 2
            size = (radius * 2).toDp()
            placeables.forEachIndexed { index, placeable ->
                // Start at 12 o'clock (-PI/2) and offset by the drag rotation.
                val angle = index * angleBetween - PI / 2 - dragAngle
                val x = middleX + (radius) * cos(angle) - placeable.width / 2f
                val y = middleY + (radius) * sin(angle) - placeable.height / 2f
                placeable.placeRelative(x = x.toInt(), y = y.toInt())
            }
        }
    }
}
On the calling side:
CircularView {
repeat(10) {
Box(
modifier = Modifier
.background(
Color(
red = random.nextInt(255),
green = random.nextInt(255),
blue = random.nextInt(255)
), shape = CircleShape
)
.size(50.dp),
contentAlignment = Alignment.Center
) {
Text(text = it.toString(), fontSize = 12.sp, color = Color.White)
}
}
}

react-konva How to set Shape width and height to fill it's Group container?

I want to have the same animation movement on both a Text and a Rect. I put them into a group and apply a tween on the group. However, the x and y props are inherited, width and height are not, which means the Rect's width and height don't have smooth tween. Is there any way to set Rect's width and height to equal to 100% of its Group container's width and height all the time and along with their change?
const { scaleFactor, DynamicData, isPause } = props;
const groupRef = useRef<Konva.Group>(null);
const start = DynamicData.startData;
const end = DynamicData.endData;
useEffect(() => {
if (start && end && groupRef.current) {
const animation = new Tween({
node: groupRef.current,
duration: DynamicData.endTime - DynamicData.startTime,
x: scaleFactor * end.x,
y: scaleFactor * end.y,
width: scaleFactor * end.width,
height: scaleFactor * end.height,
easing: Easings.Linear,
});
if (isPause) {
animation.pause();
} else {
animation.play();
}
}
});
return (
<>
{!_.isUndefined(end) && (
<Group
ref={groupRef}
x={scaleFactor * start.x}
y={scaleFactor * start.y}
width={scaleFactor * start.width}
height={scaleFactor * start.height}
>
<Text
text={start.concept}
/>
<Rect
stroke={start.stroke}
/>
</Group>
)}
</>
);
};
update: for now I add a new Tween on Rect to track the width and height changes:
const sizeAnimation = new Tween({
node: rectRef.current,
duration: DynamicData.endTime - DynamicData.startTime,
width: scaleFactor * end!.width,
height: scaleFactor * end!.height,
easing: Easings.Linear,
});
if (isPause) {
sizeAnimation.pause();
} else {
sizeAnimation.play();
}
width and height properties of Konva.Group do nothing. They don't affect rendering.
You have to add another tween for rectangle.

Dynamically adjust the weighting of columns in a row based on string length

I have a layout with two texts, one on left side and one on right side. If both texts are long, left one should occupy 60%, and right one 40% of the width. But if right text is shorter than 40%, the left one should take all the available space.
Here are the examples:
and:
So I would like to write something like this:
Row {
Text(text = left, modifier = modifier.padding(8.dp).weight(<min 0.6f>))
Text(text = right, modifier = modifier.padding(8.dp).weight(<max 0.4f>))
}
Is there any way to achieve this?
Finally, I figured it out. Here is the modifier:
/**
 * Constrains this element's width to at most [fraction] of the incoming maximum
 * width, while still allowing it to shrink to its intrinsic width when that is
 * smaller.
 */
fun Modifier.maxWidth(
    fraction: Float = 1f,
) = layout { measurable, constraints ->
    // Upper bound: the requested fraction of the available width.
    val widthCap = (constraints.maxWidth * fraction).roundToInt()
    // Use the content's intrinsic width, clamped to the cap.
    val resolvedWidth = minOf(measurable.maxIntrinsicWidth(constraints.maxHeight), widthCap)
    val placeable = measurable.measure(
        Constraints(constraints.minWidth, resolvedWidth, constraints.minHeight, constraints.maxHeight)
    )
    layout(resolvedWidth, placeable.height) {
        placeable.placeRelative(0, 0)
    }
}
So we can use it like this:
Row {
Text(text = left, modifier = modifier.padding(8.dp).weight(1f)) // left text uses all the available space
Text(text = right, modifier = modifier.padding(8.dp).maxWidth(fraction = 0.4f)) // right text can be 40% of the parent or less
}
Measure the length of each string and calculate their weights. Adjust the weights based on the maximum allowed for the right column:
// Measures both texts with android.graphics.Paint, derives the right column's
// weight as its share of the combined width capped at 40%, and gives the left
// column the remainder.
class MainActivity : ComponentActivity() {
    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        // Bug fix: the original answer called startActivity(intent) here, which
        // re-launches this same activity on every onCreate and spawns activities
        // in an endless loop; the call has been removed.
        val paint = android.graphics.Paint()
        // NOTE(review): paint uses its default text size, which will not match
        // the size Compose's Text renders at — the computed weights are only
        // approximate ratios, not exact pixel widths.
        // First row
        val text1 = "This long text should take up maximum 60% of the space with wrapping"
        val text2 = "This is not very long"
        val width1 = paint.measureText(text1)
        val width2 = paint.measureText(text2)
        // Right column weight = its share of total width, capped at 40%.
        var w2 = width2 / (width1 + width2)
        if (w2 > 0.4f) {
            w2 = 0.4f
        }
        val w1 = 1f - w2
        // Second row
        val text3 = "This text should take up more than 60% of the space"
        val text4 = "Short"
        val width3 = paint.measureText(text3)
        val width4 = paint.measureText(text4)
        var w4 = width4 / (width3 + width4)
        if (w4 > 0.4f) {
            w4 = 0.4f
        }
        val w3 = 1f - w4
        setContent {
            Column(modifier = Modifier.fillMaxSize()) {
                Row(
                    modifier = Modifier
                        .fillMaxWidth()
                        .wrapContentHeight()
                        .padding(bottom = 20.dp)
                ) {
                    Text(text1, modifier = Modifier.weight(w1))
                    Text(text2, modifier = Modifier.weight(w2), color = Color.Red)
                }
                Row(
                    modifier = Modifier
                        .fillMaxWidth()
                        .wrapContentHeight()
                        .padding(bottom = 20.dp)
                ) {
                    Text(text3, modifier = Modifier.weight(w3))
                    Text(text4, modifier = Modifier.weight(w4), color = Color.Red)
                }
            }
        }
    }
}

Why augmented image tracking is glitchy in ARCore + Sceneform, and how to resolve it?

I've followed a good tutorial to build up a simple AR video overlay over a series of target images.
It works pretty well, however, especially in the first moment the image is recognised for the first time, the augmented object rendering shows some issue, such it's tilting very fast over the target image, instead of being properly anchored.
Here a video which shows the poor performance which is even worse on some actual mobile phone.
https://www.youtube.com/watch?v=QZH6eLiFaUs
And here some of the used code, in Kotlin.
This part handles the anchors
import android.util.Pair
import com.google.ar.core.Anchor
import com.google.ar.sceneform.AnchorNode
import com.google.ar.sceneform.Node
import com.google.ar.sceneform.math.Quaternion
import com.google.ar.sceneform.math.Vector3
import com.google.ar.sceneform.rendering.Renderable
import com.shliama.augmentedvideotutorial.VideoScaleType.*
import android.util.Log
/**
 * AnchorNode that hosts the video on a child node and scales/rotates that child
 * so the video plane matches the tracked augmented image.
 */
class VideoAnchorNode : AnchorNode() {
    // The renderable lives on a child node so the scale/rotation applied for the
    // video never affect the anchor itself.
    private val videoNode = Node().also { it.setParent(this) }

    override fun setRenderable(renderable: Renderable?) {
        videoNode.renderable = renderable
    }

    fun setVideoProperties(
        videoWidth: Float, videoHeight: Float, videoRotation: Float,
        imageWidth: Float, imageHeight: Float,
        videoScaleType: VideoScaleType
    ) {
        // Bug fix: the original log used "/n" (a literal slash-n, not a newline
        // escape) and cut the message off after "imageWidth" without its value.
        Log.i("DEBUGDATA", "videoWidth: $videoWidth\nvideoHeight: $videoHeight\nimageWidth: $imageWidth\nimageHeight: $imageHeight")
        scaleNode(videoWidth, videoHeight, imageWidth, imageHeight, videoScaleType)
        rotateNode(videoRotation)
    }

    // Chooses the child node's local scale according to the requested scale type.
    private fun scaleNode(
        videoWidth: Float, videoHeight: Float,
        imageWidth: Float, imageHeight: Float,
        videoScaleType: VideoScaleType
    ) {
        videoNode.localScale = when (videoScaleType) {
            FitXY -> scaleFitXY(imageWidth, imageHeight)
            CenterCrop -> scaleCenterCrop(videoWidth, videoHeight, imageWidth, imageHeight)
            CenterInside -> scaleCenterInside(videoWidth, videoHeight, imageWidth, imageHeight)
        }
    }

    // Rotates the video plane around the (downward) Y axis by the video's
    // metadata rotation so upright videos render upright on the image.
    private fun rotateNode(videoRotation: Float) {
        videoNode.localRotation = Quaternion.axisAngle(Vector3(0.0f, -1.0f, 0.0f), videoRotation)
    }

    // Stretches the video to exactly cover the image (aspect ratio ignored).
    private fun scaleFitXY(imageWidth: Float, imageHeight: Float) = Vector3(imageWidth, 1.0f, imageHeight)

    // Scales so the video covers the whole image, cropping the overflow.
    private fun scaleCenterCrop(videoWidth: Float, videoHeight: Float, imageWidth: Float, imageHeight: Float): Vector3 {
        val isVideoVertical = videoHeight > videoWidth
        val videoAspectRatio = if (isVideoVertical) videoHeight / videoWidth else videoWidth / videoHeight
        val imageAspectRatio = if (isVideoVertical) imageHeight / imageWidth else imageWidth / imageHeight
        return if (isVideoVertical) {
            if (videoAspectRatio > imageAspectRatio) {
                Vector3(imageWidth, 1.0f, imageWidth * videoAspectRatio)
            } else {
                Vector3(imageHeight / videoAspectRatio, 1.0f, imageHeight)
            }
        } else {
            if (videoAspectRatio > imageAspectRatio) {
                Vector3(imageHeight * videoAspectRatio, 1.0f, imageHeight)
            } else {
                Vector3(imageWidth, 1.0f, imageWidth / videoAspectRatio)
            }
        }
    }

    // Scales so the whole video fits inside the image, letterboxing as needed.
    private fun scaleCenterInside(videoWidth: Float, videoHeight: Float, imageWidth: Float, imageHeight: Float): Vector3 {
        val isVideoVertical = videoHeight > videoWidth
        val videoAspectRatio = if (isVideoVertical) videoHeight / videoWidth else videoWidth / videoHeight
        val imageAspectRatio = if (isVideoVertical) imageHeight / imageWidth else imageWidth / imageHeight
        return if (isVideoVertical) {
            if (videoAspectRatio < imageAspectRatio) {
                Vector3(imageWidth, 1.0f, imageWidth * videoAspectRatio)
            } else {
                Vector3(imageHeight / videoAspectRatio, 1.0f, imageHeight)
            }
        } else {
            if (videoAspectRatio < imageAspectRatio) {
                Vector3(imageHeight * videoAspectRatio, 1.0f, imageHeight)
            } else {
                // Square videos keep a 1:1 plane sized to the image width.
                if (videoAspectRatio == 1.0f){
                    Vector3(imageWidth, 1.0f, imageWidth)
                }else{
                    Vector3(imageWidth, 1.0f, imageWidth / videoAspectRatio)
                }
            }
        }
    }
}
This part triggers the rendering, after the target matching with the inner database (.imgdb)
onUpdate
// Per-frame scene update: pauses the AR video when the active augmented image
// loses full tracking, resumes it when full tracking returns, and otherwise
// starts playback for the first newly fully-tracked image.
// Fix: the closing brace of this function was lost when the answer was pasted;
// it is restored here.
override fun onUpdate(frameTime: FrameTime) {
    val frame = arSceneView.arFrame ?: return
    val updatedAugmentedImages = frame.getUpdatedTrackables(AugmentedImage::class.java)
    // If current active augmented image isn't tracked anymore and video playback is started - pause video playback
    val nonFullTrackingImages = updatedAugmentedImages.filter { it.trackingMethod != AugmentedImage.TrackingMethod.FULL_TRACKING }
    activeAugmentedImage?.let { activeAugmentedImage ->
        if (isArVideoPlaying() && nonFullTrackingImages.any { it.index == activeAugmentedImage.index }) {
            pauseArVideo()
        }
    }
    val fullTrackingImages = updatedAugmentedImages.filter { it.trackingMethod == AugmentedImage.TrackingMethod.FULL_TRACKING }
    if (fullTrackingImages.isEmpty()) return //here it stops
    // If current active augmented image is tracked but video playback is paused - resume video playback
    activeAugmentedImage?.let { activeAugmentedImage ->
        if (fullTrackingImages.any { it.index == activeAugmentedImage.index }) {
            if (!isArVideoPlaying()) {
                resumeArVideo()
            }
            return
        }
    }
    // Otherwise - make the first tracked image active and start video playback
    fullTrackingImages.firstOrNull()?.let { augmentedImage ->
        try {
            playbackArVideo(augmentedImage)
        } catch (e: Exception) {
            Log.e(TAG, "Could not play video [${augmentedImage.name}]", e)
        }
    }
}
Here the "playback" part
// Prepares and starts AR video playback for the given augmented image: reads
// the video's metadata from assets, sizes/rotates the video node to match the
// image, updates material parameters, re-anchors the node at the image center,
// and fades the video in once the first decoded frame is available.
// Fix: the closing brace of this function was lost when the answer was pasted;
// it is restored here.
private fun playbackArVideo(augmentedImage: AugmentedImage) {
    Log.d(TAG, "playbackVideo = ${augmentedImage.name}")
    requireContext().assets.openFd(augmentedImage.name)
        .use { descriptor ->
            val metadataRetriever = MediaMetadataRetriever()
            metadataRetriever.setDataSource(
                descriptor.fileDescriptor,
                descriptor.startOffset,
                descriptor.length
            )
            val videoWidth = metadataRetriever.extractMetadata(METADATA_KEY_VIDEO_WIDTH).toFloatOrNull() ?: 0f
            val videoHeight = metadataRetriever.extractMetadata(METADATA_KEY_VIDEO_HEIGHT).toFloatOrNull() ?: 0f
            val videoRotation = metadataRetriever.extractMetadata(METADATA_KEY_VIDEO_ROTATION).toFloatOrNull() ?: 0f
            // Account for video rotation, so that scale logic math works properly
            val imageSize = RectF(0f, 0f, augmentedImage.extentX, augmentedImage.extentZ)
                .transform(rotationMatrix(videoRotation))
            val videoScaleType = VideoScaleType.CenterCrop
            videoAnchorNode.setVideoProperties(
                videoWidth = videoWidth, videoHeight = videoHeight, videoRotation = videoRotation,
                imageWidth = imageSize.width(), imageHeight = imageSize.height(),
                videoScaleType = videoScaleType
            )
            // Update the material parameters
            videoRenderable.material.setFloat2(MATERIAL_IMAGE_SIZE, imageSize.width(), imageSize.height())
            videoRenderable.material.setFloat2(MATERIAL_VIDEO_SIZE, videoWidth, videoHeight)
            videoRenderable.material.setBoolean(MATERIAL_VIDEO_CROP, VIDEO_CROP_ENABLED)
            mediaPlayer.reset()
            mediaPlayer.setDataSource(descriptor)
        }.also {
            mediaPlayer.isLooping = true
            mediaPlayer.prepare()
            //mediaPlayer.start()
        }
    // Drop any previous anchor and re-anchor at the center of the new image.
    videoAnchorNode.anchor?.detach()
    videoAnchorNode.anchor = augmentedImage.createAnchor(augmentedImage.centerPose)
    activeAugmentedImage = augmentedImage
    // Wait for the first decoded video frame before fading the video in, so an
    // empty frame is never shown.
    externalTexture.surfaceTexture.setOnFrameAvailableListener {
        it.setOnFrameAvailableListener(null)
        fadeInVideo()
    }
}
Here the fade-in part (optional)
// Fades the video material's alpha from 0 to 1 over 400 ms. The renderable is
// attached to the node only when the animation starts (doOnStart), so the first
// visible frame is already part of the fade.
// Fix: the closing brace of this function was lost when the answer was pasted;
// it is restored here.
private fun fadeInVideo() {
    ValueAnimator.ofFloat(0f, 1f).apply {
        duration = 400L
        interpolator = LinearInterpolator()
        addUpdateListener { v ->
            videoRenderable.material.setFloat(MATERIAL_VIDEO_ALPHA, v.animatedValue as Float)
        }
        doOnStart { videoAnchorNode.renderable = videoRenderable }
        start()
    }
}
I figured one possible issue is a fast stop and resume of the video, a behaviour which may depend on the target being poorly recognised. However, the image is in HD and the evaluation of the target image itself scored 100/100...
Another possible cause could be the rendering starting too soon compared with the anchors.
In both cases, which could be a good solution?
Thank you in advance

Resources