Manim Zoom Not Preserving Line Thickness?

Here is a modified version of some example code, which is from the bottom of this page. The only change is that I've added a line:
from manimlib.imports import *

class ZoomedSceneExample(ZoomedScene):
    CONFIG = {
        "zoom_factor": 0.3,
        "zoomed_display_height": 1,
        "zoomed_display_width": 6,
        "image_frame_stroke_width": 20,
        "zoomed_camera_config": {
            "default_frame_stroke_width": 3,
        },
    }

    def construct(self):
        # Set objects
        dot = Dot().shift(UL * 2)
        a_line = Line((0, 0, 0), (1, 1, 0)).shift(UL * 2)  ## THIS IS THE ONLY CHANGE
        self.add(a_line)                                   ## TO THE EXAMPLE CODE
        image = ImageMobject(np.uint8([[0, 100, 30, 200],
                                       [255, 0, 5, 33]]))
        image.set_height(7)
        frame_text = TextMobject("Frame", color=PURPLE).scale(1.4)
        zoomed_camera_text = TextMobject("Zoomed camera", color=RED).scale(1.4)
        self.add(image, dot)

        # Set camera
        zoomed_camera = self.zoomed_camera
        zoomed_display = self.zoomed_display
        frame = zoomed_camera.frame
        zoomed_display_frame = zoomed_display.display_frame
        frame.move_to(dot)
        frame.set_color(PURPLE)
        zoomed_display_frame.set_color(RED)
        zoomed_display.shift(DOWN)

        # background of zoomed_display
        zd_rect = BackgroundRectangle(
            zoomed_display,
            fill_opacity=0,
            buff=MED_SMALL_BUFF,
        )
        self.add_foreground_mobject(zd_rect)

        # animation of unfold camera
        unfold_camera = UpdateFromFunc(
            zd_rect,
            lambda rect: rect.replace(zoomed_display)
        )
        frame_text.next_to(frame, DOWN)
        self.play(
            ShowCreation(frame),
            FadeInFromDown(frame_text)
        )

        # Activate zooming
        self.activate_zooming()
        self.play(
            # You have to add this line
            self.get_zoomed_display_pop_out_animation(),
            unfold_camera
        )
        zoomed_camera_text.next_to(zoomed_display_frame, DOWN)
        self.play(FadeInFromDown(zoomed_camera_text))

        # Scale in x, y and z
        scale_factor = [0.5, 1.5, 0]

        # Resize the frame and zoomed camera
        self.play(
            frame.scale, scale_factor,
            zoomed_display.scale, scale_factor,
            FadeOut(zoomed_camera_text),
            FadeOut(frame_text)
        )

        # Resize the frame
        self.play(
            frame.scale, 3,
            frame.shift, 2.5 * DOWN
        )

        # Resize zoomed camera
        self.play(
            ScaleInPlace(zoomed_display, 2)
        )
        self.wait()

        self.play(
            self.get_zoomed_display_pop_out_animation(),
            unfold_camera,
            # -------> Inverse
            rate_func=lambda t: smooth(1 - t),
        )
        self.play(
            Uncreate(zoomed_display_frame),
            FadeOut(frame),
        )
        self.wait()
When I render this with Manim, the zooming does not work as I expected. Here is a screenshot from the video:
[screenshot: the scene with the zoomed display]
When not zoomed in, the line is quite thick, but in the zoomed-in display it appears significantly thinner. Why is this? Can I fix this effect so that the line thickness scales properly with the zoom?

Try this:
# Set camera
zoomed_camera = self.zoomed_camera
zoomed_camera.cairo_line_width_multiple = 0.1 # <-
The thickness is calculated in this way.
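The source excerpt the answer points to is not reproduced here. For reference, this is roughly how the cairo backend turns a mobject's stroke width into a drawn line width (paraphrased from manimlib's Camera.apply_stroke; the exact code differs between manim versions, so treat it as a sketch):
def apply_stroke(self, ctx, vmobject, background=False):
    width = vmobject.get_stroke_width(background)
    if width == 0:
        return self
    self.set_cairo_context_color(
        ctx, self.get_stroke_rgbas(vmobject, background=background), vmobject
    )
    ctx.set_line_width(
        width * self.cairo_line_width_multiple
        # This keeps lines at a constant on-screen width as the camera zooms.
        * (self.get_frame_width() / FRAME_WIDTH)
    )
    ctx.stroke_preserve()
    return self
Because the final line width is multiplied by cairo_line_width_multiple, raising that attribute on the zoomed camera (as in the snippet above; the default value is much smaller) makes strokes drawn by that camera proportionally thicker.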

Related

Is it common for sift.compute to eliminate almost all keypoints generated by sift.detect?

I am trying to align multispectral drone images using OpenCV, and when I try to use a homography to align the images I get an error stating I need at least 4 matching points. I went back and broke the sift.detectAndCompute function into two separate calls and printed the number of detected points after each. After sift.detect I had over 100,000 points; when I ran sift.compute, it brought that number down to just two. Is there a way to make it less restrictive?
I've included my code below in case that helps.
import cv2
import numpy as np
import os

def final_align(file_paths):
    # Load the images from the file paths
    images = [cv2.imread(file_path) for file_path in file_paths]
    # Define the calibrated optical centers for each image
    calibrated_optical_centers = {
        "1": (834.056702, 643.766418),
        "2": (836.952271, 631.696899),
        "3": (832.183411, 642.485901),
        "4": (795.311279, 680.615906),
        "5": (807.490295, 685.338379),
    }
    # Create a list to store the aligned images
    aligned_images = []
    for file_path in file_paths:
        # Get the 5th from last character in the file path
        image_id = file_path[-5]
        # Get the calibrated optical center for the image
        calibrated_optical_center = calibrated_optical_centers[image_id]
        # Load the image
        image = cv2.imread(file_path)
        # Get the shape of the image
        height, width = image.shape[:2]
        # Calculate the center of the image
        center_x = width // 2
        center_y = height // 2
        # Calculate the shift needed to align the image
        shift_x = float(calibrated_optical_center[0] - center_x)
        shift_y = float(calibrated_optical_center[1] - center_y)
        # Create a translation matrix
        M = np.float32([[1, 0, shift_x], [0, 1, shift_y]])
        # Apply the translation to the image
        aligned_image = cv2.warpAffine(image, M, (width, height))
        # Add the aligned image to the list of aligned images
        aligned_images.append(aligned_image)
    return aligned_images

file_paths = [
    "G:\Shared\Mulitband\\flights\\flight_1\\2611_DJI_0011.TIF",
    "G:\Shared\Mulitband\\flights\\flight_1\\2612_DJI_0012.TIF",
    "G:\Shared\Mulitband\\flights\\flight_1\\2613_DJI_0013.TIF",
    "G:\Shared\Mulitband\\flights\\flight_1\\2614_DJI_0014.TIF",
    "G:\Shared\Mulitband\\flights\\flight_1\\2615_DJI_0015.TIF",
]

# Call the final_align function
final_aligned_images = final_align(file_paths)

# Get the center of the first image
height, width = final_aligned_images[0].shape[:2]
center_y = height // 2
center_x = width // 2

# Specify the crop size in the y and x direction
crop_y = 1220
crop_x = 1520

# crop function
def crop_images(final_aligned_images, center_y, center_x, crop_y, crop_x):
    cropped_images = []
    for image in final_aligned_images:
        height, width = image.shape[:2]
        start_y = center_y - crop_y // 2
        end_y = center_y + crop_y // 2 + 1
        start_x = center_x - crop_x // 2
        end_x = center_x + crop_x // 2 + 1
        cropped_image = image[start_y:end_y, start_x:end_x]
        cropped_images.append(cropped_image)
    return cropped_images

cropped_images = crop_images(final_aligned_images, center_y, center_x, crop_y, crop_x)
# print(cropped_images)

for i, final_complete_image in enumerate(cropped_images):
    # Create the Results/aligned directory if it doesn't exist
    os.makedirs("G:\Shared\Mulitband\Results\\aligned", exist_ok=True)
    # Construct the file path for the aligned image
    final_aligned_image_path = "G:\Shared\Mulitband\Results\\aligned\\aligned_{}.tif".format(i)
    # Save the final aligned image to the file path
    cv2.imwrite(final_aligned_image_path, final_complete_image)

"""
# TEST OF FUNCTION
img = cropped_images[1]
# Call the sift_align function
sift = cv2.xfeatures2d.SIFT_create()
kp = sift.detect(cropped_images[1], None)
img = cv2.drawKeypoints(cropped_images[1],
                        kp,
                        img,
                        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imwrite('G:\Shared\Mulitband\Results\\aligned\image-with-keypoints.jpg', img)"""

# Create the SIFT Function
def sift_align(cropped_images):
    # Create the SIFT detector and descriptor
    sift = cv2.SIFT_create()
    # Create a list to store the aligned images
    aligned_images = []
    # Choose the first image as the reference image
    reference_image = cropped_images[0]
    # reference_image = cv2.convertScaleAbs(reference_image, alpha=(255.0/65535.0))
    # Detect the keypoints and compute the descriptors for the reference image ", reference_descriptors"
    reference_keypoints = sift.detect(reference_image, None)
    reference_keypoints = sift.compute(reference_image, reference_keypoints)
    print("Number of keypoints in reference image:", len(reference_keypoints))
    # Iterate over the remaining images
    for i, image in enumerate(cropped_images[1:]):
        # Detect the keypoints and compute the descriptors for the current image
        image_keypoints, = sift.detect(image, None)
        # Use the BFMatcher to find the best matches between the reference and current image descriptors
        bf = cv2.BFMatcher()
        # matches = bf.match(image_descriptors, image_descriptors)
        # Sort the matches based on their distances
        matches = sorted(matches, key=lambda x: x.distance)
        # Use the best matches to estimate the homography between the reference and current image
        src_pts = np.float32([reference_keypoints[m.queryIdx].pt for m in matches[:50]]).reshape(-1, 1, 2)
        dst_pts = np.float32([image_keypoints[m.trainIdx].pt for m in matches[:50]]).reshape(-1, 1, 2)
        homography, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        # Use the homography to align the current image with the reference image
        aligned_image = cv2.warpPerspective(image, homography, (reference_image.shape[1], reference_image.shape[0]))
        # Add the aligned image to the list of aligned images
        aligned_images.append(aligned_image)
    # Stack the aligned images along the third dimension
    aligned_images = np.stack(aligned_images, axis=-1)
    return aligned_images

final_complete_images = sift_align(cropped_images)

"""# Save the final aligned images to the Results/aligned directory
for i, final_complete_image in enumerate(final_complete_images):
    # Create the Results/aligned directory if it doesn't exist
    os.makedirs("G:\Shared\Mulitband\Results\\aligned", exist_ok=True)
    # Construct the file path for the aligned image
    final_aligned_image_path = "G:\Shared\Mulitband\Results\\aligned\\aligned_{}.tif".format(i)
    # Save the final aligned image to the file path
    cv2.imwrite(final_aligned_image_path, final_complete_image)"""
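As an aside on the API involved (not a diagnosis of the code above, just a minimal sketch assuming OpenCV's Python bindings; the image path is illustrative): cv2.SIFT.compute returns a (keypoints, descriptors) pair, so calling len() on its raw return value always gives 2, while the keypoint count is the length of the first element.
import cv2

img = cv2.imread("example.tif", cv2.IMREAD_GRAYSCALE)  # illustrative path
sift = cv2.SIFT_create()

kp = sift.detect(img, None)      # list of cv2.KeyPoint objects
kp, des = sift.compute(img, kp)  # compute() returns a (keypoints, descriptors) tuple
print(len(kp))                   # number of keypoints
print(des.shape)                 # (number_of_keypoints, 128)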

NumberPlane grid not showing in a ManimGL Scene

I recently started using ManimGL version 1.6.1.
When I run the "OpeningManimExample" scene from example_scenes.py, there should be a grid in the scene, but it shows only the labels; there are no grid lines.
class OpeningManimExample(Scene):
    def construct(self):
        intro_words = Text("""
            The original motivation for manim was to
            better illustrate mathematical functions
            as transformations.
        """)
        intro_words.to_edge(UP)

        self.play(Write(intro_words))
        self.wait(2)

        # Linear transform
        grid = NumberPlane((-10, 10), (-5, 5))
        matrix = [[1, 1], [0, 1]]
        linear_transform_words = VGroup(
            Text("This is what the matrix"),
            IntegerMatrix(matrix, include_background_rectangle=True),
            Text("looks like")
        )
        linear_transform_words.arrange(RIGHT)
        linear_transform_words.to_edge(UP)
        linear_transform_words.set_stroke(BLACK, 10, background=True)

        self.play(
            ShowCreation(grid),
            FadeTransform(intro_words, linear_transform_words)
        )
        self.wait()
        self.play(grid.animate.apply_matrix(matrix), run_time=3)
        self.wait()

        # Complex map
        c_grid = ComplexPlane()
        moving_c_grid = c_grid.copy()
        moving_c_grid.prepare_for_nonlinear_transform()
        c_grid.set_stroke(BLUE_E, 1)
        c_grid.add_coordinate_labels(font_size=24)
        complex_map_words = TexText("""
            Or thinking of the plane as $\\mathds{C}$,\\\\
            this is the map $z \\rightarrow z^2$
        """)
        complex_map_words.to_corner(UR)
        complex_map_words.set_stroke(BLACK, 5, background=True)

        self.play(
            FadeOut(grid),
            Write(c_grid, run_time=3),
            FadeIn(moving_c_grid),
            FadeTransform(linear_transform_words, complex_map_words),
        )
        self.wait()
        self.play(
            moving_c_grid.animate.apply_complex_function(lambda z: z**2),
            run_time=6,
        )
        self.wait(2)
The output looks like this:
[screenshot of the rendered OpeningManimExample scene]
I also ran into a similar problem with the Flash and FlashAround animations.

Vips - add text on top of image, after resize in Ruby

I'm using Vips to resize images via Shrine, and I'm hoping it's possible to use the Vips library to merge a layer of text on top of the image.
ImageProcessing::Vips.source(image).resize_to_fill!(width, height)
This code works great; how can I add a layer of text after resize_to_fill?
The goal is to write 'Hello world' in white text, with a CSS-style text-shadow, in the center of the image.
I've tried writing something like this, but I'm only getting errors so far:
Vips\Image::text('Hello world!', ['font' => 'sans 120', 'width' => $image->width - 100]);
Your example looks like PHP -- in Ruby you'd write something like:
text = Vips::Image.text 'Hello world!', font: 'sans 120', width: image.width - 100
I made a demo for you:
#!/usr/bin/ruby
require "vips"
image = Vips::Image.new_from_file ARGV[0], access: :sequential
text_colour = [255, 128, 128]
shadow_colour = [128, 255, 128]
h_shadow = 2
v_shadow = 5
blur_radius = 10
# position to render the top-left of the text
text_left = 100
text_top = 200
# render some text ... this will make a one-band uchar image, with 0
# for black, 255 for white and intermediate values for anti-aliasing
text_mask = Vips::Image.text "Hello world!", dpi: 300
# we need to enlarge the text mask before we blur so that the soft edges
# don't get clipped
shadow_mask = text_mask.embed(blur_radius, blur_radius,
                              text_mask.width + 2 * blur_radius,
                              text_mask.height + 2 * blur_radius)
# gaussblur() takes sigma as a parameter -- approximate as radius / 2
shadow_mask = shadow_mask.gaussblur(blur_radius / 2) if blur_radius > 0.1
# make an RGB image the size of the text mask with each pixel set to the
# constant, then attach the text mask as the alpha
rgb = text_mask.new_from_image(text_colour).copy(interpretation: "srgb")
text = rgb.bandjoin(text_mask)
rgb = shadow_mask.new_from_image(shadow_colour).copy(interpretation: "srgb")
shadow = rgb.bandjoin(shadow_mask)
# composite the three layers together
image = image.composite([shadow, text], "over",
                        x: [text_left + h_shadow, text_left],
                        y: [text_top + v_shadow, text_top])
image.write_to_file ARGV[1]
Run like this:
$ ./try319.rb ~/pics/PNG_transparency_demonstration_1.png x.png
To make:
[output image: the photo with the shadowed 'Hello world!' text composited on top]

The line rotates with unexpected scaling

The scene is simple: one Line, rotated by PI/2 with the code below:
ln = Line(ORIGIN, RIGHT*2)
self.add(ln)
self.wait()
self.play(ApplyMethod(ln.rotate, PI/2, OUT))
However, during the rotation the line also seems to scale. I checked that the axis is [0 0 1], i.e. the z-axis, so I would expect the length of the line to stay unchanged.
How can I prevent the line from scaling? Thanks!
Use Rotate or Rotating, as in this example:
class RotateVector(Scene):
    def construct(self):
        coord_start = [1, 1, 0]
        coord_end = [2, 3, 0]
        dot_start = Dot().move_to(coord_start)
        dot_end = Dot().move_to(coord_end)
        vector = Arrow(coord_start, coord_end, buff=0)
        vector.set_color(RED)
        self.add(dot_start, dot_end)
        self.play(GrowArrow(vector))
        self.play(
            Rotating(
                vector,
                radians=PI*2,
                about_point=coord_start,
                rate_func=smooth,
                run_time=1
            )
        )
        self.wait()
        self.play(
            Rotating(
                vector,
                radians=PI*2,
                about_point=coord_end,
                rate_func=linear,
                run_time=1
            )
        )
        self.wait()
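Applied to the line from the question, a minimal sketch might look like this (assuming the same older manimlib API as above). ApplyMethod(ln.rotate, ...) linearly interpolates the line's points between the start and end positions, which is why it appears to shrink mid-animation; Rotate follows an arc instead, so the length is preserved:
class RotateLine(Scene):
    def construct(self):
        ln = Line(ORIGIN, RIGHT * 2)
        self.add(ln)
        self.wait()
        # Rotate sweeps the line along an arc, so its length stays constant
        self.play(Rotate(ln, PI / 2, axis=OUT))
        self.wait()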
Edit:
You can create a custom animation:
class RotatingAndMove(Animation):
    CONFIG = {
        "axis": OUT,
        "radians": TAU,
        "run_time": 5,
        "rate_func": linear,
        "about_point": None,
        "about_edge": None,
    }

    def __init__(self, mobject, direction, **kwargs):
        assert(isinstance(mobject, Mobject))
        digest_config(self, kwargs)
        self.mobject = mobject
        self.direction = direction

    def interpolate_mobject(self, alpha):
        self.mobject.become(self.starting_mobject)
        self.mobject.rotate(
            alpha * self.radians,
            axis=self.axis,
            about_point=self.about_point,
            about_edge=self.about_edge,
        )
        self.mobject.shift(alpha * self.direction)

class NewSceneRotate(Scene):
    def construct(self):
        arrow = Vector(UP)
        arrow.to_corner(UL)
        self.play(GrowArrow(arrow))
        self.play(
            RotatingAndMove(arrow, RIGHT*12 + DOWN*4)
        )
        self.wait()
Or you can use UpdateFromAlphaFunc:
class NewSceneRotateUpdate(Scene):
    def construct(self):
        arrow = Vector(UP)
        arrow.to_corner(UL)
        direction = RIGHT*12 + DOWN*4
        radians = TAU
        arrow.starting_mobject = arrow.copy()

        def update_arrow(mob, alpha):
            mob.become(mob.starting_mobject)
            mob.rotate(alpha * radians)
            mob.shift(alpha * direction)

        self.play(GrowArrow(arrow))
        self.play(
            UpdateFromAlphaFunc(arrow, update_arrow, rate_func=linear, run_time=5)
        )
        self.wait()
One thing that should be very clear: when you define an update function, using dt is not the same as using alpha. That is, defining
def update_function(mob, dt)
is not the same as defining
def update_function(mob, alpha)
dt varies with the fps of the video and is calculated as follows:
dt = 1 / self.camera.frame_rate
# You don't have to calculate it; manim already does it by default.
# It is written out here only so you know where it comes from.
where self refers to the Scene class.
alpha, on the other hand, varies from 0 to 1. In fact, you can write the previous scene with this update method and get the same result:
def update_arrow(mob, dt):
    alpha = interpolate(0, 1, dt)
    mob.become(mob.starting_mobject)
    mob.rotate(alpha * radians)
    mob.shift(alpha * direction)
This can be useful if you want alpha to vary over a different interval; use
alpha = interpolate(alpha_start, alpha_end, dt)
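For the alpha-based variant, the same remapping idea applies. A minimal sketch (same older manimlib API as above; the sub-interval 0.25 to 0.75 is just an illustrative choice) that only sweeps part of the full turn:
class PartialRotateUpdate(Scene):
    def construct(self):
        arrow = Vector(UP)
        arrow.to_corner(UL)
        arrow.starting_mobject = arrow.copy()
        radians = TAU

        def update_arrow(mob, alpha):
            # remap alpha from [0, 1] onto [0.25, 0.75] before using it
            sub_alpha = interpolate(0.25, 0.75, alpha)
            mob.become(mob.starting_mobject)
            mob.rotate(sub_alpha * radians)

        self.play(GrowArrow(arrow))
        self.play(
            UpdateFromAlphaFunc(arrow, update_arrow, rate_func=linear, run_time=3)
        )
        self.wait()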

How to FadeOut the axes in GraphScene?

In GraphScene, I use setup_axes to set up the axes. How can I FadeOut the axes to make room for another animation?
After setup_axes, I also tried changing graph_origin to move the axes, but that failed as well.
Technically you can do it using the self.axes object:
class Plot(GraphScene):
    CONFIG = {
        "y_max": 50,
        "y_min": 0,
        "x_max": 7,
        "x_min": 0,
        "y_tick_frequency": 5,
        "x_tick_frequency": 0.5,
    }

    def construct(self):
        self.setup_axes()
        graph = self.get_graph(lambda x: x**2,
                               color=GREEN,
                               x_min=0,
                               x_max=7
                               )
        self.play(
            ShowCreation(graph),
            run_time=2
        )
        self.wait()
        self.play(FadeOut(self.axes))
        self.wait()
But GraphScene was intended to be used with a single set of axes (you can create multiple graphs on those axes, but not change the axes). If you are going to change them, use Scene instead; here is an example:
class Plot2(Scene):
    def construct(self):
        c1 = FunctionGraph(lambda x: 2*np.exp(-2*(x-1)**2))
        c2 = FunctionGraph(lambda x: x**2)
        axes1 = Axes(y_min=-3, y_max=3)
        axes2 = Axes(y_min=0, y_max=10)
        self.play(ShowCreation(axes1), ShowCreation(c1))
        self.wait()
        self.play(
            ReplacementTransform(axes1, axes2),
            ReplacementTransform(c1, c2)
        )
        self.wait()
However, if you want a very customized graph, you will have to pass more options to the axes, which are built from NumberLine. This is not so easy, but you can use manimlib/scene/graph_scene.py as a guide; the Axes code is in manimlib/mobject/coordinate_systems.py, the NumberLine code is in manimlib/mobject/number_line.py, and the FunctionGraph code is in manimlib/mobject/functions.py, where you can see more options. A rough sketch of such a configuration follows below.
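For example, here is a minimal sketch of passing NumberLine options through Axes (this assumes an older manimlib version in which Axes forwards the axis_config, x_axis_config and y_axis_config dictionaries to its NumberLine axes; option names can differ between versions):
class CustomAxes(Scene):
    def construct(self):
        axes = Axes(
            x_min=-1, x_max=5,
            y_min=-1, y_max=3,
            axis_config={            # applied to both axes
                "include_tip": False,
                "include_numbers": True,
                "tick_frequency": 1,
            },
            y_axis_config={          # overrides for the y axis only
                "label_direction": LEFT,
            },
        )
        self.play(ShowCreation(axes))
        self.wait()
        self.play(FadeOut(axes))
        self.wait()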
