Mirror of https://github.com/3b1b/manim.git (synced 2025-08-05 16:49:03 +00:00)

Merge branch 'master' of https://github.com/3b1b/manim into WindingNumber
Commit 4f3bc46b5d
16 changed files with 1121 additions and 445 deletions
.gitignore (vendored): 6 changes
@@ -7,9 +7,9 @@ special_animations.py
prettiness_hall_of_fame.py
files/
ben_playground.py

ben_cairo_test.py

.floo
.flooignore
*.xml

*.iml
@@ -58,7 +58,7 @@ inverse_quadratic = lambda maxint,scale,cutoff: inverse_power_law(maxint,scale,c

class AngleUpdater(ContinualAnimation):
    def __init__(self, angle_arc, spotlight, **kwargs):
        self.angle_arc = angle_arc
        self.source_point = angle_arc.get_arc_center()

        self.spotlight = spotlight
        ContinualAnimation.__init__(self, self.angle_arc, **kwargs)
@@ -66,9 +66,9 @@ class AngleUpdater(ContinualAnimation):
        new_arc = self.angle_arc.copy().set_bound_angles(
            start = self.spotlight.start_angle(),
            stop = self.spotlight.stop_angle()
        )
        )
        new_arc.generate_points()
        new_arc.move_arc_center_to(self.source_point)
        new_arc.move_arc_center_to(self.spotlight.get_source_point())
        self.angle_arc.points = new_arc.points
        self.angle_arc.add_tip(tip_length = ARC_TIP_LENGTH,
            at_start = True, at_end = True)
@@ -83,7 +83,9 @@ class LightIndicator(Mobject):
        "intensity": 0,
        "opacity_for_unit_intensity": 1,
        "precision": 3,
        "show_reading": True
        "show_reading": True,
        "measurement_point": ORIGIN,
        "light_source": None
    }

    def generate_points(self):
@@ -105,6 +107,25 @@ class LightIndicator(Mobject):
        self.foreground.set_fill(opacity=new_opacity)
        ChangeDecimalToValue(self.reading, new_int).update(1)
        return self

    def get_measurement_point(self):
        if self.measurement_point != None:
            return self.measurement_point
        else:
            return self.get_center()

    def measured_intensity(self):
        distance = np.linalg.norm(self.get_measurement_point() -
            self.light_source.get_source_point())
        intensity = self.light_source.opacity_function(distance) / self.opacity_for_unit_intensity
        return intensity

    def continual_update(self):
        if self.light_source == None:
            print "Indicator cannot update, reason: no light source found"
        self.set_intensity(self.measured_intensity())
@@ -123,6 +144,12 @@ class UpdateLightIndicator(AnimationGroup):
        self.mobject = indicator


class ContinualLightIndicatorUpdate(ContinualAnimation):

    def update_mobject(self,dt):
        self.mobject.continual_update()
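A minimal usage sketch (not from this diff) of the updater above: ContinualLightIndicatorUpdate simply calls the indicator's continual_update() every frame, so it is attached like any other continual animation, assuming an indicator whose light_source has been set (as TwoLightSourcesScene does later in this diff):

    indicator = LightIndicator(color = LIGHT_COLOR, radius = INDICATOR_RADIUS)
    indicator.light_source = ls            # ls: some LightSource already in the scene (hypothetical name)
    indicator.measurement_point = ORIGIN
    self.add(indicator)
    self.add(ContinualLightIndicatorUpdate(indicator))   # reading now tracks the light source each frame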
@ -581,7 +608,7 @@ class SingleLighthouseScene(PiCreatureScene):
|
|||
|
||||
self.setup_elements()
|
||||
self.setup_angle() # spotlight and angle msmt change when screen rotates
|
||||
#self.rotate_screen()
|
||||
self.rotate_screen()
|
||||
self.morph_lighthouse_into_sun()
|
||||
|
||||
|
||||
|
@ -643,8 +670,8 @@ class SingleLighthouseScene(PiCreatureScene):
|
|||
self.light_source.set_max_opacity_spotlight(0.001)
|
||||
self.add(self.light_source.spotlight)
|
||||
|
||||
updater = ScreenTracker(self.light_source)
|
||||
self.add(updater)
|
||||
self.screen_tracker = ScreenTracker(self.light_source)
|
||||
self.add(self.screen_tracker)
|
||||
|
||||
self.wait()
|
||||
|
||||
|
@ -679,7 +706,7 @@ class SingleLighthouseScene(PiCreatureScene):
|
|||
self.angle_arc = Arc(radius = 5, start_angle = self.light_source.spotlight.start_angle(),
|
||||
angle = self.light_source.spotlight.opening_angle(), tip_length = ARC_TIP_LENGTH)
|
||||
#angle_arc.add_tip(at_start = True, at_end = True)
|
||||
self.angle_arc.move_arc_center_to(self.light_source.source_point)
|
||||
self.angle_arc.move_arc_center_to(self.light_source.get_source_point())
|
||||
|
||||
|
||||
# angle msmt (decimal number)
|
||||
|
@ -727,7 +754,7 @@ class SingleLighthouseScene(PiCreatureScene):
|
|||
|
||||
|
||||
|
||||
sun_position = ORIGIN #[-100,0,0]
|
||||
sun_position = [-100,0,0]
|
||||
|
||||
|
||||
self.play(
|
||||
|
@ -737,16 +764,14 @@ class SingleLighthouseScene(PiCreatureScene):
|
|||
|
||||
self.sun = self.light_source.deepcopy()
|
||||
|
||||
#self.sun.ambient_light.opacity_function = inverse_quadratic(1,2,1)
|
||||
#self.sun.num_levels = NUM_LEVELS,
|
||||
#self.sun.set_radius(150)
|
||||
#self.sun.set_max_opacity_ambient(AMBIENT_FULL)
|
||||
|
||||
|
||||
|
||||
|
||||
# self.sun.spotlight.change_opacity_function(lambda r: 0.5)
|
||||
# self.sun.set_radius(150)
|
||||
self.sun.spotlight.change_opacity_function(lambda r: 0.5)
|
||||
self.sun.set_radius(150)
|
||||
self.sun.move_source_to(sun_position)
|
||||
|
||||
# self.sun.update()
|
||||
|
@ -755,10 +780,11 @@ class SingleLighthouseScene(PiCreatureScene):
|
|||
# temporarily remove the screen tracker while we move the source
|
||||
#self.remove(self.screen_tracker)
|
||||
|
||||
print self.sun.spotlight.source_point
|
||||
#print self.sun.spotlight.source_point
|
||||
|
||||
self.play(
|
||||
self.light_source.spotlight.move_source_to,sun_position,
|
||||
#self.light_source.spotlight.move_source_to,sun_position,
|
||||
Transform(self.light_source,self.sun)
|
||||
)
|
||||
|
||||
#self.add(ScreenTracker(self.sun))
|
||||
|
@ -921,7 +947,7 @@ class ScreenShapingScene(ThreeDScene):
|
|||
self.right_shift_screen_while_showing_light_indicator_and_distance_arrow()
|
||||
self.left_shift_again()
|
||||
|
||||
self.morph_into_3d()
|
||||
#self.morph_into_3d()
|
||||
|
||||
|
||||
def setup_elements(self):
|
||||
|
@ -952,16 +978,16 @@ class ScreenShapingScene(ThreeDScene):
|
|||
self.lighthouse = self.light_source.lighthouse
|
||||
|
||||
screen_tracker = ScreenTracker(self.light_source)
|
||||
self.add(screen_tracker)
|
||||
self.add(screen_tracker,self.light_source.shadow)
|
||||
|
||||
self.add_foreground_mobject(self.light_source.shadow)
|
||||
#self.add_foreground_mobject(self.light_source.shadow)
|
||||
|
||||
# Morty
|
||||
self.morty = Mortimer().scale(0.3).next_to(self.screen, RIGHT, buff = 0.5)
|
||||
|
||||
# Add everything to the scene
|
||||
self.add(self.ambient_light, self.lighthouse)
|
||||
self.add_foreground_mobject(self.morty)
|
||||
#self.add_foreground_mobject(self.morty)
|
||||
|
||||
self.wait()
|
||||
self.play(FadeIn(self.screen))
|
||||
|
@ -971,13 +997,12 @@ class ScreenShapingScene(ThreeDScene):
|
|||
|
||||
dimmed_ambient_light = self.ambient_light.copy()
|
||||
dimmed_ambient_light.dimming(AMBIENT_DIMMED)
|
||||
self.light_source.set_max_opacity_spotlight(0.001)
|
||||
|
||||
#self.light_source.set_max_opacity_spotlight(0.001)
|
||||
self.play(
|
||||
self.light_source.set_max_opacity_spotlight,1.0, # this hides Morty for a moment, why?
|
||||
Transform(self.ambient_light,dimmed_ambient_light),
|
||||
FadeIn(self.light_source.shadow),
|
||||
Transform(self.ambient_light,dimmed_ambient_light)
|
||||
)
|
||||
)
|
||||
|
||||
self.wait()
|
||||
|
||||
|
@ -1180,9 +1205,6 @@ class ScreenShapingScene(ThreeDScene):
|
|||
dphi = phi1 - phi0
|
||||
dtheta = theta1 - theta0
|
||||
|
||||
print "moving camera from (", phi0/DEGREES, ", ", theta0/DEGREES, ") to (", phi1/DEGREES, ", ", theta1/DEGREES, ")"
|
||||
|
||||
|
||||
camera_target_point = target_point # self.camera.get_spherical_coords(45 * DEGREES, -60 * DEGREES)
|
||||
projection_direction = self.camera.spherical_coords_to_point(phi1,theta1, 1)
|
||||
|
||||
|
@ -1212,7 +1234,7 @@ class BackToEulerSumScene(PiCreatureScene):
|
|||
|
||||
|
||||
def construct(self):
|
||||
#self.remove(self.get_primary_pi_creature())
|
||||
self.remove(self.get_primary_pi_creature())
|
||||
|
||||
NUM_CONES = 7
|
||||
NUM_VISIBLE_CONES = 6
|
||||
|
@ -1233,7 +1255,7 @@ class BackToEulerSumScene(PiCreatureScene):
|
|||
)
|
||||
|
||||
self.number_line.label_direction = DOWN
|
||||
self.number_line.shift(3*UP)
|
||||
#self.number_line.shift(3*UP)
|
||||
|
||||
self.number_line_labels = self.number_line.get_number_mobjects()
|
||||
self.add(self.number_line,self.number_line_labels)
|
||||
|
@ -1257,7 +1279,7 @@ class BackToEulerSumScene(PiCreatureScene):
|
|||
bubble = ThoughtBubble(direction = RIGHT,
|
||||
width = 4, height = 3,
|
||||
file_name = "Bubbles_thought.svg")
|
||||
bubble.next_to(randy,LEFT)
|
||||
bubble.next_to(randy,LEFT+UP)
|
||||
bubble.set_fill(color = BLACK, opacity = 1)
|
||||
|
||||
self.play(
|
||||
|
@ -1289,171 +1311,137 @@ class BackToEulerSumScene(PiCreatureScene):
|
|||
v = point - self.number_line.number_to_point(0)
|
||||
light_source = LightSource()
|
||||
light_source.move_source_to(point)
|
||||
light_source.ambient_light.move_source_to(point)
|
||||
light_source.lighthouse.move_to(point)
|
||||
#light_source.ambient_light.move_source_to(point)
|
||||
#light_source.lighthouse.move_to(point)
|
||||
|
||||
self.play(FadeIn(light_source.lighthouse))
|
||||
self.play(SwitchOn(light_source.ambient_light))
|
||||
|
||||
|
||||
# create an indicator and move a copy of it into the thought bubble
|
||||
# create an indicator that will move along the number line
|
||||
indicator = LightIndicator(color = LIGHT_COLOR,
|
||||
radius = INDICATOR_RADIUS,
|
||||
opacity_for_unit_intensity = 0.2, #OPACITY_FOR_UNIT_INTENSITY,
|
||||
opacity_for_unit_intensity = OPACITY_FOR_UNIT_INTENSITY,
|
||||
show_reading = False
|
||||
)
|
||||
)
|
||||
indicator_reading = euler_sum[0]
|
||||
indicator_reading.scale_to_fit_height(0.5 * indicator.get_height())
|
||||
indicator_reading.move_to(indicator.get_center())
|
||||
indicator.add(indicator_reading)
|
||||
indicator.tex_reading = indicator_reading
|
||||
# the TeX reading is too bright at full intensity
|
||||
indicator.tex_reading.set_fill(color = BLACK)
|
||||
indicator.foreground.set_fill(None,opacities[0])
|
||||
|
||||
|
||||
indicator.move_to(point)
|
||||
indicator.set_intensity(intensities[0])
|
||||
|
||||
self.play(FadeIn(indicator))
|
||||
indicator_copy = indicator.deepcopy()
|
||||
self.add(indicator_copy)
|
||||
self.play(indicator_copy.move_to, bubble)
|
||||
|
||||
moving_light_source = light_source.deepcopy()
|
||||
|
||||
|
||||
ls = []
|
||||
ls.append(moving_light_source)
|
||||
self.add_foreground_mobject(indicator)
|
||||
|
||||
collection_point = np.array([-6.,2.,0.])
|
||||
left_shift = 0.2*LEFT
|
||||
collected_indicators = Mobject()
|
||||
|
||||
for i in range(2,NUM_VISIBLE_CONES + 1):
|
||||
|
||||
for i in range(2, NUM_VISIBLE_CONES + 1):
|
||||
|
||||
previous_point = self.number_line.number_to_point(i - 1)
|
||||
point = self.number_line.number_to_point(i)
|
||||
|
||||
indicator_copy = indicator.deepcopy()
|
||||
indicator_copy.move_to(previous_point)
|
||||
self.add(indicator_copy)
|
||||
|
||||
|
||||
v = point - previous_point
|
||||
#print v
|
||||
# Create and position the target indicator (next on number line).
|
||||
indicator_target = indicator.deepcopy()
|
||||
indicator_target.move_to(point)
|
||||
indicator_target.shift(v)
|
||||
|
||||
ls[-1].set_max_opacity_ambient(0.0001)
|
||||
self.add(ls[-1].ambient_light)
|
||||
ls.append(ls[-1].deepcopy())
|
||||
|
||||
ls[-1].move_source_to(point)
|
||||
ls[-1].set_max_opacity_ambient(0.5)
|
||||
|
||||
bubble_indicator = indicator_copy.deepcopy()
|
||||
# Here we make a copy that will move into the thought bubble.
|
||||
bubble_indicator = indicator.deepcopy()
|
||||
# And its target
|
||||
bubble_indicator_target = bubble_indicator.deepcopy()
|
||||
bubble_indicator_target.set_intensity(intensities[i-1])
|
||||
bubble_indicator_target.reading = euler_sum[-2+2*i]
|
||||
bubble_indicator_target.reading.scale_to_fit_height(0.8*indicator.get_height())
|
||||
bubble_indicator_target.move_to(bubble)
|
||||
bubble_indicator_target.set_intensity(intensities[i - 2])
|
||||
|
||||
self.add(bubble_indicator)
|
||||
# give the target the appropriate reading
|
||||
euler_sum[2*i-4].move_to(bubble_indicator_target)
|
||||
bubble_indicator_target.remove(bubble_indicator_target.tex_reading)
|
||||
bubble_indicator_target.tex_reading = euler_sum[2*i-4].copy()
|
||||
bubble_indicator_target.add(bubble_indicator_target.tex_reading)
|
||||
# center it in the indicator
|
||||
|
||||
if bubble_indicator_target.tex_reading.get_tex_string() != "1":
|
||||
bubble_indicator_target.tex_reading.scale_to_fit_height(0.8*indicator.get_height())
|
||||
# the target is less bright, possibly switch to a white text color
|
||||
if bubble_indicator_target.intensity < 0.7:
|
||||
bubble_indicator.tex_reading.set_fill(color = WHITE)
|
||||
|
||||
# position the target in the thought bubble
|
||||
bubble_indicator_target.move_to(collection_point)
|
||||
|
||||
|
||||
self.add_foreground_mobject(bubble_indicator)
|
||||
|
||||
|
||||
self.wait()
|
||||
|
||||
self.play(
|
||||
Transform(bubble_indicator,bubble_indicator_target)
|
||||
Transform(bubble_indicator,bubble_indicator_target),
|
||||
collected_indicators.shift,left_shift,
|
||||
)
|
||||
|
||||
self.play(
|
||||
Transform(ls[-2], ls[-1]),
|
||||
Transform(indicator_copy,indicator_target),
|
||||
collected_indicators.add(bubble_indicator)
|
||||
|
||||
new_light = light_source.deepcopy()
|
||||
w = new_light.get_source_point()
|
||||
new_light.move_source_to(w + (i-2)*v)
|
||||
w2 = new_light.get_source_point()
|
||||
|
||||
self.add(new_light.lighthouse)
|
||||
self.play(
|
||||
Transform(indicator,indicator_target),
|
||||
new_light.lighthouse.shift,v,
|
||||
)
|
||||
new_light.move_source_to(w + (i-1)*v)
|
||||
new_light.lighthouse.move_to(w + (i-1)*v)
|
||||
|
||||
self.play(SwitchOn(new_light.ambient_light),
|
||||
)
|
||||
|
||||
|
||||
|
||||
|
||||
# quickly switch on off-screen light cones
|
||||
for i in range(NUM_VISIBLE_CONES,NUM_CONES):
|
||||
indicator_start_time = 0.5 * (i+1) * FAST_SWITCH_ON_RUN_TIME/light_source.ambient_light.radius * self.number_line.unit_size
|
||||
indicator_stop_time = indicator_start_time + FAST_INDICATOR_UPDATE_TIME
|
||||
indicator_rate_func = squish_rate_func(#smooth, 0.8, 0.9)
|
||||
smooth,indicator_start_time,indicator_stop_time)
|
||||
ls = LightSource()
|
||||
point = point = self.number_line.number_to_point(i)
|
||||
ls.move_source_to(point)
|
||||
self.play(
|
||||
SwitchOn(ls.ambient_light, run_time = FAST_SWITCH_ON_RUN_TIME),
|
||||
)
|
||||
|
||||
# switch on lights off-screen
|
||||
# while writing an ellipsis in the series
|
||||
# and fading out the stack of indicators
|
||||
# and fading in pi^2/6 instead
|
||||
# move a copy of pi^2/6 down to the series
|
||||
# ans fade in an equals sign
|
||||
# and morph indicator stack into limit value
|
||||
|
||||
sum_indicator = LightIndicator(color = LIGHT_COLOR,
|
||||
radius = INDICATOR_RADIUS,
|
||||
opacity_for_unit_intensity = OPACITY_FOR_UNIT_INTENSITY,
|
||||
show_reading = False
|
||||
)
|
||||
sum_indicator.set_intensity(intensities[0] * np.pi**2/6)
|
||||
sum_indicator_reading = TexMobject("{\pi^2 \over 6}")
|
||||
sum_indicator_reading.set_fill(color = BLACK)
|
||||
sum_indicator_reading.scale_to_fit_height(0.8 * sum_indicator.get_height())
|
||||
sum_indicator.add(sum_indicator_reading)
|
||||
sum_indicator.move_to(collection_point)
|
||||
|
||||
|
||||
|
||||
|
||||
# for i in range(1,NUM_VISIBLE_CONES+1):
|
||||
|
||||
# # create light indicators
|
||||
# # but they contain fractions!
|
||||
# indicator = LightIndicator(color = LIGHT_COLOR,
|
||||
# radius = INDICATOR_RADIUS,
|
||||
# opacity_for_unit_intensity = OPACITY_FOR_UNIT_INTENSITY,
|
||||
# show_reading = False
|
||||
# )
|
||||
# indicator.set_intensity(intensities[i-1])
|
||||
# indicator_reading = euler_sum[-2+2*i]
|
||||
# indicator_reading.scale_to_fit_height(0.8*indicator.get_height())
|
||||
# indicator_reading.move_to(indicator.get_center())
|
||||
# indicator.add(indicator_reading)
|
||||
# indicator.foreground.set_fill(None,opacities[i-1])
|
||||
|
||||
|
||||
# if i == 1:
|
||||
# indicator.next_to(randy,DOWN,buff = 5)
|
||||
# indicator_reading.scale_to_fit_height(0.4*indicator.get_height())
|
||||
# # otherwise we get a huge 1
|
||||
# else:
|
||||
# indicator.next_to(light_indicators[i-2],DOWN, buff = 0.2)
|
||||
|
||||
# light_indicators.append(indicator)
|
||||
# indicators_as_mob.add(indicator)
|
||||
|
||||
|
||||
# bubble.add_content(indicators_as_mob)
|
||||
# indicators_as_mob.shift(DOWN+0.5*LEFT)
|
||||
|
||||
|
||||
# for lh in lighthouses:
|
||||
# self.add_foreground_mobject(lh)
|
||||
|
||||
|
||||
# # slowly switch on visible light cones and increment indicator
|
||||
# for (i,ambient_light) in zip(range(NUM_VISIBLE_CONES),ambient_lights[:NUM_VISIBLE_CONES]):
|
||||
# indicator_start_time = 0.4 * (i+1) * SWITCH_ON_RUN_TIME/ambient_light.radius * self.number_line.unit_size
|
||||
# indicator_stop_time = indicator_start_time + INDICATOR_UPDATE_TIME
|
||||
# indicator_rate_func = squish_rate_func(
|
||||
# smooth,indicator_start_time,indicator_stop_time)
|
||||
# self.play(
|
||||
# SwitchOn(ambient_light),
|
||||
# FadeIn(light_indicators[i])
|
||||
# )
|
||||
|
||||
# # quickly switch on off-screen light cones and increment indicator
|
||||
# for (i,ambient_light) in zip(range(NUM_VISIBLE_CONES,NUM_CONES),ambient_lights[NUM_VISIBLE_CONES:NUM_CONES]):
|
||||
# indicator_start_time = 0.5 * (i+1) * FAST_SWITCH_ON_RUN_TIME/ambient_light.radius * self.number_line.unit_size
|
||||
# indicator_stop_time = indicator_start_time + FAST_INDICATOR_UPDATE_TIME
|
||||
# indicator_rate_func = squish_rate_func(#smooth, 0.8, 0.9)
|
||||
# smooth,indicator_start_time,indicator_stop_time)
|
||||
# self.play(
|
||||
# SwitchOn(ambient_light, run_time = FAST_SWITCH_ON_RUN_TIME),
|
||||
# )
|
||||
|
||||
|
||||
# # show limit value in light indicator and an equals sign
|
||||
# sum_indicator = LightIndicator(color = LIGHT_COLOR,
|
||||
# radius = INDICATOR_RADIUS,
|
||||
# opacity_for_unit_intensity = OPACITY_FOR_UNIT_INTENSITY,
|
||||
# show_reading = False
|
||||
# )
|
||||
# sum_indicator.set_intensity(intensities[0] * np.pi**2/6)
|
||||
# sum_indicator_reading = TexMobject("{\pi^2 \over 6}")
|
||||
# sum_indicator_reading.scale_to_fit_height(0.8 * sum_indicator.get_height())
|
||||
# sum_indicator.add(sum_indicator_reading)
|
||||
|
||||
# brace = Brace(indicators_as_mob, direction = RIGHT, buff = 0.5)
|
||||
# brace.shift(2*RIGHT)
|
||||
# sum_indicator.next_to(brace,RIGHT)
|
||||
|
||||
|
||||
# self.play(
|
||||
# ShowCreation(brace),
|
||||
# ShowCreation(sum_indicator), # DrawBorderThenFill
|
||||
# )
|
||||
self.play(
|
||||
Transform(collected_indicators,sum_indicator)
|
||||
)
|
||||
|
||||
|
||||
|
||||
|
@ -1471,12 +1459,14 @@ class TwoLightSourcesScene(PiCreatureScene):
|
|||
INDICATOR_RADIUS = 0.6
|
||||
OPACITY_FOR_UNIT_INTENSITY = 0.5
|
||||
|
||||
A = np.array([5,-3,0])
|
||||
B = np.array([-5,3,0])
|
||||
C = np.array([-5,-3,0])
|
||||
A = np.array([5.,-3.,0.])
|
||||
B = np.array([-5.,3.,0.])
|
||||
C = np.array([-5.,-3.,0.])
|
||||
|
||||
morty = self.get_primary_pi_creature()
|
||||
morty.scale(0.3).flip().move_to(C)
|
||||
morty.scale(0.3).flip()
|
||||
right_pupil = morty.pupils[1]
|
||||
morty.next_to(C, LEFT, buff = 0, submobject_to_align = right_pupil)
|
||||
|
||||
horizontal = VMobject(stroke_width = 1)
|
||||
horizontal.set_points_as_corners([C,A])
|
||||
|
@ -1501,53 +1491,162 @@ class TwoLightSourcesScene(PiCreatureScene):
|
|||
Write(indicator)
|
||||
)
|
||||
|
||||
ambient_light1 = AmbientLight(max_opacity = MAX_OPACITY)
|
||||
ambient_light1.move_source_to(A)
|
||||
ambient_light2 = AmbientLight(max_opacity = MAX_OPACITY)
|
||||
ambient_light2.move_source_to(B)
|
||||
lighthouse1 = Lighthouse()
|
||||
lighthouse1.next_to(A,DOWN,buff = 0)
|
||||
lighthouse2 = Lighthouse()
|
||||
lighthouse2.next_to(B,DOWN,buff = 0)
|
||||
|
||||
ls1 = LightSource(radius = 20, num_levels = 50)
|
||||
ls2 = ls1.deepcopy()
|
||||
#print "==="
|
||||
#print ls1.get_source_point()
|
||||
ls1.move_source_to(A)
|
||||
#print ls1.get_source_point()
|
||||
#print "==="
|
||||
#print ls2.get_source_point()
|
||||
ls2.move_source_to(B)
|
||||
#print ls2.get_source_point()
|
||||
|
||||
self.play(
|
||||
FadeIn(lighthouse1),
|
||||
FadeIn(lighthouse2),
|
||||
SwitchOn(ambient_light1),
|
||||
SwitchOn(ambient_light2)
|
||||
FadeIn(ls1.lighthouse),
|
||||
FadeIn(ls2.lighthouse),
|
||||
SwitchOn(ls1.ambient_light),
|
||||
SwitchOn(ls2.ambient_light)
|
||||
)
|
||||
|
||||
distance1 = np.linalg.norm(C - ls1.get_source_point())
|
||||
intensity = ls1.ambient_light.opacity_function(distance1) / indicator.opacity_for_unit_intensity
|
||||
distance2 = np.linalg.norm(C - ls2.get_source_point())
|
||||
intensity += ls2.ambient_light.opacity_function(distance2) / indicator.opacity_for_unit_intensity
|
||||
|
||||
self.play(
|
||||
UpdateLightIndicator(indicator,1.5)
|
||||
UpdateLightIndicator(indicator,intensity)
|
||||
)
|
||||
|
||||
self.wait()
|
||||
|
||||
ls3 = ls1.deepcopy()
|
||||
ls3.move_to(np.array([6,3.5,0]))
|
||||
|
||||
new_indicator = indicator.copy()
|
||||
new_indicator.light_source = ls3
|
||||
new_indicator.measurement_point = C
|
||||
self.add(new_indicator)
|
||||
self.play(
|
||||
indicator.shift, 2 * UP
|
||||
)
|
||||
|
||||
ambient_light3 = AmbientLight(max_opacity = MAX_OPACITY)
|
||||
lighthouse3 = Lighthouse()
|
||||
lighthouse3.next_to(ambient_light3,DOWN,buff = 0)
|
||||
ambient_light3.add(lighthouse3)
|
||||
#moving_light.move_to(np.array([6,3.5,0]))
|
||||
|
||||
|
||||
#intensity = intensity_for_light_source(ls3)
|
||||
|
||||
|
||||
self.play(
|
||||
FadeOut(ambient_light1),
|
||||
FadeOut(lighthouse1),
|
||||
FadeOut(ambient_light2),
|
||||
FadeOut(lighthouse2),
|
||||
|
||||
FadeIn(ambient_light3),
|
||||
SwitchOff(ls1.ambient_light),
|
||||
#FadeOut(ls1.lighthouse),
|
||||
SwitchOff(ls2.ambient_light),
|
||||
#FadeOut(ls2.lighthouse),
|
||||
UpdateLightIndicator(new_indicator,0.0)
|
||||
)
|
||||
|
||||
self.wait()
|
||||
# create a *continual* animation for the replacement source
|
||||
updater = ContinualLightIndicatorUpdate(new_indicator)
|
||||
self.add(updater)
|
||||
|
||||
self.play(
|
||||
ambient_light3.shift,UP+RIGHT
|
||||
SwitchOn(ls3.ambient_light),
|
||||
FadeIn(ls3.lighthouse),
|
||||
|
||||
)
|
||||
|
||||
self.wait()
|
||||
|
||||
# move the light source around
|
||||
# TODO: moving along a path arc
|
||||
|
||||
location = np.array([-3,-2.,0.])
|
||||
self.play(ls3.move_source_to,location)
|
||||
location = np.array([6.,1.,0.])
|
||||
self.play(ls3.move_source_to,location)
|
||||
location = np.array([5.,2.,0.])
|
||||
self.play(ls3.move_source_to,location)
|
||||
closer_location = interpolate(location, C, 0.5)
|
||||
self.play(ls3.move_source_to,closer_location)
|
||||
self.play(ls3.move_source_to,location)
|
||||
|
||||
# maybe move in a circle around C using a loop?
|
||||
|
||||
# find the coords of the altitude point H
|
||||
# as the solution of a certain LSE
|
||||
xA = A[0]
|
||||
yA = A[1]
|
||||
xB = B[0]
|
||||
yB = B[1]
|
||||
xC = C[0]
|
||||
yC = C[1]
|
||||
matrix = np.array([[yA - yB, xB - xA], [xA - xB, yA - yB]]) # sic
|
||||
vector = np.array([xB * yA - xA * yB, xC * (xA - xB) + yC * (yA - yB)])
|
||||
H2 = np.linalg.solve(matrix,vector)
|
||||
H = np.append(H2, 0.)
|
||||
|
||||
self.play(ls3.move_source_to,H)
|
||||
|
||||
|
||||
|
||||
# draw lines to complete the geometric picture
|
||||
# and label the lengths
|
||||
|
||||
line_a = VMobject()
|
||||
line_a.set_points_as_corners([B,C])
|
||||
line_b = VMobject()
|
||||
line_b.set_points_as_corners([A,C])
|
||||
line_c = VMobject()
|
||||
line_c.set_points_as_corners([A,B])
|
||||
line_h = VMobject()
|
||||
line_h.set_points_as_corners([H,C])
|
||||
|
||||
label_a = TexMobject("a")
|
||||
label_a.next_to(line_a, LEFT, buff = 0.5)
|
||||
label_b = TexMobject("b")
|
||||
label_b.next_to(line_b, DOWN, buff = 0.5)
|
||||
label_h = TexMobject("h")
|
||||
label_h.next_to(line_h.get_center(), RIGHT, buff = 0.5)
|
||||
|
||||
self.play(
|
||||
ShowCreation(line_a),
|
||||
Write(label_a)
|
||||
)
|
||||
|
||||
self.play(
|
||||
ShowCreation(line_b),
|
||||
Write(label_b)
|
||||
)
|
||||
|
||||
self.play(
|
||||
ShowCreation(line_c),
|
||||
)
|
||||
|
||||
self.play(
|
||||
ShowCreation(line_h),
|
||||
Write(label_h)
|
||||
)
|
||||
|
||||
|
||||
# state the IPT
|
||||
theorem_location = np.array([3.,2.,0.])
|
||||
theorem = TexMobject("{1\over a^2} + {1\over b^2} = {1\over h^2}")
|
||||
theorem_name = TextMobject("Inverse Pythagorean Theorem")
|
||||
buffer = 1.2
|
||||
theorem_box = Rectangle(width = buffer*theorem.get_width(),
|
||||
height = buffer*theorem.get_height())
|
||||
|
||||
theorem.move_to(theorem_location)
|
||||
theorem_box.move_to(theorem_location)
|
||||
theorem_name.next_to(theorem_box,UP)
|
||||
|
||||
self.play(
|
||||
Write(theorem),
|
||||
)
|
||||
|
||||
self.play(
|
||||
ShowCreation(theorem_box),
|
||||
Write(theorem_name),
|
||||
)
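As an illustrative aside, the stated relation can be checked numerically for this scene's triangle, where the right angle sits at C and h is the altitude from C onto AB:

    import numpy as np
    A, B, C = np.array([5.,-3.,0.]), np.array([-5.,3.,0.]), np.array([-5.,-3.,0.])
    a = np.linalg.norm(B - C)              # 6.0
    b = np.linalg.norm(A - C)              # 10.0
    h = a*b / np.linalg.norm(A - B)        # altitude onto the hypotenuse AB
    print 1/a**2 + 1/b**2, 1/h**2          # both come out to ~0.037778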

@@ -1557,3 +1656,4 @@ class TwoLightSourcesScene(PiCreatureScene):
@@ -30,18 +30,48 @@ from mobject.svg_mobject import *
from mobject.tex_mobject import *
from topics.graph_scene import *

USE_ALMOST_FOURIER_BY_DEFAULT = True
NUM_SAMPLES_FOR_FFT = 1000


def get_fourier_graph(
    axes, time_func, t_min, t_max,
    n_samples = NUM_SAMPLES_FOR_FFT,
    complex_to_real_func = lambda z : z.real,
    color = RED,
    ):
    # N = n_samples
    # T = time_range/n_samples
    time_range = float(t_max - t_min)
    time_step_size = time_range/n_samples
    time_samples = time_func(np.linspace(t_min, t_max, n_samples))
    fft_output = np.fft.fft(time_samples)
    frequencies = np.linspace(0.0, n_samples/(2.0*time_range), n_samples//2)
    # #Cycles per second of fourier_samples[1]
    # (1/time_range)*n_samples
    # freq_step_size = 1./time_range
    graph = VMobject()
    graph.set_points_smoothly([
        axes.coords_to_point(
            x, 200.0*complex_to_real_func(y)/n_samples,
        )
        for x, y in zip(frequencies, fft_output[:n_samples//2])
    ])
    graph.highlight(color)
    return graph

def get_fourier_transform(
    func, t_min, t_max,
    real_part = True,
    use_almost_fourier = True,
    complex_to_real_func = lambda z : z.real,
    use_almost_fourier = USE_ALMOST_FOURIER_BY_DEFAULT,
    ):
    # part = "real" if real_part else "imag"
    trig = np.cos if real_part else np.sin
    scalar = 1./(t_max - t_min) if use_almost_fourier else 1.0
    def fourier_transform(f):
        return scalar*scipy.integrate.quad(
            lambda t : func(t)*trig(-TAU*f*t),
            lambda t : complex_to_real_func(
                # f(t) e^{-TAU*i*f*t}
                func(t)*np.exp(complex(0, -TAU*f*t))
            ),
            t_min, t_max
        )[0]
    return fourier_transform
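A brief usage sketch for get_fourier_transform as defined above (frequencies and range chosen arbitrarily for illustration; with the module default use_almost_fourier = True, the result is scaled by 1/(t_max - t_min)):

    # The "almost Fourier transform" of cos(TAU*3*t) over [0, 10] should peak near f = 3.
    ft = get_fourier_transform(lambda t : np.cos(TAU*3*t), t_min = 0, t_max = 10)
    print ft(3.0)   # ~0.5 at the matching frequency
    print ft(5.0)   # ~0 away from it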
active_projects/uncertainty.py (new file, 391 lines)

@@ -0,0 +1,391 @@
from helpers import *
import scipy

from animation.animation import Animation
from animation.transform import *
from animation.simple_animations import *
from animation.playground import *
from animation.continual_animation import *
from topics.geometry import *
from topics.characters import *
from topics.functions import *
from topics.fractals import *
from topics.number_line import *
from topics.combinatorics import *
from topics.numerals import *
from topics.three_dimensions import *
from topics.objects import *
from topics.probability import *
from topics.complex_numbers import *
from topics.common_scenes import *
from scene import Scene
from scene.reconfigurable_scene import ReconfigurableScene
from scene.zoomed_scene import *
from camera import Camera
from mobject import *
from mobject.image_mobject import *
from mobject.vectorized_mobject import *
from mobject.svg_mobject import *
from mobject.tex_mobject import *
from topics.graph_scene import *

from active_projects.fourier import *
FREQUENCY_COLOR = RED
USE_ALMOST_FOURIER_BY_DEFAULT = False

class GaussianDistributionWrapper(Line):
    """
    This is meant to encode a 2d normal distribution as
    a mobject (so as to be able to have it be interpolated
    during animations). It is a line whose start_point coordinates
    encode the coordinates of mu, and whose end_point - start_point
    encodes the coordinates of sigma.
    """
    CONFIG = {
        "stroke_width" : 0,
        "mu_x" : 0,
        "sigma_x" : 1,
        "mu_y" : 0,
        "sigma_y" : 0,
    }
    def __init__(self, **kwargs):
        Line.__init__(self, ORIGIN, RIGHT, **kwargs)
        self.change_parameters(self.mu_x, self.mu_y, self.sigma_x, self.sigma_y)

    def change_parameters(self, mu_x = None, mu_y = None, sigma_x = None, sigma_y = None):
        curr_parameters = self.get_parameteters()
        args = [mu_x, mu_y, sigma_x, sigma_y]
        new_parameters = [
            arg or curr
            for curr, arg in zip(curr_parameters, args)
        ]
        mu_x, mu_y, sigma_x, sigma_y = new_parameters
        mu_point = mu_x*RIGHT + mu_y*UP
        sigma_vect = sigma_x*RIGHT + sigma_y*UP
        self.put_start_and_end_on(mu_point, mu_point + sigma_vect)
        return self

    def get_parameteters(self):
        """ Return mu_x, mu_y, sigma_x, sigma_y"""
        start, end = self.get_start_and_end()
        return tuple(it.chain(start[:2], (end - start)[:2]))

    def get_random_points(self, size = 1):
        mu_x, mu_y, sigma_x, sigma_y = self.get_parameteters()
        x_vals = np.random.normal(mu_x, sigma_x, size)
        y_vals = np.random.normal(mu_y, sigma_y, size)
        return np.array([
            x*RIGHT + y*UP
            for x, y in zip(x_vals, y_vals)
        ])

class ProbabalisticMobjectCloud(ContinualAnimation):
    CONFIG = {
        "fill_opacity" : 0.25,
        "n_copies" : 100,
        "gaussian_distribution_wrapper_config" : {
            "sigma_x" : 1,
        }
    }
    def __init__(self, prototype, **kwargs):
        digest_config(self, kwargs)
        fill_opacity = self.fill_opacity or prototype.get_fill_opacity()
        self.gaussian_distribution_wrapper = GaussianDistributionWrapper(
            **self.gaussian_distribution_wrapper_config
        )
        group = VGroup(*[
            prototype.copy().set_fill(opacity = fill_opacity)
            for x in range(self.n_copies)
        ])
        ContinualAnimation.__init__(self, group, **kwargs)

    def update_mobject(self, dt):
        group = self.mobject
        points = self.gaussian_distribution_wrapper.get_random_points(len(group))
        for mob, point in zip(group, points):
            self.update_mobject_by_point(mob, point)
        return self

    def update_mobject_by_point(self, mobject, point):
        mobject.move_to(point)
        return self

class ProbabalisticDotCloud(ProbabalisticMobjectCloud):
    CONFIG = {
        "color" : BLUE,
    }
    def __init__(self, **kwargs):
        digest_config(self, kwargs)
        dot = Dot(color = self.color)
        ProbabalisticMobjectCloud.__init__(self, dot)

class ProbabalisticVectorCloud(ProbabalisticMobjectCloud):
    CONFIG = {
        "color" : RED,
        "n_copies" : 20,
        "fill_opacity" : 0.5,
        "center_func" : lambda : ORIGIN,
    }
    def __init__(self, **kwargs):
        digest_config(self, kwargs)
        vector = Vector(
            RIGHT, color = self.color,
            max_tip_length_to_length_ratio = 1,
        )
        ProbabalisticMobjectCloud.__init__(self, vector)

    def update_mobject_by_point(self, vector, point):
        vector.put_start_and_end_on(
            self.center_func(),
            point
        )
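A minimal usage sketch of the cloud classes above (sigma values arbitrary; this mirrors how MentionUncertaintyPrinciple uses them below):

    # Inside a Scene's construct(): dots are re-sampled from the wrapped 2d Gaussian every frame.
    dot_cloud = ProbabalisticDotCloud()
    dot_cloud.gaussian_distribution_wrapper.change_parameters(mu_x = 2, sigma_x = 0.5)
    self.add(dot_cloud)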
###################
|
||||
|
||||
class MentionUncertaintyPrinciple(TeacherStudentsScene):
|
||||
def construct(self):
|
||||
title = TextMobject("Heisenberg Uncertainty Principle")
|
||||
title.to_edge(UP)
|
||||
|
||||
dot_cloud = ProbabalisticDotCloud()
|
||||
vector_cloud = ProbabalisticVectorCloud(
|
||||
gaussian_distribution_wrapper_config = {"sigma_x" : 0.2},
|
||||
center_func = dot_cloud.gaussian_distribution_wrapper.get_start,
|
||||
)
|
||||
for cloud in dot_cloud, vector_cloud:
|
||||
gdw = cloud.gaussian_distribution_wrapper
|
||||
gdw.move_to(title.get_center(), LEFT)
|
||||
gdw.shift(2*DOWN)
|
||||
vector_cloud.gaussian_distribution_wrapper.shift(3*RIGHT)
|
||||
|
||||
def get_brace_text_group_update(gdw, vect, text):
|
||||
brace = Brace(gdw, vect)
|
||||
text = brace.get_tex("\\sigma_{\\text{%s}}"%text, buff = SMALL_BUFF)
|
||||
group = VGroup(brace, text)
|
||||
def update_group(group):
|
||||
brace, text = group
|
||||
brace.match_width(gdw, stretch = True)
|
||||
brace.next_to(gdw, vect)
|
||||
text.next_to(brace, vect, buff = SMALL_BUFF)
|
||||
return ContinualUpdateFromFunc(group, update_group)
|
||||
|
||||
dot_brace_anim = get_brace_text_group_update(
|
||||
dot_cloud.gaussian_distribution_wrapper,
|
||||
DOWN, "position",
|
||||
)
|
||||
vector_brace_anim = get_brace_text_group_update(
|
||||
vector_cloud.gaussian_distribution_wrapper,
|
||||
UP, "momentum",
|
||||
)
|
||||
|
||||
self.add(title)
|
||||
self.add(dot_cloud)
|
||||
self.play(
|
||||
Write(title),
|
||||
self.teacher.change, "raise_right_hand",
|
||||
self.get_student_changes(*["pondering"]*3)
|
||||
)
|
||||
self.play(
|
||||
Write(dot_brace_anim.mobject, run_time = 1)
|
||||
)
|
||||
self.add(dot_brace_anim)
|
||||
self.wait()
|
||||
# self.wait(2)
|
||||
self.play(
|
||||
dot_cloud.gaussian_distribution_wrapper.change_parameters,
|
||||
{"sigma_x" : 0.1},
|
||||
run_time = 2,
|
||||
)
|
||||
self.wait()
|
||||
self.add(vector_cloud)
|
||||
self.play(
|
||||
FadeIn(vector_brace_anim.mobject)
|
||||
)
|
||||
self.add(vector_brace_anim)
|
||||
self.play(
|
||||
vector_cloud.gaussian_distribution_wrapper.change_parameters,
|
||||
{"sigma_x" : 1},
|
||||
self.get_student_changes(*3*["confused"]),
|
||||
run_time = 3,
|
||||
)
|
||||
#Back and forth
|
||||
for x in range(2):
|
||||
self.play(
|
||||
dot_cloud.gaussian_distribution_wrapper.change_parameters,
|
||||
{"sigma_x" : 2},
|
||||
vector_cloud.gaussian_distribution_wrapper.change_parameters,
|
||||
{"sigma_x" : 0.1},
|
||||
run_time = 3,
|
||||
)
|
||||
self.change_student_modes("thinking", "erm", "sassy")
|
||||
self.play(
|
||||
dot_cloud.gaussian_distribution_wrapper.change_parameters,
|
||||
{"sigma_x" : 0.1},
|
||||
vector_cloud.gaussian_distribution_wrapper.change_parameters,
|
||||
{"sigma_x" : 1},
|
||||
run_time = 3,
|
||||
)
|
||||
self.wait()
|
||||
|
||||
class FourierTradeoff(Scene):
|
||||
def construct(self):
|
||||
#Setup axes
|
||||
time_mean = 4
|
||||
time_axes = Axes(
|
||||
x_min = 0,
|
||||
x_max = 2*time_mean,
|
||||
x_axis_config = {"unit_size" : 1.5},
|
||||
y_min = -2,
|
||||
y_max = 2,
|
||||
y_axis_config = {"unit_size" : 0.5}
|
||||
)
|
||||
time_label = TextMobject("Time")
|
||||
time_label.next_to(
|
||||
time_axes.x_axis.get_right(), UP,
|
||||
buff = MED_SMALL_BUFF,
|
||||
)
|
||||
time_axes.add(time_label)
|
||||
time_axes.center().to_edge(UP)
|
||||
time_axes.x_axis.add_numbers(*range(1, 2*time_mean))
|
||||
|
||||
frequency_axes = Axes(
|
||||
x_min = 0,
|
||||
x_max = 8,
|
||||
x_axis_config = {"unit_size" : 1.5},
|
||||
y_min = 0,
|
||||
y_max = 15,
|
||||
y_axis_config = {
|
||||
"unit_size" : 0.15,
|
||||
"tick_frequency" : 5,
|
||||
},
|
||||
color = TEAL,
|
||||
)
|
||||
frequency_label = TextMobject("Frequency")
|
||||
frequency_label.next_to(
|
||||
frequency_axes.x_axis.get_right(), UP,
|
||||
buff = MED_SMALL_BUFF,
|
||||
)
|
||||
frequency_label.highlight(FREQUENCY_COLOR)
|
||||
frequency_axes.add(frequency_label)
|
||||
frequency_axes.move_to(time_axes, LEFT)
|
||||
frequency_axes.to_edge(DOWN, buff = LARGE_BUFF)
|
||||
frequency_axes.x_axis.add_numbers()
|
||||
|
||||
# Graph information
|
||||
|
||||
#x-coordinate of this point determines width of wave_packet graph
|
||||
width_tracker = VectorizedPoint(0.5*RIGHT)
|
||||
def get_width():
|
||||
return width_tracker.get_center()[0]
|
||||
|
||||
def get_wave_packet_function():
|
||||
factor = 1./get_width()
|
||||
return lambda t : np.sqrt(factor)*np.cos(4*TAU*t)*np.exp(-factor*(t-time_mean)**2)
|
||||
|
||||
def get_wave_packet():
|
||||
graph = time_axes.get_graph(
|
||||
get_wave_packet_function(),
|
||||
num_graph_points = 200,
|
||||
)
|
||||
graph.highlight(YELLOW)
|
||||
return graph
|
||||
|
||||
time_radius = 10
|
||||
def get_wave_packet_fourier_transform():
|
||||
return get_fourier_graph(
|
||||
frequency_axes, get_wave_packet_function(),
|
||||
t_min = time_mean - time_radius,
|
||||
t_max = time_mean + time_radius,
|
||||
n_samples = 2*time_radius*17,
|
||||
complex_to_real_func = abs,
|
||||
color = FREQUENCY_COLOR,
|
||||
)
|
||||
|
||||
wave_packet = get_wave_packet()
|
||||
wave_packet_update = UpdateFromFunc(
|
||||
wave_packet,
|
||||
lambda g : Transform(g, get_wave_packet()).update(1)
|
||||
)
|
||||
fourier_graph = get_wave_packet_fourier_transform()
|
||||
fourier_graph_update = UpdateFromFunc(
|
||||
fourier_graph,
|
||||
lambda g : Transform(g, get_wave_packet_fourier_transform()).update(1)
|
||||
)
|
||||
|
||||
arrow = Arrow(
|
||||
wave_packet, frequency_axes.coords_to_point(4, 10),
|
||||
color = FREQUENCY_COLOR,
|
||||
)
|
||||
fourier_words = TextMobject("Fourier Transform")
|
||||
fourier_words.next_to(arrow, RIGHT, buff = MED_LARGE_BUFF)
|
||||
sub_words = TextMobject("(To be explained shortly)")
|
||||
sub_words.highlight(BLUE)
|
||||
sub_words.scale(0.75)
|
||||
sub_words.next_to(fourier_words, DOWN)
|
||||
|
||||
#Draw items
|
||||
self.add(time_axes, frequency_axes)
|
||||
self.play(ShowCreation(wave_packet))
|
||||
self.play(
|
||||
ReplacementTransform(
|
||||
wave_packet.copy(),
|
||||
fourier_graph,
|
||||
),
|
||||
GrowArrow(arrow),
|
||||
Write(fourier_words, run_time = 1)
|
||||
)
|
||||
# self.play(FadeOut(arrow))
|
||||
self.wait()
|
||||
for width in 6, 0.1, 1:
|
||||
self.play(
|
||||
width_tracker.move_to, width*RIGHT,
|
||||
wave_packet_update,
|
||||
fourier_graph_update,
|
||||
run_time = 3
|
||||
)
|
||||
if sub_words not in self.mobjects:
|
||||
self.play(FadeIn(sub_words))
|
||||
else:
|
||||
self.wait()
|
||||
self.wait()
@@ -48,7 +48,7 @@ class Transform(Animation):
            self.path_arc,
            self.path_arc_axis,
        )

    def get_all_mobjects(self):
        return self.mobject, self.starting_mobject, self.target_mobject
camera/camera.py: 236 changes
@@ -8,7 +8,7 @@ import aggdraw

from helpers import *
from mobject import Mobject, PMobject, VMobject, \
    ImageMobject, Group, BackgroundColoredVMobject
    ImageMobject, Group

class Camera(object):
    CONFIG = {
@@ -31,7 +31,11 @@ class Camera(object):
        "image_mode" : "RGBA",
        "n_rgb_coords" : 4,
        "background_alpha" : 0, #Out of color_max_val
        "pixel_array_dtype" : 'uint8'
        "pixel_array_dtype" : 'uint8',
        "use_z_coordinate_for_display_order" : False,
        # z_buff_func is only used if the flag above is set to True.
        # round z coordinate to nearest hundredth when comparing
        "z_buff_func" : lambda m : np.round(m.get_center()[2], 2),
    }

    def __init__(self, background = None, **kwargs):
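A configuration sketch for the new flag (assumed usage, not from this diff): the commit makes z-sorting of mobjects opt-in, so a camera config dict like the following would turn it back on.

    # Hypothetical camera_config passed through to Camera via digest_config:
    camera_config = {"use_z_coordinate_for_display_order" : True}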
@@ -94,7 +98,12 @@ class Camera(object):
        return retval

    def set_pixel_array(self, pixel_array, convert_from_floats = False):
        self.pixel_array = self.convert_pixel_array(pixel_array, convert_from_floats)
        converted_array = self.convert_pixel_array(pixel_array, convert_from_floats)
        if not hasattr(self, "pixel_array"): #TODO: And the shapes match?
            self.pixel_array = converted_array
        else:
            #Set in place
            self.pixel_array[:,:,:] = converted_array[:,:,:]

    def set_background(self, pixel_array, convert_from_floats = False):
        self.background = self.convert_pixel_array(pixel_array, convert_from_floats)
@@ -141,8 +150,6 @@ class Camera(object):
        self, mobjects,
        include_submobjects = True,
        excluded_mobjects = None,
        #Round z coordinate to nearest hundredth when comparing
        z_buff_func = lambda m : np.round(m.get_center()[2], 2)
        ):
        if include_submobjects:
            mobjects = self.extract_mobject_family_members(
@@ -154,10 +161,16 @@ class Camera(object):
            )
            mobjects = list_difference_update(mobjects, all_excluded)

        # Should perhaps think about what happens here when include_submobjects is False,
        # (for now, the onus is then on the caller to ensure this is handled correctly by
        # passing us an appropriately pre-flattened list of mobjects if need be)
        return sorted(mobjects, lambda a, b: cmp(z_buff_func(a), z_buff_func(b)))
        if self.use_z_coordinate_for_display_order:
            # Should perhaps think about what happens here when include_submobjects is False,
            # (for now, the onus is then on the caller to ensure this is handled correctly by
            # passing us an appropriately pre-flattened list of mobjects if need be)
            return sorted(
                mobjects,
                lambda a, b: cmp(self.z_buff_func(a), self.z_buff_func(b))
            )
        else:
            return mobjects

    def capture_mobject(self, mobject, **kwargs):
        return self.capture_mobjects([mobject], **kwargs)
@ -166,15 +179,13 @@ class Camera(object):
|
|||
mobjects = self.get_mobjects_to_display(mobjects, **kwargs)
|
||||
vmobjects = []
|
||||
for mobject in mobjects:
|
||||
if isinstance(mobject, VMobject) and not isinstance(mobject, BackgroundColoredVMobject):
|
||||
vmobjects.append(mobject)
|
||||
if isinstance(mobject, VMobject):
|
||||
vmobjects.append(mobject)
|
||||
elif len(vmobjects) > 0:
|
||||
self.display_multiple_vectorized_mobjects(vmobjects)
|
||||
vmobjects = []
|
||||
|
||||
if isinstance(mobject, BackgroundColoredVMobject):
|
||||
self.display_background_colored_vmobject(mobject)
|
||||
elif isinstance(mobject, PMobject):
|
||||
if isinstance(mobject, PMobject):
|
||||
self.display_point_cloud(
|
||||
mobject.points, mobject.rgbas,
|
||||
self.adjusted_thickness(mobject.stroke_width)
|
||||
|
@ -190,37 +201,65 @@ class Camera(object):
|
|||
#TODO, more? Call out if it's unknown?
|
||||
self.display_multiple_vectorized_mobjects(vmobjects)
|
||||
|
||||
## Methods associated with svg rendering
|
||||
|
||||
def get_aggdraw_canvas(self):
|
||||
if not hasattr(self, "canvas"):
|
||||
self.reset_aggdraw_canvas()
|
||||
return self.canvas
|
||||
|
||||
def reset_aggdraw_canvas(self):
|
||||
image = Image.fromarray(self.pixel_array, mode = self.image_mode)
|
||||
self.canvas = aggdraw.Draw(image)
|
||||
|
||||
def display_multiple_vectorized_mobjects(self, vmobjects):
|
||||
if len(vmobjects) == 0:
|
||||
return
|
||||
#More efficient to bundle together in one "canvas"
|
||||
image = Image.fromarray(self.pixel_array, mode = self.image_mode)
|
||||
canvas = aggdraw.Draw(image)
|
||||
batches = batch_by_property(
|
||||
vmobjects,
|
||||
lambda vm : vm.get_background_image_file()
|
||||
)
|
||||
for batch in batches:
|
||||
if batch[0].get_background_image_file():
|
||||
self.display_multiple_background_colored_vmobject(batch)
|
||||
else:
|
||||
self.display_multiple_non_background_colored_vmobjects(batch)
|
||||
|
||||
def display_multiple_non_background_colored_vmobjects(self, vmobjects):
|
||||
self.reset_aggdraw_canvas()
|
||||
canvas = self.get_aggdraw_canvas()
|
||||
for vmobject in vmobjects:
|
||||
self.display_vectorized(vmobject, canvas)
|
||||
canvas.flush()
|
||||
|
||||
self.pixel_array[:,:] = image
|
||||
|
||||
def display_vectorized(self, vmobject, canvas):
|
||||
def display_vectorized(self, vmobject, canvas = None):
|
||||
if vmobject.is_subpath:
|
||||
#Subpath vectorized mobjects are taken care
|
||||
#of by their parent
|
||||
return
|
||||
canvas = canvas or self.get_aggdraw_canvas()
|
||||
pen, fill = self.get_pen_and_fill(vmobject)
|
||||
pathstring = self.get_pathstring(vmobject)
|
||||
symbol = aggdraw.Symbol(pathstring)
|
||||
canvas.symbol((0, 0), symbol, pen, fill)
|
||||
|
||||
def get_pen_and_fill(self, vmobject):
|
||||
pen = aggdraw.Pen(
|
||||
self.color_to_hex_l(self.get_stroke_color(vmobject)),
|
||||
max(vmobject.stroke_width, 0)
|
||||
)
|
||||
fill = aggdraw.Brush(
|
||||
self.color_to_hex_l(self.get_fill_color(vmobject)),
|
||||
opacity = int(self.color_max_val*vmobject.get_fill_opacity())
|
||||
)
|
||||
stroke_width = max(vmobject.get_stroke_width(), 0)
|
||||
if stroke_width == 0:
|
||||
pen = None
|
||||
else:
|
||||
stroke_rgb = self.get_stroke_rgb(vmobject)
|
||||
stroke_hex = rgb_to_hex(stroke_rgb)
|
||||
pen = aggdraw.Pen(stroke_hex, stroke_width)
|
||||
|
||||
fill_opacity = int(self.color_max_val*vmobject.get_fill_opacity())
|
||||
if fill_opacity == 0:
|
||||
fill = None
|
||||
else:
|
||||
fill_rgb = self.get_fill_rgb(vmobject)
|
||||
fill_hex = rgb_to_hex(fill_rgb)
|
||||
fill = aggdraw.Brush(fill_hex, fill_opacity)
|
||||
|
||||
return (pen, fill)
|
||||
|
||||
def color_to_hex_l(self, color):
|
||||
|
@ -229,57 +268,49 @@ class Camera(object):
|
|||
except:
|
||||
return Color(BLACK).get_hex_l()
|
||||
|
||||
def get_stroke_color(self, vmobject):
|
||||
return vmobject.get_stroke_color()
|
||||
def get_stroke_rgb(self, vmobject):
|
||||
return vmobject.get_stroke_rgb()
|
||||
|
||||
def get_fill_color(self, vmobject):
|
||||
return vmobject.get_fill_color()
|
||||
def get_fill_rgb(self, vmobject):
|
||||
return vmobject.get_fill_rgb()
|
||||
|
||||
def get_pathstring(self, vmobject):
|
||||
result = ""
|
||||
result = ""
|
||||
for mob in [vmobject]+vmobject.get_subpath_mobjects():
|
||||
points = mob.points
|
||||
# points = self.adjust_out_of_range_points(points)
|
||||
if len(points) == 0:
|
||||
continue
|
||||
points = self.align_points_to_camera(points)
|
||||
coords = self.points_to_pixel_coords(points)
|
||||
start = "M%d %d"%tuple(coords[0])
|
||||
#(handle1, handle2, anchor) tripletes
|
||||
triplets = zip(*[
|
||||
coords[i+1::3]
|
||||
for i in range(3)
|
||||
])
|
||||
cubics = [
|
||||
"C" + " ".join(map(str, it.chain(*triplet)))
|
||||
for triplet in triplets
|
||||
]
|
||||
end = "Z" if vmobject.mark_paths_closed else ""
|
||||
result += " ".join([start] + cubics + [end])
|
||||
aligned_points = self.align_points_to_camera(points)
|
||||
coords = self.points_to_pixel_coords(aligned_points)
|
||||
coord_strings = coords.flatten().astype(str)
|
||||
#Start new path string with M
|
||||
coord_strings[0] = "M" + coord_strings[0]
|
||||
#The C at the start of every 6th number communicates
|
||||
#that the following 6 define a cubic Bezier
|
||||
coord_strings[2::6] = map(lambda s : "C" + str(s), coord_strings[2::6])
|
||||
#Possibly finish with "Z"
|
||||
if vmobject.mark_paths_closed:
|
||||
coord_strings[-1] = coord_strings[-1] + " Z"
|
||||
result += " ".join(coord_strings)
|
||||
return result
|
||||
|
||||
def display_background_colored_vmobject(self, cvmobject):
|
||||
mob_array = np.zeros(
|
||||
self.pixel_array.shape,
|
||||
dtype = self.pixel_array_dtype
|
||||
)
|
||||
image = Image.fromarray(mob_array, mode = self.image_mode)
|
||||
canvas = aggdraw.Draw(image)
|
||||
self.display_vectorized(cvmobject, canvas)
|
||||
canvas.flush()
|
||||
cv_background = cvmobject.background_array
|
||||
if not np.all(self.pixel_array.shape == cv_background):
|
||||
cvmobject.resize_background_array_to_match(self.pixel_array)
|
||||
cv_background = cvmobject.background_array
|
||||
array = np.array(
|
||||
(np.array(mob_array).astype('float')/255.)*\
|
||||
np.array(cv_background),
|
||||
dtype = self.pixel_array_dtype
|
||||
)
|
||||
self.pixel_array[:,:] = np.maximum(
|
||||
self.pixel_array, array
|
||||
)
|
||||
def get_background_colored_vmobject_displayer(self):
|
||||
#Quite wordy to type out a bunch
|
||||
long_name = "background_colored_vmobject_displayer"
|
||||
if not hasattr(self, long_name):
|
||||
setattr(self, long_name, BackgroundColoredVMobjectDisplayer(self))
|
||||
return getattr(self, long_name)
|
||||
|
||||
def display_multiple_background_colored_vmobject(self, cvmobjects):
|
||||
displayer = self.get_background_colored_vmobject_displayer()
|
||||
cvmobject_pixel_array = displayer.display(*cvmobjects)
|
||||
self.pixel_array[:,:] = np.maximum(
|
||||
self.pixel_array, cvmobject_pixel_array
|
||||
)
|
||||
return self
|
||||
|
||||
## Methods for other rendering
|
||||
|
||||
def display_point_cloud(self, points, rgbas, thickness):
|
||||
if len(points) == 0:
|
||||
|
@ -475,6 +506,75 @@ class Camera(object):
|
|||
|
||||
return centered_space_coords
|
||||
|
||||
class BackgroundColoredVMobjectDisplayer(object):
|
||||
def __init__(self, camera):
|
||||
self.camera = camera
|
||||
self.file_name_to_pixel_array_map = {}
|
||||
self.init_canvas()
|
||||
|
||||
def init_canvas(self):
|
||||
self.pixel_array = np.zeros(
|
||||
self.camera.pixel_array.shape,
|
||||
dtype = self.camera.pixel_array_dtype,
|
||||
)
|
||||
self.reset_canvas()
|
||||
|
||||
def reset_canvas(self):
|
||||
image = Image.fromarray(self.pixel_array, mode = self.camera.image_mode)
|
||||
self.canvas = aggdraw.Draw(image)
|
||||
|
||||
def resize_background_array(
|
||||
self, background_array,
|
||||
new_width, new_height,
|
||||
mode = "RGBA"
|
||||
):
|
||||
image = Image.fromarray(background_array, mode = mode)
|
||||
resized_image = image.resize((new_width, new_height))
|
||||
return np.array(resized_image)
|
||||
|
||||
def resize_background_array_to_match(self, background_array, pixel_array):
|
||||
height, width = pixel_array.shape[:2]
|
||||
mode = "RGBA" if pixel_array.shape[2] == 4 else "RGB"
|
||||
return self.resize_background_array(background_array, width, height, mode)
|
||||
|
||||
def get_background_array(self, cvmobject):
|
||||
file_name = cvmobject.get_background_image_file()
|
||||
if file_name in self.file_name_to_pixel_array_map:
|
||||
return self.file_name_to_pixel_array_map[file_name]
|
||||
full_path = get_full_raster_image_path(file_name)
|
||||
image = Image.open(full_path)
|
||||
array = np.array(image)
|
||||
|
||||
camera = self.camera
|
||||
if not np.all(camera.pixel_array.shape == array.shape):
|
||||
array = self.resize_background_array_to_match(array, camera.pixel_array)
|
||||
|
||||
self.file_name_to_pixel_array_map[file_name] = array
|
||||
return array
|
||||
|
||||
def display(self, *cvmobjects):
|
||||
batches = batch_by_property(
|
||||
cvmobjects, lambda cv : cv.get_background_image_file()
|
||||
)
|
||||
curr_array = None
|
||||
for batch in batches:
|
||||
background_array = self.get_background_array(batch[0])
|
||||
for cvmobject in batch:
|
||||
self.camera.display_vectorized(cvmobject, self.canvas)
|
||||
self.canvas.flush()
|
||||
new_array = np.array(
|
||||
(background_array*self.pixel_array.astype('float')/255),
|
||||
dtype = self.camera.pixel_array_dtype
|
||||
)
|
||||
if curr_array is None:
|
||||
curr_array = new_array
|
||||
else:
|
||||
curr_array = np.maximum(curr_array, new_array)
|
||||
self.pixel_array[:,:] = 0
|
||||
self.reset_canvas()
|
||||
return curr_array
|
||||
|
||||
|
||||
class MovingCamera(Camera):
|
||||
"""
|
||||
Stays in line with the height, width and position
|
||||
|
|
|
@@ -68,7 +68,7 @@ def get_configuration():
        for short_arg, long_arg in optional_args:
            parser.add_argument(short_arg, long_arg, action = "store_true")
        parser.add_argument("-o", "--output_name")
        parser.add_argument("-n", "--skip_to_animation_number")
        parser.add_argument("-n", "--start_at_animation_number")
        args = parser.parse_args()
    except argparse.ArgumentError as err:
        print(str(err))
@@ -88,7 +88,8 @@ def get_configuration():
        "ignore_waits" : args.preview,
        "write_all" : args.write_all,
        "output_name" : args.output_name,
        "skip_to_animation_number" : args.skip_to_animation_number,
        "start_at_animation_number" : args.start_at_animation_number,
        "end_at_animation_number" : None,
    }
    if args.low_quality:
        config["camera_config"] = LOW_QUALITY_CAMERA_CONFIG
@@ -100,13 +101,18 @@ def get_configuration():
        config["camera_config"] = PRODUCTION_QUALITY_CAMERA_CONFIG
        config["frame_duration"] = PRODUCTION_QUALITY_FRAME_DURATION

    stan = config["skip_to_animation_number"]
    stan = config["start_at_animation_number"]
    if stan is not None:
        config["skip_to_animation_number"] = int(stan)
        if "," in stan:
            start, end = stan.split(",")
            config["start_at_animation_number"] = int(start)
            config["end_at_animation_number"] = int(end)
        else:
            config["start_at_animation_number"] = int(stan)

    config["skip_animations"] = any([
        config["show_last_frame"] and not config["write_to_movie"],
        config["skip_to_animation_number"],
        config["start_at_animation_number"],
    ])
    return config
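Usage sketch for the renamed flag (assuming this is manim's extract_scene.py entry point; file and scene names are placeholders): -n now takes either a single start index or a "start,end" pair, which the block above splits into start_at_animation_number and end_at_animation_number.

    python extract_scene.py my_project.py MyScene -n 5,8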
@@ -220,7 +226,8 @@ def main():
            "write_to_movie",
            "output_directory",
            "save_pngs",
            "skip_to_animation_number",
            "start_at_animation_number",
            "end_at_animation_number",
        ]
    ])
helpers.py: 20 changes
@@ -126,7 +126,7 @@ def rgba_to_color(rgba):
    return rgb_to_color(rgba[:3])

def rgb_to_hex(rgb):
    return Color(rgb = rgb).get_hex_l()
    return "#" + "".join('%02x'%int(255*x) for x in rgb)

def invert_color(color):
    return rgb_to_color(1.0 - color_to_rgb(color))
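For reference, a quick check of the new formatting path (values arbitrary):

    print rgb_to_hex((1., 0.5, 0.))   # prints "#ff7f00"; int(255*0.5) truncates to 127 ("7f")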
@@ -226,6 +226,24 @@ def all_elements_are_instances(iterable, Class):
def adjacent_pairs(objects):
    return zip(objects, list(objects[1:])+[objects[0]])

def batch_by_property(items, property_func):
    batches = []
    def add_batch(batch):
        if len(batch) > 0:
            batches.append(batch)
    curr_batch = []
    curr_prop = None
    for item in items:
        prop = property_func(item)
        if prop != curr_prop:
            add_batch(curr_batch)
            curr_prop = prop
            curr_batch = [item]
        else:
            curr_batch.append(item)
    add_batch(curr_batch)
    return batches

def complex_to_R3(complex_num):
    return np.array((complex_num.real, complex_num.imag, 0))
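A usage sketch of the new helper: it groups consecutive items that share a property value, which is how camera.py batches vectorized mobjects by background image file. Values here are illustrative only:

    files = ["a.png", "b.png", "c.svg", "d.png"]
    print batch_by_property(files, lambda name : name.split(".")[-1])
    # [['a.png', 'b.png'], ['c.svg'], ['d.png']]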
@@ -6,5 +6,5 @@ __all__ = [

from mobject import Mobject, Group
from point_cloud_mobject import Point, Mobject1D, Mobject2D, PMobject
from vectorized_mobject import VMobject, VGroup, BackgroundColoredVMobject
from vectorized_mobject import VMobject, VGroup
from image_mobject import ImageMobject
@@ -103,7 +103,8 @@ class Mobject(Container):
    def copy(self):
        #TODO, either justify reason for shallow copy, or
        #remove this redundancy everywhere
        # return self.deepcopy()
        return self.deepcopy()

        copy_mobject = copy.copy(self)
        copy_mobject.points = np.array(self.points)
        copy_mobject.submobjects = [
@@ -17,6 +17,7 @@ class VMobject(Mobject):
        "propagate_style_to_family" : False,
        "pre_function_handle_to_anchor_scale_factor" : 0.01,
        "make_smooth_after_applying_functions" : False,
        "background_image_file" : None,
    }

    def get_group_class(self):
@@ -120,6 +121,9 @@ class VMobject(Mobject):
        )
        return self

    def get_fill_rgb(self):
        return self.fill_rgb

    def get_fill_color(self):
        try:
            self.fill_rgb = np.clip(self.fill_rgb, 0.0, 1.0)
@@ -130,6 +134,9 @@ class VMobject(Mobject):
    def get_fill_opacity(self):
        return np.clip(self.fill_opacity, 0, 1)

    def get_stroke_rgb(self):
        return self.stroke_rgb

    def get_stroke_color(self):
        try:
            self.stroke_rgb = np.clip(self.stroke_rgb, 0, 1)
@@ -145,6 +152,16 @@ class VMobject(Mobject):
            return self.get_stroke_color()
        return self.get_fill_color()

    def color_using_background_image(self, background_image_file):
        self.background_image_file = background_image_file
        self.highlight(WHITE)
        for submob in self.submobjects:
            submob.color_using_background_image(background_image_file)
        return self

    def get_background_image_file(self):
        return self.background_image_file

    ## Drawing
    def start_at(self, point):
        if len(self.points) == 0:
@@ -458,46 +475,10 @@ class VectorizedPoint(VMobject):
    def get_height(self):
        return self.artificial_height

class BackgroundColoredVMobject(VMobject):
    CONFIG = {
        # Can be set to None, using set_background_array to initialize instead
        "background_image_file" : "color_background",
        "stroke_color" : WHITE,
        "fill_color" : WHITE,
    }
    def __init__(self, vmobject, **kwargs):
        # Note: At the moment, this does nothing to mimic
        # the full family of the vmobject passed in.
        VMobject.__init__(self, **kwargs)

        #Match properties of vmobject
        self.points = np.array(vmobject.points)
        self.set_stroke(WHITE, vmobject.get_stroke_width())
        self.set_fill(WHITE, vmobject.get_fill_opacity())
        for submob in vmobject.submobjects:
            self.add(BackgroundColoredVMobject(submob, **kwargs))

        if self.background_image_file != None:
            #Initialize background array
            path = get_full_raster_image_path(self.background_image_file)
            image = Image.open(path)
            self.set_background_array(np.array(image))

    def set_background_array(self, background_array):
        self.background_array = background_array

    def resize_background_array(self, new_width, new_height, mode = "RGBA"):
        image = Image.fromarray(self.background_array, mode = mode)
        resized_image = image.resize((new_width, new_height))
        self.background_array = np.array(resized_image)

    def resize_background_array_to_match(self, pixel_array):
        height, width = pixel_array.shape[:2]
        mode = "RGBA" if pixel_array.shape[2] == 4 else "RGB"
        self.resize_background_array(width, height, mode)

    def get_location(self):
        return self.get_anchors()[0]

    def set_location(self,new_loc):
        self.set_points(np.array([new_loc]))
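VectorizedPoint gains get_location/set_location accessors, so other code can treat a one-point mobject as a mutable position that interpolates like any other submobject. A toy stand-in showing the intended read/write semantics; PointWrapper is illustrative only, not manim's VectorizedPoint:

import numpy as np

class PointWrapper(object):
    # illustrative stand-in for the get_location/set_location pattern above
    def __init__(self, location):
        self.points = np.array([location], dtype=float)
    def get_location(self):
        return self.points[0]
    def set_location(self, new_loc):
        self.points = np.array([new_loc], dtype=float)

source = PointWrapper(np.zeros(3))
source.set_location(np.array([2.0, 1.0, 0.0]))
print(source.get_location())   # [2. 1. 0.]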
@@ -1,85 +1,85 @@
"""
mnist_loader
~~~~~~~~~~~~

A library to load the MNIST image data.  For details of the data
structures that are returned, see the doc strings for ``load_data``
and ``load_data_wrapper``.  In practice, ``load_data_wrapper`` is the
function usually called by our neural network code.
"""

#### Libraries
# Standard library
import cPickle
import gzip

# Third-party libraries
import numpy as np

def load_data():
    """Return the MNIST data as a tuple containing the training data,
    the validation data, and the test data.

    The ``training_data`` is returned as a tuple with two entries.
    The first entry contains the actual training images.  This is a
    numpy ndarray with 50,000 entries.  Each entry is, in turn, a
    numpy ndarray with 784 values, representing the 28 * 28 = 784
    pixels in a single MNIST image.

    The second entry in the ``training_data`` tuple is a numpy ndarray
    containing 50,000 entries.  Those entries are just the digit
    values (0...9) for the corresponding images contained in the first
    entry of the tuple.

    The ``validation_data`` and ``test_data`` are similar, except
    each contains only 10,000 images.

    This is a nice data format, but for use in neural networks it's
    helpful to modify the format of the ``training_data`` a little.
    That's done in the wrapper function ``load_data_wrapper()``, see
    below.
    """
    f = gzip.open('/Users/grant/cs/neural-networks-and-deep-learning/data/mnist.pkl.gz', 'rb')
    training_data, validation_data, test_data = cPickle.load(f)
    f.close()
    return (training_data, validation_data, test_data)

def load_data_wrapper():
    """Return a tuple containing ``(training_data, validation_data,
    test_data)``. Based on ``load_data``, but the format is more
    convenient for use in our implementation of neural networks.

    In particular, ``training_data`` is a list containing 50,000
    2-tuples ``(x, y)``.  ``x`` is a 784-dimensional numpy.ndarray
    containing the input image.  ``y`` is a 10-dimensional
    numpy.ndarray representing the unit vector corresponding to the
    correct digit for ``x``.

    ``validation_data`` and ``test_data`` are lists containing 10,000
    2-tuples ``(x, y)``.  In each case, ``x`` is a 784-dimensional
    numpy.ndarray containing the input image, and ``y`` is the
    corresponding classification, i.e., the digit values (integers)
    corresponding to ``x``.

    Obviously, this means we're using slightly different formats for
    the training data and the validation / test data.  These formats
    turn out to be the most convenient for use in our neural network
    code."""
    tr_d, va_d, te_d = load_data()
    training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
    training_results = [vectorized_result(y) for y in tr_d[1]]
    training_data = zip(training_inputs, training_results)
    validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
    validation_data = zip(validation_inputs, va_d[1])
    test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
    test_data = zip(test_inputs, te_d[1])
    return (training_data, validation_data, test_data)

def vectorized_result(j):
    """Return a 10-dimensional unit vector with a 1.0 in the jth
    position and zeroes elsewhere.  This is used to convert a digit
    (0...9) into a corresponding desired output from the neural
    network."""
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e
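A quick check of vectorized_result's one-hot output, with the function re-declared inline so the snippet runs standalone rather than importing the file above:

import numpy as np

def vectorized_result(j):
    # same as the function above
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e

print(vectorized_result(3).T)   # a 1x10 row with a single 1.0 at index 3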
@@ -39,7 +39,8 @@ class Scene(Container):
        "name" : None,
        "always_continually_update" : False,
        "random_seed" : 0,
        "skip_to_animation_number" : None,
        "start_at_animation_number" : None,
        "end_at_animation_number" : None,
    }
    def __init__(self, **kwargs):
        Container.__init__(self, **kwargs) # Perhaps allow passing in a non-empty *mobjects parameter?
@@ -406,14 +407,17 @@ class Scene(Container):
        if len(args) == 0:
            warnings.warn("Called Scene.play with no animations")
            return
        if self.skip_to_animation_number:
            if self.num_plays + 1 == self.skip_to_animation_number:
        if self.start_at_animation_number:
            if self.num_plays == self.start_at_animation_number:
                self.skip_animations = False
        if self.end_at_animation_number:
            if self.num_plays >= self.end_at_animation_number:
                self.skip_animations = True
                return self #Don't even bother with the rest...
        if self.skip_animations:
            kwargs["run_time"] = 0

        animations = self.compile_play_args_to_animation_list(*args)
        self.num_plays += 1

        sync_animation_run_times_and_rate_funcs(*animations, **kwargs)
        moving_mobjects = self.get_moving_mobjects(*animations)
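With start_at_animation_number and end_at_animation_number in place, skip_animations starts out True (see the extract_scene.py hunk earlier) and is toggled once per play call. A standalone sketch of that gating with hypothetical numbers, mirroring the checks in Scene.play above:

# Hypothetical values; not part of scene.py.
start_at, end_at = 3, 6
skip_animations = start_at is not None   # skipping until the start index is reached
for num_plays in range(8):
    if start_at is not None and num_plays == start_at:
        skip_animations = False
    if end_at is not None and num_plays >= end_at:
        skip_animations = True
    print(num_plays, "skipped" if skip_animations else "rendered")
# plays 3, 4 and 5 are rendered; everything before and after is skipped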
@@ -429,6 +433,7 @@ class Scene(Container):
        self.mobjects_from_last_animation = moving_mobjects
        self.clean_up_animations(*animations)
        self.continual_update(0)
        self.num_plays += 1
        return self

    def clean_up_animations(self, *animations):
120 topics/light.py
@@ -32,6 +32,7 @@ AMBIENT_FULL = 0.5
AMBIENT_DIMMED = 0.2
SPOTLIGHT_FULL = 0.9
SPOTLIGHT_DIMMED = 0.2
LIGHTHOUSE_HEIGHT = 0.8

LIGHT_COLOR = YELLOW
DEGREES = TAU/360
@@ -53,20 +54,24 @@ class LightSource(VMobject):
    # a spotlight
    # and a shadow
    CONFIG = {
        "source_point": ORIGIN,
        "source_point": VectorizedPoint(location = ORIGIN, stroke_width = 0, fill_opacity = 0),
        "color": LIGHT_COLOR,
        "num_levels": 10,
        "radius": 5,
        "screen": None,
        "opacity_function": inverse_quadratic(1,2,1),
        "max_opacity_ambient": AMBIENT_FULL,
        "max_opacity_spotlight": SPOTLIGHT_FULL
        "max_opacity_spotlight": SPOTLIGHT_FULL,
        "camera": None
    }

    def generate_points(self):

        self.add(self.source_point)

        self.lighthouse = Lighthouse()
        self.ambient_light = AmbientLight(
            source_point = self.source_point,
            source_point = VectorizedPoint(location = self.get_source_point()),
            color = self.color,
            num_levels = self.num_levels,
            radius = self.radius,
@@ -75,23 +80,24 @@ class LightSource(VMobject):
        )
        if self.has_screen():
            self.spotlight = Spotlight(
                source_point = self.source_point,
                source_point = VectorizedPoint(location = self.get_source_point()),
                color = self.color,
                num_levels = self.num_levels,
                radius = self.radius,
                screen = self.screen,
                opacity_function = self.opacity_function,
                max_opacity = self.max_opacity_spotlight
                max_opacity = self.max_opacity_spotlight,
                camera = self.camera
            )
        else:
            self.spotlight = Spotlight()

        self.shadow = VMobject(fill_color = SHADOW_COLOR, fill_opacity = 1.0, stroke_color = BLACK)
        self.lighthouse.next_to(self.source_point,DOWN,buff = 0)
        self.ambient_light.move_source_to(self.source_point)
        self.lighthouse.next_to(self.get_source_point(),DOWN,buff = 0)
        self.ambient_light.move_source_to(self.get_source_point())

        if self.has_screen():
            self.spotlight.move_source_to(self.source_point)
            self.spotlight.move_source_to(self.get_source_point())
            self.update_shadow()

        self.add(self.ambient_light,self.spotlight,self.lighthouse, self.shadow)
@@ -113,29 +119,36 @@ class LightSource(VMobject):
        self.max_opacity_spotlight = new_opacity
        self.spotlight.dimming(new_opacity)

    def set_camera(self,new_cam):
        self.camera = new_cam
        self.spotlight.camera = new_cam

    def set_screen(self, new_screen):
        if self.has_screen():
            self.spotlight.screen = new_screen
        else:
            # Note: See below
            # index = self.submobjects.index(self.spotlight)
            index = self.submobjects.index(self.spotlight)
            camera = self.spotlight.camera
            self.remove(self.spotlight)
            self.spotlight = Spotlight(
                source_point = self.source_point,
                source_point = VectorizedPoint(location = self.get_source_point()),
                color = self.color,
                num_levels = self.num_levels,
                radius = self.radius,
                screen = new_screen
                screen = new_screen,
                camera = self.camera
            )
            self.spotlight.move_source_to(self.source_point)
            self.spotlight.move_source_to(self.get_source_point())

            # Note: This line will make spotlight show up at the end
            # of the submobjects list, which can make it show up on
            # top of the shadow. To make it show up in the
            # same spot, you could try the following line,
            # where "index" is what I defined above:
            # self.submobjects.insert(index, self.spotlight)
            self.add(self.spotlight)
            self.submobjects.insert(index, self.spotlight)
            #self.add(self.spotlight)

        # in any case
        self.screen = new_screen
@@ -145,13 +158,16 @@ class LightSource(VMobject):

    def move_source_to(self,point):
        apoint = np.array(point)
        v = apoint - self.source_point
        v = apoint - self.get_source_point()
        # Note: As discussed, things stand to behave better if source
        # point is a submobject, so that it automatically interpolates
        # during an animation, and other updates can be defined wrt
        # that source point's location
        self.source_point = apoint
        self.lighthouse.next_to(apoint,DOWN,buff = 0)
        self.source_point.set_location(apoint)
        #self.lighthouse.next_to(apoint,DOWN,buff = 0)
        #self.ambient_light.move_source_to(apoint)
        self.lighthouse.shift(v)
        #self.ambient_light.shift(v)
        self.ambient_light.move_source_to(apoint)
        if self.has_screen():
            self.spotlight.move_source_to(apoint)
@@ -167,19 +183,20 @@ class LightSource(VMobject):
            self.spotlight.update_sectors()
        self.update_shadow()

    def get_source_point(self):
        return self.source_point.get_location()

    def update_shadow(self):

        point = self.source_point
        point = self.get_source_point()
        projected_screen_points = []
        if not self.has_screen():
            return
        for point in self.screen.get_anchors():
            projected_screen_points.append(self.spotlight.project(point))

        # print "projected", self.screen.get_anchors(), "onto", projected_screen_points

        projected_source = project_along_vector(self.source_point,self.spotlight.projection_direction())
        projected_source = project_along_vector(self.get_source_point(),self.spotlight.projection_direction())

        projected_point_cloud_3d = np.append(
            projected_screen_points,
@@ -197,7 +214,7 @@ class LightSource(VMobject):
        hull = []

        # we also need the projected source point
        source_point_2d = np.dot(self.spotlight.project(self.source_point),back_rotation_matrix.T)[:2]
        source_point_2d = np.dot(self.spotlight.project(self.get_source_point()),back_rotation_matrix.T)[:2]

        index = 0
        for point in point_cloud_2d[hull_2d.vertices]:
@@ -274,7 +291,7 @@ class SwitchOff(LaggedStart):
class Lighthouse(SVGMobject):
    CONFIG = {
        "file_name" : "lighthouse",
        "height" : 0.5
        "height" : LIGHTHOUSE_HEIGHT
    }

    def move_to(self,point):
@@ -292,7 +309,7 @@ class AmbientLight(VMobject):
    # * the number of subdivisions (levels, annuli)

    CONFIG = {
        "source_point" : ORIGIN,
        "source_point": VectorizedPoint(location = ORIGIN, stroke_width = 0, fill_opacity = 0),
        "opacity_function" : lambda r : 1.0/(r+1.0)**2,
        "color" : LIGHT_COLOR,
        "max_opacity" : 1.0,
@@ -301,8 +318,6 @@ class AmbientLight(VMobject):
    }

    def generate_points(self):
        self.source_point = np.array(self.source_point)

        # in theory, this method is only called once, right?
        # so removing submobs should not be necessary
        #
@@ -312,6 +327,8 @@ class AmbientLight(VMobject):
        for submob in self.submobjects:
            self.remove(submob)

        self.add(self.source_point)

        # create annuli
        self.radius = float(self.radius)
        dr = self.radius / self.num_levels
@@ -323,22 +340,29 @@ class AmbientLight(VMobject):
                color = self.color,
                fill_opacity = alpha
            )
            annulus.move_arc_center_to(self.source_point)
            annulus.move_to(self.get_source_point())
            self.add(annulus)

    def move_source_to(self,point):
        # Note: Best to rewrite in terms of VectorizedPoint source_point
        v = np.array(point) - self.source_point
        self.source_point = np.array(point)
        self.shift(v)
        #old_source_point = self.get_source_point()
        #self.shift(point - old_source_point)
        self.move_to(point)

        return self

    def get_source_point(self):
        return self.source_point.get_location()

    def dimming(self,new_alpha):
@@ -350,10 +374,23 @@ class AmbientLight(VMobject):
            submob.set_fill(opacity = new_submob_alpha)


class Spotlight(VMobject):

    CONFIG = {
        "source_point" : ORIGIN,
        "source_point": VectorizedPoint(location = ORIGIN, stroke_width = 0, fill_opacity = 0),
        "opacity_function" : lambda r : 1.0/(r/2+1.0)**2,
        "color" : LIGHT_COLOR,
        "max_opacity" : 1.0,
@@ -379,10 +416,17 @@ class Spotlight(VMobject):
        w = project_along_vector(point,v)
        return w

    def get_source_point(self):
        return self.source_point.get_location()

    def generate_points(self):

        self.submobjects = []

        self.add(self.source_point)

        if self.screen != None:
            # look for the screen and create annular sectors
            lower_angle, upper_angle = self.viewing_angles(self.screen)
@@ -418,18 +462,19 @@ class Spotlight(VMobject):
        projected_RIGHT = self.project(RIGHT)
        omega = angle_between_vectors(rotated_RIGHT,projected_RIGHT)
        annular_sector.rotate(omega, axis = self.projection_direction())
        annular_sector.move_arc_center_to(self.source_point)
        annular_sector.move_arc_center_to(self.get_source_point())

        return annular_sector

    def viewing_angle_of_point(self,point):
        # as measured from the positive x-axis
        v1 = self.project(RIGHT)
        v2 = self.project(np.array(point) - self.source_point)
        v2 = self.project(np.array(point) - self.get_source_point())
        absolute_angle = angle_between_vectors(v1, v2)
        # determine the angle's sign depending on their plane's
        # choice of orientation. That choice is set by the camera
        # position, i. e. projection direction

        if np.dot(self.projection_direction(),np.cross(v1, v2)) > 0:
            return absolute_angle
        else:
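viewing_angle_of_point reduces to a signed-angle computation: the magnitude comes from the dot product and the sign from whether v1 x v2 points along the projection direction. A self-contained numpy sketch of that idea; angle_between_vectors here is a local stand-in, not the helpers.py version:

import numpy as np

def angle_between_vectors(v1, v2):
    # unsigned angle via the dot product
    cos_theta = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    return np.arccos(np.clip(cos_theta, -1.0, 1.0))

def signed_angle(v1, v2, normal):
    # sign chosen by whether (v1 x v2) points along the viewing normal,
    # mirroring the cross/dot test in viewing_angle_of_point above
    angle = angle_between_vectors(v1, v2)
    return angle if np.dot(normal, np.cross(v1, v2)) > 0 else -angle

print(signed_angle(np.array([1., 0., 0.]), np.array([0., 1., 0.]), np.array([0., 0., 1.])))   # about +pi/2
print(signed_angle(np.array([1., 0., 0.]), np.array([0., -1., 0.]), np.array([0., 0., 1.])))  # about -pi/2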
@@ -474,7 +519,9 @@ class Spotlight(VMobject):
        return u

    def move_source_to(self,point):
        self.source_point = np.array(point)
        self.source_point.set_location(np.array(point))
        #self.source_point.move_to(np.array(point))
        #self.move_to(point)
        self.update_sectors()
        return self
@@ -485,9 +532,12 @@ class Spotlight(VMobject):
        for submob in self.submobject_family():
            if type(submob) == AnnularSector:
                lower_angle, upper_angle = self.viewing_angles(self.screen)
                dr = submob.outer_radius - submob.inner_radius
                #dr = submob.outer_radius - submob.inner_radius
                dr = self.radius / self.num_levels
                new_submob = self.new_sector(submob.inner_radius,dr,lower_angle,upper_angle)
                submob.points = new_submob.points
                submob.set_fill(opacity = 10 * self.opacity_function(submob.outer_radius))
                print "new opacity:", self.opacity_function(submob.outer_radius)
@@ -136,8 +136,6 @@ class NumberLine(VMobject):
        self.tip = tip
        self.add(tip)


class UnitInterval(NumberLine):
    CONFIG = {
        "x_min" : 0,
@@ -40,22 +40,17 @@ class ThreeDCamera(CameraWithPerspective):
        self.rotation_mobject = VectorizedPoint()
        self.set_position(self.phi, self.theta, self.distance)

    def get_color(self, method):
        color = method()
        vmobject = method.im_self
    def modified_rgb(self, vmobject, rgb):
        if should_shade_in_3d(vmobject):
            return Color(rgb = self.get_shaded_rgb(
                color_to_rgb(color),
                normal_vect = self.get_unit_normal_vect(vmobject)
            ))
            return self.get_shaded_rgb(rgb, self.get_unit_normal_vect(vmobject))
        else:
            return color

    def get_stroke_color(self, vmobject):
        return self.get_color(vmobject.get_stroke_color)
    def get_stroke_rgb(self, vmobject):
        return self.modified_rgb(vmobject, vmobject.get_stroke_rgb())

    def get_fill_color(self, vmobject):
        return self.get_color(vmobject.get_fill_color)
    def get_fill_rgb(self, vmobject):
        return self.modified_rgb(vmobject, vmobject.get_fill_rgb())

    def get_shaded_rgb(self, rgb, normal_vect):
        brightness = np.dot(normal_vect, self.unit_sun_vect)**2
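The camera now shades raw rgb arrays rather than Color objects; the visible part of get_shaded_rgb computes a brightness term from the squared dot product of the surface normal with a unit sun vector. A rough standalone sketch of that idea; the blending step is not shown in the hunk, so the scaling below is an assumed convention, not the repo's exact formula:

import numpy as np

unit_sun_vect = np.array([0.0, 0.0, 1.0])   # hypothetical sun direction (unit vector)

def shaded_rgb(rgb, normal_vect):
    # brightness term as in the hunk above; the blend itself is illustrative only
    brightness = np.dot(normal_vect, unit_sun_vect) ** 2
    return np.clip(np.array(rgb) * (0.5 + 0.5 * brightness), 0, 1)

print(shaded_rgb([0.2, 0.4, 0.8], np.array([0.0, 0.0, 1.0])))   # face toward the sun: unchanged
print(shaded_rgb([0.2, 0.4, 0.8], np.array([1.0, 0.0, 0.0])))   # face edge-on: dimmed by half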