Mirror of https://github.com/3b1b/manim.git, synced 2025-08-05 16:49:03 +00:00

Merge branch 'master' into lighthouse2

Commit 7dde8cfdf2: 13 changed files with 747 additions and 226 deletions
.gitignore (vendored): 2 changes

@@ -8,6 +8,8 @@ prettiness_hall_of_fame.py
 files/
+ben_playground.py
+ben_cairo_test.py
 .floo
 .flooignore
 *.xml
 *.iml
active_projects/fourier.py

@@ -30,18 +30,48 @@ from mobject.svg_mobject import *
 from mobject.tex_mobject import *
 from topics.graph_scene import *

+USE_ALMOST_FOURIER_BY_DEFAULT = True
+NUM_SAMPLES_FOR_FFT = 1000
+
+
+def get_fourier_graph(
+    axes, time_func, t_min, t_max,
+    n_samples = NUM_SAMPLES_FOR_FFT,
+    complex_to_real_func = lambda z : z.real,
+    color = RED,
+    ):
+    # N = n_samples
+    # T = time_range/n_samples
+    time_range = float(t_max - t_min)
+    time_step_size = time_range/n_samples
+    time_samples = time_func(np.linspace(t_min, t_max, n_samples))
+    fft_output = np.fft.fft(time_samples)
+    frequencies = np.linspace(0.0, n_samples/(2.0*time_range), n_samples//2)
+    # Cycles per second of fourier_samples[1]:
+    # (1/time_range)*n_samples
+    # freq_step_size = 1./time_range
+    graph = VMobject()
+    graph.set_points_smoothly([
+        axes.coords_to_point(
+            x, 200.0*complex_to_real_func(y)/n_samples,
+        )
+        for x, y in zip(frequencies, fft_output[:n_samples//2])
+    ])
+    graph.highlight(color)
+    return graph
+
 def get_fourier_transform(
     func, t_min, t_max,
-    real_part = True,
-    use_almost_fourier = True,
+    complex_to_real_func = lambda z : z.real,
+    use_almost_fourier = USE_ALMOST_FOURIER_BY_DEFAULT,
     ):
-    # part = "real" if real_part else "imag"
-    trig = np.cos if real_part else np.sin
     scalar = 1./(t_max - t_min) if use_almost_fourier else 1.0
     def fourier_transform(f):
         return scalar*scipy.integrate.quad(
-            lambda t : func(t)*trig(-TAU*f*t),
+            lambda t : complex_to_real_func(
+                # f(t) e^{-TAU*i*f*t}
+                func(t)*np.exp(complex(0, -TAU*f*t))
+            ),
             t_min, t_max
         )[0]
     return fourier_transform
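A standalone sketch of the convention above (not part of the commit; TAU = 2*pi restated so the snippet runs on its own): the "almost Fourier transform" divides by the sampling window, so a pure cosine evaluated at its own winding frequency comes out near 0.5 instead of growing with t_max.

import numpy as np
import scipy.integrate

TAU = 2*np.pi  # defined in manim's helpers; restated here to stay standalone

def almost_fourier(func, t_min, t_max):
    scalar = 1.0/(t_max - t_min)
    def transform(f):
        return scalar*scipy.integrate.quad(
            lambda t: (func(t)*np.exp(complex(0, -TAU*f*t))).real,
            t_min, t_max
        )[0]
    return transform

ft = almost_fourier(lambda t: np.cos(TAU*3*t), 0, 2)
print(round(ft(3), 3))  # ~0.5: winding frequency matches the signal
print(round(ft(5), 3))  # ~0.0: mismatched frequencies average out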
active_projects/uncertainty.py: 391 additions (new file)

@@ -0,0 +1,391 @@
from helpers import *
import scipy

from animation.animation import Animation
from animation.transform import *
from animation.simple_animations import *
from animation.playground import *
from animation.continual_animation import *
from topics.geometry import *
from topics.characters import *
from topics.functions import *
from topics.fractals import *
from topics.number_line import *
from topics.combinatorics import *
from topics.numerals import *
from topics.three_dimensions import *
from topics.objects import *
from topics.probability import *
from topics.complex_numbers import *
from topics.common_scenes import *
from scene import Scene
from scene.reconfigurable_scene import ReconfigurableScene
from scene.zoomed_scene import *
from camera import Camera
from mobject import *
from mobject.image_mobject import *
from mobject.vectorized_mobject import *
from mobject.svg_mobject import *
from mobject.tex_mobject import *
from topics.graph_scene import *

from active_projects.fourier import *


FREQUENCY_COLOR = RED
USE_ALMOST_FOURIER_BY_DEFAULT = False

class GaussianDistributionWrapper(Line):
    """
    Encodes a 2d normal distribution as a mobject, so that it
    can be interpolated during animations. It is a line whose
    start point encodes the coordinates of mu, and whose
    (end point - start point) encodes the coordinates of sigma.
    """
    CONFIG = {
        "stroke_width" : 0,
        "mu_x" : 0,
        "sigma_x" : 1,
        "mu_y" : 0,
        "sigma_y" : 0,
    }
    def __init__(self, **kwargs):
        Line.__init__(self, ORIGIN, RIGHT, **kwargs)
        self.change_parameters(self.mu_x, self.mu_y, self.sigma_x, self.sigma_y)

    def change_parameters(self, mu_x = None, mu_y = None, sigma_x = None, sigma_y = None):
        curr_parameters = self.get_parameteters()
        args = [mu_x, mu_y, sigma_x, sigma_y]
        new_parameters = [
            # Explicit None check; "arg or curr" would silently ignore zeros
            curr if arg is None else arg
            for curr, arg in zip(curr_parameters, args)
        ]
        mu_x, mu_y, sigma_x, sigma_y = new_parameters
        mu_point = mu_x*RIGHT + mu_y*UP
        sigma_vect = sigma_x*RIGHT + sigma_y*UP
        self.put_start_and_end_on(mu_point, mu_point + sigma_vect)
        return self

    def get_parameteters(self):
        """Return mu_x, mu_y, sigma_x, sigma_y"""
        start, end = self.get_start_and_end()
        return tuple(it.chain(start[:2], (end - start)[:2]))

    def get_random_points(self, size = 1):
        mu_x, mu_y, sigma_x, sigma_y = self.get_parameteters()
        x_vals = np.random.normal(mu_x, sigma_x, size)
        y_vals = np.random.normal(mu_y, sigma_y, size)
        return np.array([
            x*RIGHT + y*UP
            for x, y in zip(x_vals, y_vals)
        ])

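Why the endpoint encoding matters: interpolating the line's endpoints linearly interpolates (mu, sigma) themselves, which is what lets Transform-style animations morph one distribution into another. A minimal numpy-only sketch (not from the commit):

import numpy as np

def interpolate(start, end, alpha):
    # the same linear blend manim applies to mobject points
    return (1 - alpha)*start + alpha*end

start_a, end_a = np.array([0.0, 0.0]), np.array([1.0, 0.0])    # mu=(0,0), sigma=(1,0)
start_b, end_b = np.array([2.0, 1.0]), np.array([2.75, 1.0])   # mu=(2,1), sigma=(0.75,0)

for alpha in (0.0, 0.5, 1.0):
    start = interpolate(start_a, start_b, alpha)
    end = interpolate(end_a, end_b, alpha)
    print(alpha, "mu =", start, "sigma =", end - start)
# alpha=0.5 gives mu=(1, 0.5), sigma=(0.875, 0): halfway in parameter space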
class ProbabalisticMobjectCloud(ContinualAnimation):
    CONFIG = {
        "fill_opacity" : 0.25,
        "n_copies" : 100,
        "gaussian_distribution_wrapper_config" : {
            "sigma_x" : 1,
        }
    }
    def __init__(self, prototype, **kwargs):
        digest_config(self, kwargs)
        fill_opacity = self.fill_opacity or prototype.get_fill_opacity()
        self.gaussian_distribution_wrapper = GaussianDistributionWrapper(
            **self.gaussian_distribution_wrapper_config
        )
        group = VGroup(*[
            prototype.copy().set_fill(opacity = fill_opacity)
            for x in range(self.n_copies)
        ])
        ContinualAnimation.__init__(self, group, **kwargs)

    def update_mobject(self, dt):
        group = self.mobject
        points = self.gaussian_distribution_wrapper.get_random_points(len(group))
        for mob, point in zip(group, points):
            self.update_mobject_by_point(mob, point)
        return self

    def update_mobject_by_point(self, mobject, point):
        mobject.move_to(point)
        return self

class ProbabalisticDotCloud(ProbabalisticMobjectCloud):
    CONFIG = {
        "color" : BLUE,
    }
    def __init__(self, **kwargs):
        digest_config(self, kwargs)
        dot = Dot(color = self.color)
        ProbabalisticMobjectCloud.__init__(self, dot)

class ProbabalisticVectorCloud(ProbabalisticMobjectCloud):
    CONFIG = {
        "color" : RED,
        "n_copies" : 20,
        "fill_opacity" : 0.5,
        "center_func" : lambda : ORIGIN,
    }
    def __init__(self, **kwargs):
        digest_config(self, kwargs)
        vector = Vector(
            RIGHT, color = self.color,
            max_tip_length_to_length_ratio = 1,
        )
        ProbabalisticMobjectCloud.__init__(self, vector)

    def update_mobject_by_point(self, vector, point):
        vector.put_start_and_end_on(
            self.center_func(),
            point
        )

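The cloud effect is nothing more than redrawing n fresh samples from the current Gaussian on every update tick. A numpy-only sketch (not from the commit):

import numpy as np

def sample_cloud(mu, sigma, n=100):
    # one frame's worth of copies; resampled each tick by update_mobject
    xs = np.random.normal(mu[0], sigma[0], n)
    ys = np.random.normal(mu[1], sigma[1], n)
    return np.column_stack([xs, ys])

points = sample_cloud(mu=(0.0, 0.0), sigma=(1.0, 0.5))
print(points.shape)                   # (100, 2)
print(np.round(points.mean(axis=0)))  # close to mu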
###################

class MentionUncertaintyPrinciple(TeacherStudentsScene):
    def construct(self):
        title = TextMobject("Heisenberg Uncertainty Principle")
        title.to_edge(UP)

        dot_cloud = ProbabalisticDotCloud()
        vector_cloud = ProbabalisticVectorCloud(
            gaussian_distribution_wrapper_config = {"sigma_x" : 0.2},
            center_func = dot_cloud.gaussian_distribution_wrapper.get_start,
        )
        for cloud in dot_cloud, vector_cloud:
            gdw = cloud.gaussian_distribution_wrapper
            gdw.move_to(title.get_center(), LEFT)
            gdw.shift(2*DOWN)
        vector_cloud.gaussian_distribution_wrapper.shift(3*RIGHT)

        def get_brace_text_group_update(gdw, vect, text):
            brace = Brace(gdw, vect)
            text = brace.get_tex("\\sigma_{\\text{%s}}"%text, buff = SMALL_BUFF)
            group = VGroup(brace, text)
            def update_group(group):
                brace, text = group
                brace.match_width(gdw, stretch = True)
                brace.next_to(gdw, vect)
                text.next_to(brace, vect, buff = SMALL_BUFF)
            return ContinualUpdateFromFunc(group, update_group)

        dot_brace_anim = get_brace_text_group_update(
            dot_cloud.gaussian_distribution_wrapper,
            DOWN, "position",
        )
        vector_brace_anim = get_brace_text_group_update(
            vector_cloud.gaussian_distribution_wrapper,
            UP, "momentum",
        )

        self.add(title)
        self.add(dot_cloud)
        self.play(
            Write(title),
            self.teacher.change, "raise_right_hand",
            self.get_student_changes(*["pondering"]*3)
        )
        self.play(
            Write(dot_brace_anim.mobject, run_time = 1)
        )
        self.add(dot_brace_anim)
        self.wait()
        # self.wait(2)
        self.play(
            dot_cloud.gaussian_distribution_wrapper.change_parameters,
            {"sigma_x" : 0.1},
            run_time = 2,
        )
        self.wait()
        self.add(vector_cloud)
        self.play(
            FadeIn(vector_brace_anim.mobject)
        )
        self.add(vector_brace_anim)
        self.play(
            vector_cloud.gaussian_distribution_wrapper.change_parameters,
            {"sigma_x" : 1},
            self.get_student_changes(*3*["confused"]),
            run_time = 3,
        )
        # Back and forth
        for x in range(2):
            self.play(
                dot_cloud.gaussian_distribution_wrapper.change_parameters,
                {"sigma_x" : 2},
                vector_cloud.gaussian_distribution_wrapper.change_parameters,
                {"sigma_x" : 0.1},
                run_time = 3,
            )
            self.change_student_modes("thinking", "erm", "sassy")
            self.play(
                dot_cloud.gaussian_distribution_wrapper.change_parameters,
                {"sigma_x" : 0.1},
                vector_cloud.gaussian_distribution_wrapper.change_parameters,
                {"sigma_x" : 1},
                run_time = 3,
            )
        self.wait()

class FourierTradeoff(Scene):
    def construct(self):
        # Set up axes
        time_mean = 4
        time_axes = Axes(
            x_min = 0,
            x_max = 2*time_mean,
            x_axis_config = {"unit_size" : 1.5},
            y_min = -2,
            y_max = 2,
            y_axis_config = {"unit_size" : 0.5}
        )
        time_label = TextMobject("Time")
        time_label.next_to(
            time_axes.x_axis.get_right(), UP,
            buff = MED_SMALL_BUFF,
        )
        time_axes.add(time_label)
        time_axes.center().to_edge(UP)
        time_axes.x_axis.add_numbers(*range(1, 2*time_mean))

        frequency_axes = Axes(
            x_min = 0,
            x_max = 8,
            x_axis_config = {"unit_size" : 1.5},
            y_min = 0,
            y_max = 15,
            y_axis_config = {
                "unit_size" : 0.15,
                "tick_frequency" : 5,
            },
            color = TEAL,
        )
        frequency_label = TextMobject("Frequency")
        frequency_label.next_to(
            frequency_axes.x_axis.get_right(), UP,
            buff = MED_SMALL_BUFF,
        )
        frequency_label.highlight(FREQUENCY_COLOR)
        frequency_axes.add(frequency_label)
        frequency_axes.move_to(time_axes, LEFT)
        frequency_axes.to_edge(DOWN, buff = LARGE_BUFF)
        frequency_axes.x_axis.add_numbers()

        # Graph information

        # The x-coordinate of this point determines the width of the wave packet graph
        width_tracker = VectorizedPoint(0.5*RIGHT)
        def get_width():
            return width_tracker.get_center()[0]

        def get_wave_packet_function():
            factor = 1./get_width()
            return lambda t : np.sqrt(factor)*np.cos(4*TAU*t)*np.exp(-factor*(t-time_mean)**2)

        def get_wave_packet():
            graph = time_axes.get_graph(
                get_wave_packet_function(),
                num_graph_points = 200,
            )
            graph.highlight(YELLOW)
            return graph

        time_radius = 10
        def get_wave_packet_fourier_transform():
            return get_fourier_graph(
                frequency_axes, get_wave_packet_function(),
                t_min = time_mean - time_radius,
                t_max = time_mean + time_radius,
                n_samples = 2*time_radius*17,
                complex_to_real_func = abs,
                color = FREQUENCY_COLOR,
            )

        wave_packet = get_wave_packet()
        wave_packet_update = UpdateFromFunc(
            wave_packet,
            lambda g : Transform(g, get_wave_packet()).update(1)
        )
        fourier_graph = get_wave_packet_fourier_transform()
        fourier_graph_update = UpdateFromFunc(
            fourier_graph,
            lambda g : Transform(g, get_wave_packet_fourier_transform()).update(1)
        )

        arrow = Arrow(
            wave_packet, frequency_axes.coords_to_point(4, 10),
            color = FREQUENCY_COLOR,
        )
        fourier_words = TextMobject("Fourier Transform")
        fourier_words.next_to(arrow, RIGHT, buff = MED_LARGE_BUFF)
        sub_words = TextMobject("(To be explained shortly)")
        sub_words.highlight(BLUE)
        sub_words.scale(0.75)
        sub_words.next_to(fourier_words, DOWN)

        # Draw items
        self.add(time_axes, frequency_axes)
        self.play(ShowCreation(wave_packet))
        self.play(
            ReplacementTransform(
                wave_packet.copy(),
                fourier_graph,
            ),
            GrowArrow(arrow),
            Write(fourier_words, run_time = 1)
        )
        # self.play(FadeOut(arrow))
        self.wait()
        for width in 6, 0.1, 1:
            self.play(
                width_tracker.move_to, width*RIGHT,
                wave_packet_update,
                fourier_graph_update,
                run_time = 3
            )
            if sub_words not in self.mobjects:
                self.play(FadeIn(sub_words))
            else:
                self.wait()
        self.wait()

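What FourierTradeoff animates, checked numerically (standalone sketch, not from the commit): squeezing the packet in time spreads its spectrum, and widening it in time narrows the spectrum.

import numpy as np

def spectral_spread(time_width, time_mean=4.0):
    t = np.linspace(0, 8, 4096)
    factor = 1.0/time_width
    signal = np.sqrt(factor)*np.cos(4*2*np.pi*t)*np.exp(-factor*(t - time_mean)**2)
    spectrum = np.abs(np.fft.rfft(signal))
    freqs = np.fft.rfftfreq(len(t), t[1] - t[0])
    p = spectrum/spectrum.sum()  # treat |spectrum| as a distribution
    mean = (freqs*p).sum()
    return np.sqrt(((freqs - mean)**2*p).sum())

for width in (6, 1, 0.1):
    print(width, round(spectral_spread(width), 2))
# the frequency spread grows as the time width shrinks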
animation/transform.py

@@ -48,7 +48,7 @@ class Transform(Animation):
             self.path_arc,
             self.path_arc_axis,
         )

     def get_all_mobjects(self):
         return self.mobject, self.starting_mobject, self.target_mobject

camera/camera.py: 236 changes

@@ -8,7 +8,7 @@ import aggdraw

 from helpers import *
 from mobject import Mobject, PMobject, VMobject, \
-    ImageMobject, Group, BackgroundColoredVMobject
+    ImageMobject, Group

 class Camera(object):
     CONFIG = {

@@ -31,7 +31,11 @@ class Camera(object):
         "image_mode" : "RGBA",
         "n_rgb_coords" : 4,
         "background_alpha" : 0, #Out of color_max_val
-        "pixel_array_dtype" : 'uint8'
+        "pixel_array_dtype" : 'uint8',
+        "use_z_coordinate_for_display_order" : False,
+        # z_buff_func is only used if the flag above is set to True.
+        # Round the z coordinate to the nearest hundredth when comparing.
+        "z_buff_func" : lambda m : np.round(m.get_center()[2], 2),
     }

     def __init__(self, background = None, **kwargs):

@@ -94,7 +98,12 @@ class Camera(object):
         return retval

     def set_pixel_array(self, pixel_array, convert_from_floats = False):
-        self.pixel_array = self.convert_pixel_array(pixel_array, convert_from_floats)
+        converted_array = self.convert_pixel_array(pixel_array, convert_from_floats)
+        if not hasattr(self, "pixel_array"): #TODO: And the shapes match?
+            self.pixel_array = converted_array
+        else:
+            # Set in place
+            self.pixel_array[:,:,:] = converted_array[:,:,:]

     def set_background(self, pixel_array, convert_from_floats = False):
         self.background = self.convert_pixel_array(pixel_array, convert_from_floats)

@@ -141,8 +150,6 @@ class Camera(object):
         self, mobjects,
         include_submobjects = True,
         excluded_mobjects = None,
-        # Round z coordinate to nearest hundredth when comparing
-        z_buff_func = lambda m : np.round(m.get_center()[2], 2)
         ):
         if include_submobjects:
             mobjects = self.extract_mobject_family_members(

@@ -154,10 +161,16 @@ class Camera(object):
             )
             mobjects = list_difference_update(mobjects, all_excluded)

-        # Should perhaps think about what happens here when include_submobjects is False,
-        # (for now, the onus is then on the caller to ensure this is handled correctly by
-        # passing us an appropriately pre-flattened list of mobjects if need be)
-        return sorted(mobjects, lambda a, b: cmp(z_buff_func(a), z_buff_func(b)))
+        if self.use_z_coordinate_for_display_order:
+            # Should perhaps think about what happens here when include_submobjects is False,
+            # (for now, the onus is then on the caller to ensure this is handled correctly by
+            # passing us an appropriately pre-flattened list of mobjects if need be)
+            return sorted(
+                mobjects,
+                lambda a, b: cmp(self.z_buff_func(a), self.z_buff_func(b))
+            )
+        else:
+            return mobjects

     def capture_mobject(self, mobject, **kwargs):
         return self.capture_mobjects([mobject], **kwargs)
@@ -166,15 +179,13 @@ class Camera(object):
         mobjects = self.get_mobjects_to_display(mobjects, **kwargs)
         vmobjects = []
         for mobject in mobjects:
-            if isinstance(mobject, VMobject) and not isinstance(mobject, BackgroundColoredVMobject):
-                vmobjects.append(mobject)
+            if isinstance(mobject, VMobject):
+                vmobjects.append(mobject)
             elif len(vmobjects) > 0:
                 self.display_multiple_vectorized_mobjects(vmobjects)
                 vmobjects = []

-            if isinstance(mobject, BackgroundColoredVMobject):
-                self.display_background_colored_vmobject(mobject)
-            elif isinstance(mobject, PMobject):
+            if isinstance(mobject, PMobject):
                 self.display_point_cloud(
                     mobject.points, mobject.rgbas,
                     self.adjusted_thickness(mobject.stroke_width)

@@ -190,37 +201,65 @@ class Camera(object):
             #TODO, more? Call out if it's unknown?
         self.display_multiple_vectorized_mobjects(vmobjects)

     ## Methods associated with svg rendering

+    def get_aggdraw_canvas(self):
+        if not hasattr(self, "canvas"):
+            self.reset_aggdraw_canvas()
+        return self.canvas
+
+    def reset_aggdraw_canvas(self):
+        image = Image.fromarray(self.pixel_array, mode = self.image_mode)
+        self.canvas = aggdraw.Draw(image)
+
     def display_multiple_vectorized_mobjects(self, vmobjects):
         if len(vmobjects) == 0:
             return
-        #More efficient to bundle together in one "canvas"
-        image = Image.fromarray(self.pixel_array, mode = self.image_mode)
-        canvas = aggdraw.Draw(image)
+        batches = batch_by_property(
+            vmobjects,
+            lambda vm : vm.get_background_image_file()
+        )
+        for batch in batches:
+            if batch[0].get_background_image_file():
+                self.display_multiple_background_colored_vmobject(batch)
+            else:
+                self.display_multiple_non_background_colored_vmobjects(batch)
+
+    def display_multiple_non_background_colored_vmobjects(self, vmobjects):
+        self.reset_aggdraw_canvas()
+        canvas = self.get_aggdraw_canvas()
         for vmobject in vmobjects:
             self.display_vectorized(vmobject, canvas)
         canvas.flush()
-        self.pixel_array[:,:] = image

-    def display_vectorized(self, vmobject, canvas):
+    def display_vectorized(self, vmobject, canvas = None):
         if vmobject.is_subpath:
             #Subpath vectorized mobjects are taken care
             #of by their parent
             return
+        canvas = canvas or self.get_aggdraw_canvas()
         pen, fill = self.get_pen_and_fill(vmobject)
         pathstring = self.get_pathstring(vmobject)
         symbol = aggdraw.Symbol(pathstring)
         canvas.symbol((0, 0), symbol, pen, fill)

     def get_pen_and_fill(self, vmobject):
-        pen = aggdraw.Pen(
-            self.color_to_hex_l(self.get_stroke_color(vmobject)),
-            max(vmobject.stroke_width, 0)
-        )
-        fill = aggdraw.Brush(
-            self.color_to_hex_l(self.get_fill_color(vmobject)),
-            opacity = int(self.color_max_val*vmobject.get_fill_opacity())
-        )
+        stroke_width = max(vmobject.get_stroke_width(), 0)
+        if stroke_width == 0:
+            pen = None
+        else:
+            stroke_rgb = self.get_stroke_rgb(vmobject)
+            stroke_hex = rgb_to_hex(stroke_rgb)
+            pen = aggdraw.Pen(stroke_hex, stroke_width)
+
+        fill_opacity = int(self.color_max_val*vmobject.get_fill_opacity())
+        if fill_opacity == 0:
+            fill = None
+        else:
+            fill_rgb = self.get_fill_rgb(vmobject)
+            fill_hex = rgb_to_hex(fill_rgb)
+            fill = aggdraw.Brush(fill_hex, fill_opacity)
+
         return (pen, fill)

     def color_to_hex_l(self, color):
@@ -229,57 +268,49 @@ class Camera(object):
         except:
             return Color(BLACK).get_hex_l()

-    def get_stroke_color(self, vmobject):
-        return vmobject.get_stroke_color()
+    def get_stroke_rgb(self, vmobject):
+        return vmobject.get_stroke_rgb()

-    def get_fill_color(self, vmobject):
-        return vmobject.get_fill_color()
+    def get_fill_rgb(self, vmobject):
+        return vmobject.get_fill_rgb()

     def get_pathstring(self, vmobject):
         result = ""
         for mob in [vmobject]+vmobject.get_subpath_mobjects():
             points = mob.points
             # points = self.adjust_out_of_range_points(points)
             if len(points) == 0:
                 continue
-            points = self.align_points_to_camera(points)
-            coords = self.points_to_pixel_coords(points)
-            start = "M%d %d"%tuple(coords[0])
-            #(handle1, handle2, anchor) triplets
-            triplets = zip(*[
-                coords[i+1::3]
-                for i in range(3)
-            ])
-            cubics = [
-                "C" + " ".join(map(str, it.chain(*triplet)))
-                for triplet in triplets
-            ]
-            end = "Z" if vmobject.mark_paths_closed else ""
-            result += " ".join([start] + cubics + [end])
+            aligned_points = self.align_points_to_camera(points)
+            coords = self.points_to_pixel_coords(aligned_points)
+            coord_strings = coords.flatten().astype(str)
+            # Start the new path string with M
+            coord_strings[0] = "M" + coord_strings[0]
+            # The C at the start of every 6th number communicates
+            # that the following 6 define a cubic Bezier
+            coord_strings[2::6] = map(lambda s : "C" + str(s), coord_strings[2::6])
+            # Possibly finish with "Z"
+            if vmobject.mark_paths_closed:
+                coord_strings[-1] = coord_strings[-1] + " Z"
+            result += " ".join(coord_strings)
         return result

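The rewritten get_pathstring builds one flat array of coordinate strings and splices the SVG "M"/"C" markers in by slice assignment instead of looping over Bezier triplets. A minimal standalone illustration of that trick (not from the commit):

import numpy as np

coords = np.array([[0, 0], [1, 2], [3, 4], [5, 6]])  # anchor plus one cubic triplet
s = coords.flatten().astype(str)
s[0] = "M" + s[0]
s[2::6] = ["C" + x for x in s[2::6]]
print(" ".join(s))  # M0 0 C1 2 3 4 5 6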
-    def display_background_colored_vmobject(self, cvmobject):
-        mob_array = np.zeros(
-            self.pixel_array.shape,
-            dtype = self.pixel_array_dtype
-        )
-        image = Image.fromarray(mob_array, mode = self.image_mode)
-        canvas = aggdraw.Draw(image)
-        self.display_vectorized(cvmobject, canvas)
-        canvas.flush()
-        cv_background = cvmobject.background_array
-        if not np.all(self.pixel_array.shape == cv_background):
-            cvmobject.resize_background_array_to_match(self.pixel_array)
-            cv_background = cvmobject.background_array
-        array = np.array(
-            (np.array(mob_array).astype('float')/255.)*\
-            np.array(cv_background),
-            dtype = self.pixel_array_dtype
-        )
-        self.pixel_array[:,:] = np.maximum(
-            self.pixel_array, array
-        )
+    def get_background_colored_vmobject_displayer(self):
+        # Quite wordy to type out a bunch
+        long_name = "background_colored_vmobject_displayer"
+        if not hasattr(self, long_name):
+            setattr(self, long_name, BackgroundColoredVMobjectDisplayer(self))
+        return getattr(self, long_name)
+
+    def display_multiple_background_colored_vmobject(self, cvmobjects):
+        displayer = self.get_background_colored_vmobject_displayer()
+        cvmobject_pixel_array = displayer.display(*cvmobjects)
+        self.pixel_array[:,:] = np.maximum(
+            self.pixel_array, cvmobject_pixel_array
+        )
+        return self

     ## Methods for other rendering

     def display_point_cloud(self, points, rgbas, thickness):
         if len(points) == 0:
@@ -475,6 +506,75 @@ class Camera(object):

         return centered_space_coords

+class BackgroundColoredVMobjectDisplayer(object):
+    def __init__(self, camera):
+        self.camera = camera
+        self.file_name_to_pixel_array_map = {}
+        self.init_canvas()
+
+    def init_canvas(self):
+        self.pixel_array = np.zeros(
+            self.camera.pixel_array.shape,
+            dtype = self.camera.pixel_array_dtype,
+        )
+        self.reset_canvas()
+
+    def reset_canvas(self):
+        image = Image.fromarray(self.pixel_array, mode = self.camera.image_mode)
+        self.canvas = aggdraw.Draw(image)
+
+    def resize_background_array(
+        self, background_array,
+        new_width, new_height,
+        mode = "RGBA"
+        ):
+        image = Image.fromarray(background_array, mode = mode)
+        resized_image = image.resize((new_width, new_height))
+        return np.array(resized_image)
+
+    def resize_background_array_to_match(self, background_array, pixel_array):
+        height, width = pixel_array.shape[:2]
+        mode = "RGBA" if pixel_array.shape[2] == 4 else "RGB"
+        return self.resize_background_array(background_array, width, height, mode)
+
+    def get_background_array(self, cvmobject):
+        file_name = cvmobject.get_background_image_file()
+        if file_name in self.file_name_to_pixel_array_map:
+            return self.file_name_to_pixel_array_map[file_name]
+        full_path = get_full_raster_image_path(file_name)
+        image = Image.open(full_path)
+        array = np.array(image)
+
+        camera = self.camera
+        if not np.all(camera.pixel_array.shape == array.shape):
+            array = self.resize_background_array_to_match(array, camera.pixel_array)
+
+        self.file_name_to_pixel_array_map[file_name] = array
+        return array
+
+    def display(self, *cvmobjects):
+        batches = batch_by_property(
+            cvmobjects, lambda cv : cv.get_background_image_file()
+        )
+        curr_array = None
+        for batch in batches:
+            background_array = self.get_background_array(batch[0])
+            for cvmobject in batch:
+                self.camera.display_vectorized(cvmobject, self.canvas)
+            self.canvas.flush()
+            new_array = np.array(
+                (background_array*self.pixel_array.astype('float')/255),
+                dtype = self.camera.pixel_array_dtype
+            )
+            if curr_array is None:
+                curr_array = new_array
+            else:
+                curr_array = np.maximum(curr_array, new_array)
+            self.pixel_array[:,:] = 0
+            self.reset_canvas()
+        return curr_array
+

 class MovingCamera(Camera):
     """
     Stays in line with the height, width and position
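BackgroundColoredVMobjectDisplayer composites by rendering each batch in white onto a black scratch buffer, multiplying the background image by that mask, and folding batches together with np.maximum. A toy numpy check of that rule (not from the commit):

import numpy as np

mask = np.array([[0, 128], [255, 0]], dtype='uint8')         # white-on-black render
background = np.array([[10, 200], [40, 90]], dtype='uint8')  # background image pixels
colored = ((mask.astype(float)/255)*background).astype('uint8')
out = np.maximum(np.zeros_like(colored), colored)
print(out)  # [[  0 100]
            #  [ 40   0]]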
extract_scene.py

@@ -68,7 +68,7 @@ def get_configuration():
         for short_arg, long_arg in optional_args:
             parser.add_argument(short_arg, long_arg, action = "store_true")
         parser.add_argument("-o", "--output_name")
-        parser.add_argument("-n", "--skip_to_animation_number")
+        parser.add_argument("-n", "--start_at_animation_number")
         args = parser.parse_args()
     except argparse.ArgumentError as err:
         print(str(err))

@@ -88,7 +88,8 @@ def get_configuration():
         "ignore_waits" : args.preview,
         "write_all" : args.write_all,
         "output_name" : args.output_name,
-        "skip_to_animation_number" : args.skip_to_animation_number,
+        "start_at_animation_number" : args.start_at_animation_number,
+        "end_at_animation_number" : None,
     }
     if args.low_quality:
         config["camera_config"] = LOW_QUALITY_CAMERA_CONFIG

@@ -100,13 +101,18 @@ def get_configuration():
         config["camera_config"] = PRODUCTION_QUALITY_CAMERA_CONFIG
         config["frame_duration"] = PRODUCTION_QUALITY_FRAME_DURATION

-    stan = config["skip_to_animation_number"]
+    stan = config["start_at_animation_number"]
     if stan is not None:
-        config["skip_to_animation_number"] = int(stan)
+        if "," in stan:
+            start, end = stan.split(",")
+            config["start_at_animation_number"] = int(start)
+            config["end_at_animation_number"] = int(end)
+        else:
+            config["start_at_animation_number"] = int(stan)

     config["skip_animations"] = any([
         config["show_last_frame"] and not config["write_to_movie"],
-        config["skip_to_animation_number"],
+        config["start_at_animation_number"],
    ])
    return config

@@ -220,7 +226,8 @@ def main():
             "write_to_movie",
             "output_directory",
             "save_pngs",
-            "skip_to_animation_number",
+            "start_at_animation_number",
+            "end_at_animation_number",
         ]
     ])
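Net effect of the renamed flag: "-n" now accepts either a start index or a "start,end" pair. A sketch of the parsing semantics (standalone; the string value is a hypothetical example of what argparse hands back):

stan = "4,9"  # value of --start_at_animation_number
if "," in stan:
    start, end = map(int, stan.split(","))
else:
    start, end = int(stan), None
print(start, end)  # 4 9: play animations 4 through 8, skip the rest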
helpers.py: 20 changes

@@ -126,7 +126,7 @@ def rgba_to_color(rgba):
     return rgb_to_color(rgba[:3])

 def rgb_to_hex(rgb):
-    return Color(rgb = rgb).get_hex_l()
+    return "#" + "".join('%02x'%int(255*x) for x in rgb)

 def invert_color(color):
     return rgb_to_color(1.0 - color_to_rgb(color))
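A quick check of the new rgb_to_hex, which expects channel values in [0, 1] (standalone sketch, not from the commit):

def rgb_to_hex(rgb):
    return "#" + "".join('%02x'%int(255*x) for x in rgb)

print(rgb_to_hex((1.0, 0.5, 0.0)))  # #ff7f00
print(rgb_to_hex((0.0, 0.0, 0.0)))  # #000000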
@@ -226,6 +226,24 @@ def all_elements_are_instances(iterable, Class):
 def adjacent_pairs(objects):
     return zip(objects, list(objects[1:])+[objects[0]])

+def batch_by_property(items, property_func):
+    batches = []
+    def add_batch(batch):
+        if len(batch) > 0:
+            batches.append(batch)
+    curr_batch = []
+    curr_prop = None
+    for item in items:
+        prop = property_func(item)
+        if prop != curr_prop:
+            add_batch(curr_batch)
+            curr_prop = prop
+            curr_batch = [item]
+        else:
+            curr_batch.append(item)
+    add_batch(curr_batch)
+    return batches
+
 def complex_to_R3(complex_num):
     return np.array((complex_num.real, complex_num.imag, 0))
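batch_by_property groups consecutive items sharing a property value; it is order-sensitive by design, since the camera must preserve draw order while still batching canvas work. Its behavior matches itertools.groupby (standalone check, not from the commit):

from itertools import groupby

items = ["a.png", "a.png", None, None, "b.png", "a.png"]
batches = [list(group) for _, group in groupby(items, key = lambda x : x)]
print(batches)
# [['a.png', 'a.png'], [None, None], ['b.png'], ['a.png']]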
mobject/__init__.py

@@ -6,5 +6,5 @@ __all__ = [
 from mobject import Mobject, Group
 from point_cloud_mobject import Point, Mobject1D, Mobject2D, PMobject
-from vectorized_mobject import VMobject, VGroup, BackgroundColoredVMobject
+from vectorized_mobject import VMobject, VGroup
 from image_mobject import ImageMobject
mobject/vectorized_mobject.py

@@ -17,6 +17,7 @@ class VMobject(Mobject):
         "propagate_style_to_family" : False,
         "pre_function_handle_to_anchor_scale_factor" : 0.01,
         "make_smooth_after_applying_functions" : False,
+        "background_image_file" : None,
     }

     def get_group_class(self):

@@ -120,6 +121,9 @@ class VMobject(Mobject):
         )
         return self

+    def get_fill_rgb(self):
+        return self.fill_rgb
+
     def get_fill_color(self):
         try:
             self.fill_rgb = np.clip(self.fill_rgb, 0.0, 1.0)

@@ -130,6 +134,9 @@ class VMobject(Mobject):
     def get_fill_opacity(self):
         return np.clip(self.fill_opacity, 0, 1)

+    def get_stroke_rgb(self):
+        return self.stroke_rgb
+
     def get_stroke_color(self):
         try:
             self.stroke_rgb = np.clip(self.stroke_rgb, 0, 1)

@@ -145,6 +152,16 @@ class VMobject(Mobject):
             return self.get_stroke_color()
         return self.get_fill_color()

+    def color_using_background_image(self, background_image_file):
+        self.background_image_file = background_image_file
+        self.highlight(WHITE)
+        for submob in self.submobjects:
+            submob.color_using_background_image(background_image_file)
+        return self
+
+    def get_background_image_file(self):
+        return self.background_image_file
+
     ## Drawing
     def start_at(self, point):
         if len(self.points) == 0:

@@ -464,46 +481,4 @@ class VectorizedPoint(VMobject):
     def set_location(self, new_loc):
         self.set_points(np.array([new_loc]))

-class BackgroundColoredVMobject(VMobject):
-    CONFIG = {
-        # Can be set to None, using set_background_array to initialize instead
-        "background_image_file" : "color_background",
-        "stroke_color" : WHITE,
-        "fill_color" : WHITE,
-    }
-    def __init__(self, vmobject, **kwargs):
-        # Note: At the moment, this does nothing to mimic
-        # the full family of the vmobject passed in.
-        VMobject.__init__(self, **kwargs)
-
-        # Match properties of vmobject
-        self.points = np.array(vmobject.points)
-        self.set_stroke(WHITE, vmobject.get_stroke_width())
-        self.set_fill(WHITE, vmobject.get_fill_opacity())
-        for submob in vmobject.submobjects:
-            self.add(BackgroundColoredVMobject(submob, **kwargs))
-
-        if self.background_image_file != None:
-            # Initialize background array
-            path = get_full_raster_image_path(self.background_image_file)
-            image = Image.open(path)
-            self.set_background_array(np.array(image))
-
-    def set_background_array(self, background_array):
-        self.background_array = background_array
-
-    def resize_background_array(self, new_width, new_height, mode = "RGBA"):
-        image = Image.fromarray(self.background_array, mode = mode)
-        resized_image = image.resize((new_width, new_height))
-        self.background_array = np.array(resized_image)
-
-    def resize_background_array_to_match(self, pixel_array):
-        height, width = pixel_array.shape[:2]
-        mode = "RGBA" if pixel_array.shape[2] == 4 else "RGB"
-        self.resize_background_array(width, height, mode)

mnist_loader.py

@@ -1,85 +1,85 @@
"""
mnist_loader
~~~~~~~~~~~~

A library to load the MNIST image data. For details of the data
structures that are returned, see the doc strings for ``load_data``
and ``load_data_wrapper``. In practice, ``load_data_wrapper`` is the
function usually called by our neural network code.
"""

#### Libraries
# Standard library
import cPickle
import gzip

# Third-party libraries
import numpy as np

def load_data():
    """Return the MNIST data as a tuple containing the training data,
    the validation data, and the test data.

    The ``training_data`` is returned as a tuple with two entries.
    The first entry contains the actual training images. This is a
    numpy ndarray with 50,000 entries. Each entry is, in turn, a
    numpy ndarray with 784 values, representing the 28 * 28 = 784
    pixels in a single MNIST image.

    The second entry in the ``training_data`` tuple is a numpy ndarray
    containing 50,000 entries. Those entries are just the digit
    values (0...9) for the corresponding images contained in the first
    entry of the tuple.

    The ``validation_data`` and ``test_data`` are similar, except
    each contains only 10,000 images.

    This is a nice data format, but for use in neural networks it's
    helpful to modify the format of the ``training_data`` a little.
    That's done in the wrapper function ``load_data_wrapper()``, see
    below.
    """
    f = gzip.open('/Users/grant/cs/neural-networks-and-deep-learning/data/mnist.pkl.gz', 'rb')
    training_data, validation_data, test_data = cPickle.load(f)
    f.close()
    return (training_data, validation_data, test_data)

def load_data_wrapper():
    """Return a tuple containing ``(training_data, validation_data,
    test_data)``. Based on ``load_data``, but the format is more
    convenient for use in our implementation of neural networks.

    In particular, ``training_data`` is a list containing 50,000
    2-tuples ``(x, y)``. ``x`` is a 784-dimensional numpy.ndarray
    containing the input image. ``y`` is a 10-dimensional
    numpy.ndarray representing the unit vector corresponding to the
    correct digit for ``x``.

    ``validation_data`` and ``test_data`` are lists containing 10,000
    2-tuples ``(x, y)``. In each case, ``x`` is a 784-dimensional
    numpy.ndarray containing the input image, and ``y`` is the
    corresponding classification, i.e., the digit values (integers)
    corresponding to ``x``.

    Obviously, this means we're using slightly different formats for
    the training data and the validation / test data. These formats
    turn out to be the most convenient for use in our neural network
    code."""
    tr_d, va_d, te_d = load_data()
    training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
    training_results = [vectorized_result(y) for y in tr_d[1]]
    training_data = zip(training_inputs, training_results)
    validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
    validation_data = zip(validation_inputs, va_d[1])
    test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
    test_data = zip(test_inputs, te_d[1])
    return (training_data, validation_data, test_data)

def vectorized_result(j):
    """Return a 10-dimensional unit vector with a 1.0 in the jth
    position and zeroes elsewhere. This is used to convert a digit
    (0...9) into a corresponding desired output from the neural
    network."""
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e
scene/scene.py

@@ -39,7 +39,8 @@ class Scene(Container):
         "name" : None,
         "always_continually_update" : False,
         "random_seed" : 0,
-        "skip_to_animation_number" : None,
+        "start_at_animation_number" : None,
+        "end_at_animation_number" : None,
     }
     def __init__(self, **kwargs):
         Container.__init__(self, **kwargs) # Perhaps allow passing in a non-empty *mobjects parameter?

@@ -406,14 +407,17 @@ class Scene(Container):
         if len(args) == 0:
             warnings.warn("Called Scene.play with no animations")
             return
-        if self.skip_to_animation_number:
-            if self.num_plays + 1 == self.skip_to_animation_number:
+        if self.start_at_animation_number:
+            if self.num_plays == self.start_at_animation_number:
                 self.skip_animations = False
+        if self.end_at_animation_number:
+            if self.num_plays >= self.end_at_animation_number:
+                self.skip_animations = True
+                return self # Don't even bother with the rest...
         if self.skip_animations:
             kwargs["run_time"] = 0

         animations = self.compile_play_args_to_animation_list(*args)
+        self.num_plays += 1

         sync_animation_run_times_and_rate_funcs(*animations, **kwargs)
         moving_mobjects = self.get_moving_mobjects(*animations)

@@ -429,6 +433,7 @@ class Scene(Container):
         self.mobjects_from_last_animation = moving_mobjects
         self.clean_up_animations(*animations)
         self.continual_update(0)
-        self.num_plays += 1
         return self

     def clean_up_animations(self, *animations):
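The net gating rule, ignoring the stateful skip_animations flag (standalone sketch, not from the commit): plays before the start index stay skipped, and plays from the end index onward are skipped again.

def is_skipped(num_plays, start_at = None, end_at = None):
    if start_at is not None and num_plays < start_at:
        return True
    if end_at is not None and num_plays >= end_at:
        return True
    return False

print([is_skipped(i, start_at = 2, end_at = 5) for i in range(7)])
# [True, True, False, False, False, True, True]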
topics/number_line.py

@@ -136,8 +136,6 @@ class NumberLine(VMobject):
         self.tip = tip
         self.add(tip)

-
-
 class UnitInterval(NumberLine):
     CONFIG = {
         "x_min" : 0,
topics/three_dimensions.py

@@ -40,22 +40,17 @@ class ThreeDCamera(CameraWithPerspective):
         self.rotation_mobject = VectorizedPoint()
         self.set_position(self.phi, self.theta, self.distance)

-    def get_color(self, method):
-        color = method()
-        vmobject = method.im_self
+    def modified_rgb(self, vmobject, rgb):
         if should_shade_in_3d(vmobject):
-            return Color(rgb = self.get_shaded_rgb(
-                color_to_rgb(color),
-                normal_vect = self.get_unit_normal_vect(vmobject)
-            ))
+            return self.get_shaded_rgb(rgb, self.get_unit_normal_vect(vmobject))
         else:
-            return color
+            return rgb

-    def get_stroke_color(self, vmobject):
-        return self.get_color(vmobject.get_stroke_color)
+    def get_stroke_rgb(self, vmobject):
+        return self.modified_rgb(vmobject, vmobject.get_stroke_rgb())

-    def get_fill_color(self, vmobject):
-        return self.get_color(vmobject.get_fill_color)
+    def get_fill_rgb(self, vmobject):
+        return self.modified_rgb(vmobject, vmobject.get_fill_rgb())

     def get_shaded_rgb(self, rgb, normal_vect):
         brightness = np.dot(normal_vect, self.unit_sun_vect)**2
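get_shaded_rgb keeps the existing rule: brightness comes from the squared alignment between the surface normal and the sun direction. A standalone numeric check (the sun vector here is a demo value; manim supplies its own):

import numpy as np

unit_sun_vect = np.array([0.0, 0.0, 1.0])  # demo value
normal = np.array([0.0, np.sin(np.pi/6), np.cos(np.pi/6)])  # 30 degrees off the sun
brightness = np.dot(normal, unit_sun_vect)**2
print(round(brightness, 3))  # 0.75 = cos(30 deg)**2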