post epii project state, without tex files

Grant Sanderson 2015-03-22 13:33:02 -06:00
parent 4f6d019667
commit 948e1e3038
13 changed files with 4534 additions and 0 deletions

478 animate.py Normal file
@@ -0,0 +1,478 @@
from PIL import Image
from colour import Color
import numpy as np
import warnings
import time
import os
import copy
import progressbar
import inspect
from images2gif import writeGif
from helpers import *
from mobject import *
import displayer as disp
class Animation(object):
def __init__(self,
mobject,
alpha_func = high_inflection_0_to_1,
run_time = DEFAULT_ANIMATION_RUN_TIME,
pause_time = DEFAULT_ANIMATION_PAUSE_TIME,
dither_time = DEFAULT_DITHER_TIME,
name = None):
if isinstance(mobject, type) and issubclass(mobject, Mobject):
self.mobject = mobject()
self.starting_mobject = mobject()
elif isinstance(mobject, Mobject):
self.mobject = mobject
self.starting_mobject = copy.deepcopy(mobject)
else:
raise Exception("Invalid mobject parameter, must be \
subclass or instance of Mobject")
self.reference_mobjects = [self.starting_mobject]
self.alpha_func = alpha_func or (lambda x : x)
self.run_time = run_time
self.pause_time = pause_time
self.dither_time = dither_time
self.nframes, self.ndither_frames = self.get_frame_count()
self.nframes_past = 0
self.frames = []
self.concurrent_animations = []
self.following_animations = []
self.reference_animations = []
self.background_mobjects = []
self.filter_functions = []
self.restricted_height = SPACE_HEIGHT
self.restricted_width = SPACE_WIDTH
self.spacial_center = np.zeros(3)
self.name = self.__class__.__name__ + str(self.mobject)
self.inputted_name = name
def __str__(self):
return self.inputted_name or self.name
def get_points_and_rgbs(self):
"""
It is the responsibility of this class to only emit points within
the space. Returns np array of points and corresponding np array
of rgbs
"""
points = np.zeros(0)
rgbs = np.zeros(0)
for mobject in self.background_mobjects + [self.mobject]:
points = np.append(points, mobject.points)
rgbs = np.append(rgbs, mobject.rgbs)
#Kind of hacky
if mobject.SHOULD_BUFF_POINTS: #TODO, think about this.
up_nudge = np.array(
(2.0 * SPACE_HEIGHT / HEIGHT, 0, 0)
)
side_nudge = np.array(
(0, 2.0 * SPACE_WIDTH / WIDTH, 0)
)
for nudge in up_nudge, side_nudge, up_nudge + side_nudge:
points = np.append(points, mobject.points + nudge)
rgbs = np.append(rgbs, mobject.rgbs)
points = points.reshape((points.size/3, 3))
rgbs = rgbs.reshape((rgbs.size/3, 3))
#Filters out what is out of bounds.
admissibles = (abs(points[:,0]) < self.restricted_width) * \
(abs(points[:,1]) < self.restricted_height)
for filter_function in self.filter_functions:
admissibles *= ~filter_function(points)
if any(self.spacial_center):
points += self.spacial_center
#Filter out points pushed off the edge
admissibles *= (abs(points[:,0]) < SPACE_WIDTH) * \
(abs(points[:,1]) < SPACE_HEIGHT)
if rgbs.shape[0] < points.shape[0]:
#TODO, this shouldn't be necessary, find what's happening.
points = points[:rgbs.shape[0], :]
admissibles = admissibles[:rgbs.shape[0]]
return points[admissibles, :], rgbs[admissibles, :]
def update(self):
if self.nframes_past > self.nframes:
return False
self.nframes_past += 1
for anim in self.concurrent_animations + self.reference_animations:
anim.update()
self.update_mobject(self.alpha_func(self.get_fraction_complete()))
return True
def while_also(self, action, display = True, *args, **kwargs):
if isinstance(action, type) and issubclass(action, Animation):
self.reference_animations += [
action(mobject, *args, **kwargs)
for mobject in self.reference_mobjects + [self.mobject]
]
self.name += action.__name__
return self
if action.mobject == self.mobject:
#This is just for a weird edge case
action.mobject = self.starting_mobject
new_home = self.concurrent_animations if display else \
self.reference_animations
new_home.append(action)
self.name += str(action)
return self
def with_background(self, *mobjects):
for anim in [self] + self.following_animations:
anim.background_mobjects.append(CompoundMobject(*mobjects))
return self
def then(self, action, carry_over_background = False, *args, **kwargs):
if isinstance(action, type) and issubclass(action, Animation):
action = action(mobject = self.mobject, *args, **kwargs)
if carry_over_background:
action.background_mobjects += self.background_mobjects
self.following_animations.append(action)
if self.frames:
self.frames += action.get_frames()
self.name += "Then" + str(action)
return self
def get_image(self):
all_points, all_rgbs = self.get_points_and_rgbs()
for anim in self.concurrent_animations:
new_points, new_rgbs = anim.get_points_and_rgbs()
all_points = np.append(all_points, new_points)
all_rgbs = np.append(all_rgbs, new_rgbs)
all_points = all_points.reshape((all_points.size/3, 3))
all_rgbs = all_rgbs.reshape((all_rgbs.size/3, 3))
return disp.get_image(all_points, all_rgbs)
def generate_frames(self):
print "Generating " + str(self) + "..."
progress_bar = progressbar.ProgressBar(maxval=self.nframes)
progress_bar.start()
self.frames = []
while self.update():
self.frames.append(self.get_image())
progress_bar.update(self.nframes_past - 1)
self.clean_up()
for anim in self.following_animations:
self.frames += anim.get_frames()
progress_bar.finish()
return self
def get_fraction_complete(self):
result = float(self.nframes_past - self.ndither_frames) / (
self.nframes - 2 * self.ndither_frames)
if result <= 0:
return 0
elif result >= 1:
return 1
return result
def get_frames(self):
if not self.frames:
self.generate_frames()
return self.frames
def get_frame_count(self):
nframes = int((self.run_time + 2*self.dither_time)/ self.pause_time)
ndither_frames = int(self.dither_time / self.pause_time)
return nframes, ndither_frames
def filter_out(self, *filter_functions):
self.filter_functions += filter_functions
return self
def restrict_height(self, height):
self.restricted_height = min(height, SPACE_HEIGHT)
return self
def restrict_width(self, width):
self.restricted_width = min(width, SPACE_WIDTH)
return self
def shift(self, vector):
self.spacial_center += vector
for anim in self.following_animations:
anim.shift(vector)
return self
def set_dither(self, time, apply_to_concurrent = False):
self.dither_time = time
if apply_to_concurrent:
for anim in self.concurrent_animations + self.reference_animations:
anim.set_dither(time)
return self.reload()
def set_run_time(self, time, apply_to_concurrent = False):
self.run_time = time
if apply_to_concurrent:
for anim in self.concurrent_animations + self.reference_animations:
anim.set_run_time(time)
return self.reload()
def set_alpha_func(self, alpha_func):
if alpha_func is None:
alpha_func = lambda x : x
self.alpha_func = alpha_func
return self
def set_name(self, name):
self.inputted_name = name
return self
def reload(self):
self.nframes, self.ndither_frames = self.get_frame_count()
if self.frames:
self.nframes_past = 0
self.generate_frames()
return self
def drag_pixels(self):
self.frames = drag_pixels(self.get_frames())
return self
def reverse(self):
self.get_frames().reverse()
self.name = 'Reversed' + str(self)
return self
def write_to_gif(self, name = None):
disp.write_to_gif(self, name or str(self))
def write_to_movie(self, name = None):
disp.write_to_movie(self, name or str(self))
def update_mobject(self, alpha):
        #Typically implemented by subclass
pass
def clean_up(self):
pass
def dither(self):
pass
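# An illustrative composition of the chaining methods above, written as a sketch
# (square, circle, labels, grid and axes stand in for Mobject instances that are
# not defined in this file):
#   anim = Transform(square, circle).while_also(FadeOut(labels))
#   anim.then(ShowCreation(grid), carry_over_background = True)
#   anim.with_background(axes)
#   anim.write_to_movie("TransformSquareToCircle")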
###### Concrete Animations ########
class Rotating(Animation):
def __init__(self,
mobject,
axis = None,
axes = [[0, 0, 1], [0, 1, 0]],
radians = 2 * np.pi,
run_time = 20.0,
dither_time = 0.0,
alpha_func = None,
*args, **kwargs):
Animation.__init__(
self, mobject,
run_time = run_time, dither_time = dither_time,
alpha_func = alpha_func,
*args, **kwargs
)
self.axes = [axis] if axis else axes
self.radians = radians
def update_mobject(self, alpha):
self.mobject.points = self.starting_mobject.points
for axis in self.axes:
self.mobject.rotate(
self.radians * alpha,
axis
)
class RotationAsTransform(Rotating):
def __init__(self, mobject, radians,
run_time = DEFAULT_ANIMATION_RUN_TIME,
dither_time = DEFAULT_DITHER_TIME,
*args, **kwargs):
Rotating.__init__(
self,
mobject,
axis = (0, 0, 1),
run_time = run_time,
dither_time = dither_time,
radians = radians,
alpha_func = high_inflection_0_to_1,
)
class FadeOut(Animation):
def update_mobject(self, alpha):
self.mobject.rgbs = self.starting_mobject.rgbs * (1 - alpha)
class Reveal(Animation):
def update_mobject(self, alpha):
self.mobject.rgbs = self.starting_mobject.rgbs * alpha
if self.mobject.points.shape != self.starting_mobject.points.shape:
self.mobject.points = self.starting_mobject.points
#TODO, Why do you need to do this? Shouldn't points always align?
class Transform(Animation):
def __init__(self, mobject1, mobject2, run_time = DEFAULT_TRANSFORM_RUN_TIME,
*args, **kwargs):
count1, count2 = mobject1.get_num_points(), mobject2.get_num_points()
Mobject.align_data(mobject1, mobject2)
Animation.__init__(self, mobject1, run_time = run_time, *args, **kwargs)
self.ending_mobject = mobject2
self.mobject.SHOULD_BUFF_POINTS = \
mobject1.SHOULD_BUFF_POINTS and mobject2.SHOULD_BUFF_POINTS
self.reference_mobjects.append(mobject2)
self.name += "To" + str(mobject2)
if count2 < count1:
#Ensure redundant pixels fade to black
indices = self.non_redundant_m2_indices = \
np.arange(0, count1-1, float(count1) / count2).astype('int')
temp = np.zeros(mobject2.points.shape)
temp[indices] = mobject2.rgbs[indices]
mobject2.rgbs = temp
def update_mobject(self, alpha):
Mobject.interpolate(
self.starting_mobject,
self.ending_mobject,
self.mobject,
alpha
)
def clean_up(self):
if hasattr(self, "non_redundant_m2_indices"):
#Reduce mobject (which has become identical to mobject2), as
#well as mobject2 itself
for mobject in [self.mobject, self.ending_mobject]:
for attr in ['points', 'rgbs']:
setattr(
mobject, attr,
getattr(
self.ending_mobject,
attr
)[self.non_redundant_m2_indices]
)
class ApplyMethod(Transform):
def __init__(self, method, mobject, *args, **kwargs):
"""
method is a method of Mobject
"""
method_args = ()
if isinstance(method, tuple):
method, method_args = method[0], method[1:]
if not inspect.ismethod(method):
raise "Not a valid Mobject method"
Transform.__init__(
self,
mobject,
method(copy.deepcopy(mobject), *method_args),
*args, **kwargs
)
class ApplyFunction(Transform):
def __init__(self, function, mobject, run_time = DEFAULT_ANIMATION_RUN_TIME,
*args, **kwargs):
map_image = copy.deepcopy(mobject)
map_image.points = np.array(map(function, map_image.points))
Transform.__init__(self, mobject, map_image, run_time = run_time,
*args, **kwargs)
self.name = "".join([
"Apply",
"".join([s.capitalize() for s in function.__name__.split("_")]),
"To" + str(mobject)
])
class ComplexFunction(ApplyFunction):
def __init__(self, function, *args, **kwargs):
def point_map(point):
x, y, z = point
c = np.complex(x, y)
c = function(c)
return c.real, c.imag, z
if len(args) > 0:
args = list(args)
mobject = args.pop(0)
elif "mobject" in kwargs:
mobject = kwargs.pop("mobject")
else:
mobject = Grid()
ApplyFunction.__init__(self, point_map, mobject, *args, **kwargs)
self.name = "ComplexFunction" + to_cammel_case(function.__name__)
        #TODO, abstract away function naming
class Homotopy(Animation):
def __init__(self, homotopy, *args, **kwargs):
"""
        A homotopy is a function from (x, y, z, t) to (x', y', z')
"""
self.homotopy = homotopy
Animation.__init__(self, *args, **kwargs)
def update_mobject(self, alpha):
self.mobject.points = np.array([
self.homotopy((x, y, z, alpha))
for x, y, z in self.starting_mobject.points
])
class ComplexHomotopy(Homotopy):
def __init__(self, complex_homotopy, *args, **kwargs):
"""
        A complex homotopy is a function from (z, t) to z'
"""
def homotopy((x, y, z, t)):
c = complex_homotopy((complex(x, y), t))
return (c.real, c.imag, z)
if len(args) > 0:
args = list(args)
mobject = args.pop(0)
elif "mobject" in kwargs:
mobject = kwargs["mobject"]
else:
mobject = Grid()
Homotopy.__init__(self, homotopy, mobject, *args, **kwargs)
self.name = "ComplexHomotopy" + \
to_cammel_case(complex_homotopy.__name__)
class ShowCreation(Animation):
def update_mobject(self, alpha):
new_num_points = int(alpha * self.starting_mobject.points.shape[0])
for attr in ["points", "rgbs"]:
setattr(
self.mobject,
attr,
getattr(self.starting_mobject, attr)[:new_num_points, :]
)
class Flash(Animation):
def __init__(self, mobject, color = "white", slow_factor = 0.01,
run_time = 0.1, dither_time = 0, alpha_func = None,
*args, **kwargs):
Animation.__init__(self, mobject, run_time = run_time,
dither_time = dither_time,
alpha_func = alpha_func,
*args, **kwargs)
self.intermediate = Mobject(color = color)
self.intermediate.add_points([
point + (x, y, 0)
for point in self.mobject.points
for x in [-1, 1]
for y in [-1, 1]
])
self.reference_mobjects.append(self.intermediate)
self.slow_factor = slow_factor
def update_mobject(self, alpha):
#Makes alpha go from 0 to slow_factor to 0 instead of 0 to 1
alpha = self.slow_factor * (1.0 - 4 * (alpha - 0.5)**2)
Mobject.interpolate(
self.starting_mobject,
self.intermediate,
self.mobject,
alpha
)

36 constants.py Normal file
@@ -0,0 +1,36 @@
import os
PRODUCTION_QUALITY = True
DEFAULT_POINT_DENSITY_2D = 25 if PRODUCTION_QUALITY else 20
DEFAULT_POINT_DENSITY_1D = 200 if PRODUCTION_QUALITY else 50
HEIGHT = 1024#1440 if PRODUCTION_QUALITY else 480
WIDTH = 1024#2560 if PRODUCTION_QUALITY else 640
#All in seconds
DEFAULT_ANIMATION_PAUSE_TIME = 0.04 if PRODUCTION_QUALITY else 0.1
DEFAULT_ANIMATION_RUN_TIME = 3.0
DEFAULT_TRANSFORM_RUN_TIME = 1.0
DEFAULT_DITHER_TIME = 1.0
GENERALLY_BUFF_POINTS = PRODUCTION_QUALITY
BACKGROUND_COLOR = "black" #TODO, this is never actually enforced anywhere.
DEFAULT_NUM_STARS = 1000
SPACE_HEIGHT = 4.0
SPACE_WIDTH = WIDTH * SPACE_HEIGHT / HEIGHT
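#So the visible region spans x in [-SPACE_WIDTH, SPACE_WIDTH] and
#y in [-SPACE_HEIGHT, SPACE_HEIGHT]; animate.py filters out points beyond it.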
PDF_DENSITY = 400
IMAGE_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "images")
GIF_DIR = os.path.join(os.getenv("HOME"), "Desktop", "math_gifs")
MOVIE_DIR = os.path.join(os.getenv("HOME"), "Desktop", "math_movies")
PDF_DIR = os.path.join(os.getenv("HOME"), "Documents", "Tex", "Animations")
TMP_IMAGE_DIR = "/tmp/animation_images/"
for folder in [IMAGE_DIR, GIF_DIR, MOVIE_DIR, TMP_IMAGE_DIR]:
if not os.path.exists(folder):
os.mkdir(folder)
LOGO_PATH = os.path.join(IMAGE_DIR, "logo.png")

124 displayer.py Normal file
@@ -0,0 +1,124 @@
import numpy as np
import itertools as it
import os
from PIL import Image
import cv2
from animate import *
def get_image(points, rgbs):
return Image.fromarray(get_pixels(points, rgbs))
def get_pixels(points, rgbs):
    #TODO, Let z add a depth component?
points = points[:, :2]
#Flips y-axis
points[:,1] *= -1
#Map points to pixel space, then create pixel array first in terms
#of its flattened version
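    #For example, with HEIGHT = WIDTH = 1024 and SPACE_HEIGHT = SPACE_WIDTH = 4.0
    #(the values in constants.py), the origin (0, 0) becomes (4, 4), scales by
    #128 to (512, 512), and so lands at the center pixel of the frame.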
points += np.array(
[SPACE_WIDTH, SPACE_HEIGHT]*points.shape[0]
).reshape(points.shape)
points *= np.array(
[HEIGHT / (2.0 * SPACE_HEIGHT), WIDTH / (2.0 * SPACE_WIDTH)]*\
points.shape[0]
).reshape(points.shape)
points = points.astype('int')
flattener = np.array([1, WIDTH], dtype = 'int').reshape((2, 1))
indices = np.dot(points, flattener)
indices = indices.reshape(indices.size)
admissibles = (indices < HEIGHT * WIDTH) * (indices > 0)
indices = indices[admissibles]
rgbs = rgbs[admissibles]
rgbs = (rgbs * 255).astype(int)
pixels = np.zeros((HEIGHT * WIDTH, 3))
pixels[indices] = rgbs
return pixels.reshape((HEIGHT, WIDTH, 3)).astype('uint8')
def write_to_gif(animation, name):
#TODO, find better means of compression
if not name.endswith(".gif"):
name += ".gif"
filepath = os.path.join(GIF_DIR, name)
temppath = os.path.join(GIF_DIR, "Temp.gif")
print "Writing " + name + "..."
writeGif(temppath, animation.get_frames(), animation.pause_time)
print "Compressing..."
os.system("gifsicle -O " + temppath + " > " + filepath)
os.system("rm " + temppath)
def write_to_movie(animation, name):
frames = animation.get_frames()
progress_bar = progressbar.ProgressBar(maxval=len(frames))
progress_bar.start()
print "writing " + name + "..."
tmp_stem = os.path.join(TMP_IMAGE_DIR, name.replace("/", "_"))
suffix = "-%04d.png"
image_files = []
for frame, count in zip(frames, it.count()):
progress_bar.update(int(0.9 * count))
frame.save(tmp_stem + suffix%count)
image_files.append(tmp_stem + suffix%count)
filepath = os.path.join(MOVIE_DIR, name + ".mp4")
filedir = "/".join(filepath.split("/")[:-1])
if not os.path.exists(filedir):
os.makedirs(filedir)
commands = [
"ffmpeg",
"-y",
"-loglevel",
"error",
"-i",
tmp_stem + suffix,
"-c:v",
"libx264",
"-vf",
"fps=%d,format=yuv420p"%int(1/animation.pause_time),
filepath
]
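    #With the default constants (pause_time = 0.04, hence fps = 25) and a name
    #like "Example", the assembled command is roughly:
    #  ffmpeg -y -loglevel error -i /tmp/animation_images/Example-%04d.png \
    #      -c:v libx264 -vf fps=25,format=yuv420p <MOVIE_DIR>/Example.mp4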
os.system(" ".join(commands))
for image_file in image_files:
os.remove(image_file)
progress_bar.finish()
# filepath = os.path.join(MOVIE_DIR, name + ".mov")
# fourcc = cv2.cv.FOURCC(*"8bps")
# out = cv2.VideoWriter(
# filepath, fourcc, 1.0/animation.pause_time, (WIDTH, HEIGHT), True
# )
# progress = 0
# for frame in frames:
# if progress == 0:
# print "Writing movie"
# progress_bar.update(progress)
# r, g, b = cv2.split(np.array(frame))
# bgr_frame = cv2.merge([b, g, r])
# out.write(bgr_frame)
# progress += 1
# out.release()
# progress_bar.finish()

0
epii/__init__.py Normal file

787 epii/cccd_animations.py Normal file
@@ -0,0 +1,787 @@
from PIL import Image
from animate import *
from mobject import *
from constants import *
from helpers import *
from tex_image_utils import load_pdf_images
from displayer import *
import itertools as it
import os
import numpy as np
from copy import deepcopy
from epii_animations import MULTIPLIER_COLOR, ADDER_COLOR, ONE_COLOR
CCCD_MOVIE_DIR = "cccd"
symbol_images = load_pdf_images("cccd_symbols.pdf", regen_if_exists = False)
phrase_images = load_pdf_images("cccd_phrases.pdf", regen_if_exists = False)
name_to_image = dict(
zip([
"two",
"minus_1",
"i",
"x_squared",
"four",
"one",
"multiplication_function",
"deriv_def_base",
"deriv_def_inner_e_to_x",
"deriv_def_plus_h",
"deriv_def_e_to_h",
"deriv_def_one",
"deriv_def_outer_e_to_x",
"series_terms",
"series_exponents",
"series_exponents_minus_1",
"d_series_coefficients",
"one_plus",
"d_series_simple",
"deriv_x_squared",
"deriv_e_to_x",
"question_mark",
], symbol_images) + zip([
"complex_derivative_title",
"limit_explanation",
"velocity_vector_explanation",
"both_same_point",
"maybe_like_this",
"or_this",
"why_vectors",
"pause_and_ponder",
"think_in_pictures",
"remember_this",
], phrase_images)
)
def function_of_numbers():
kwargs = {"dither_time" : 0}
two, minus_1, i, x_squared, four, one = [
ImageMobject(name_to_image[name])
for name in ["two", "minus_1", "i", "x_squared",
"four", "one"]
]
minus_1_copy = copy.deepcopy(minus_1)
for mob1, mob2, height in [
(two, four, 2),
(minus_1, one, 0),
(i, minus_1_copy, -2)
]:
mob1.center().shift((-2, height, 0))
mob2.center().shift((2, height, 0))
x_squared.center()
point = Point()
inputs = CompoundMobject(two, minus_1, i)
outputs = CompoundMobject(four, one, minus_1_copy)
return Transform(
inputs, point, **kwargs
).then(
Transform(point, outputs, **kwargs)
).with_background(x_squared)
def real_function_graph():
graph = NumberLine()
graph.add(NumberLine().rotate(np.pi / 2))
int_size = graph.interval_size
min_x = SPACE_WIDTH / int_size
graph.add(FunctionGraph(
lambda x : x**2,
x_range = [-min_x, min_x]
))
point = graph.points[-20,:]
line = Line((2, 0, 0), (2, 1.1, 0)) #Terrible...
line.highlight("yellow")
return ShowCreation(graph).then(ShowCreation(line).with_background(graph))
def two_grids():
grid1, grid2 = Grid(), Grid()
return ShowCreation(grid1).then(
Rotating(grid1, run_time = 7.0, radians = np.pi / 3).while_also(
ShowCreation(grid2, dither_time = 2.0).while_also(
Rotating, axis = [1, 1, 0]
)
)
)
def z_squared():
return ComplexHomotopy(square_homotopy)
def z_squared_marked():
#Hard coded 2, i, -1
return ComplexHomotopy(
lambda (z, t) : z**(1 + t)
).while_also(
ComplexHomotopy(
lambda (z, t) : z + complex(2, 0)**(1 + t),
Cross(
color = random_color()
)
)
).while_also(
ComplexHomotopy(
lambda (z, t) : z + complex(-1, 0)**(1 + t),
Cross(
color = random_color()
)
)
).while_also(
ComplexHomotopy(
lambda (z, t) : z + complex(0, 1)**(1 + t),
Cross(
color = random_color()
)
)
)
def multiplier_in_the_wild(point):
func = ComplexFunction(lambda z : z*point)
one = Cross().highlight(ONE_COLOR).shift((1, 0, 0))
point = Cross().highlight(
MULTIPLIER_COLOR
).shift((point.real, point.imag, 0))
func_copy = copy.deepcopy(func)
return func.then(
func_copy.while_also(
Transform(one, point, run_time = DEFAULT_ANIMATION_RUN_TIME)
)
).then(
Animation(
Grid(), run_time = 2.0, dither_time = 0
).with_background(point)
)
def random_looking_function():
wongky_map = Transform(
Grid(), CubeShell().scale(SPACE_HEIGHT),
alpha_func = lambda t : 0.5 * high_inflection_0_to_1(t),
run_time = 3.0
)
return wongky_map.then(copy.deepcopy(wongky_map).reverse())
def zoom_in_on_map(function, is_homotopy, point, zoom_level):
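    #Splits the screen: the left half shows the full map with a circle shrinking
    #onto `point`, the right half shows a blown-up view of the behavior near
    #`point`, rescaled by big_radius / zoom_level. `function` is either a plain
    #complex map or a homotopy (z, t) -> z', selected via is_homotopy.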
center_line = ParametricFunction(lambda t : (0, t * SPACE_HEIGHT, 0))
half_width = SPACE_WIDTH / 2
left_center = (-half_width + center_line.epsilon, 0, 0)
right_center = (half_width - center_line.epsilon, 0, 0)
left_divider = copy.deepcopy(center_line).shift(right_center)
right_divider = copy.deepcopy(center_line).shift(left_center)
point = complex(point)
outer_circle = Circle().scale(SPACE_HEIGHT + SPACE_WIDTH + half_width)
inner_circle = Circle().scale(zoom_level).shift(
(point.real, point.imag, 0)
)
outer_to_inner = Transform(outer_circle, inner_circle).with_background(Grid())
big_radius = min(half_width, SPACE_HEIGHT)
big_circle = Circle().scale(big_radius)
bts_ratio = big_radius / zoom_level
half_grid = Grid().filter_out(lambda (x, y, z) : abs(x) > half_width or y > SPACE_HEIGHT)
one = Cross().shift((1, 0, 0)).highlight(ONE_COLOR)
if is_homotopy:
global_function = ComplexHomotopy(function, copy.deepcopy(half_grid))
def local_homotopoy((z, t)):
return bts_ratio * (function((z/bts_ratio + point, t)) - function((point,t)))
local_function = ComplexHomotopy(local_homotopoy, copy.deepcopy(half_grid))
one_following = ComplexHomotopy(
lambda (z, t) : z + local_homotopoy((1, t)) - 1,
one
)
circle_following = ComplexHomotopy(
lambda (z, t) : z + function((point, t)) - point,
inner_circle
)
else:
global_function = ComplexFunction(function, copy.deepcopy(half_grid))
def local_lambda(z):
return bts_ratio*(function(z/bts_ratio + point) - function(point))
local_function = ComplexFunction(local_lambda, copy.deepcopy(half_grid))
one_following = ComplexFunction(lambda z : z + local_lambda(1) - 1, one)
circle_following = ComplexFunction(
lambda z : z + function(point) - point,
inner_circle
)
zoom_region = Circle().scale(zoom_level)
zoom_region.add(Grid().filter_out(lambda p : np.linalg.norm(p) > zoom_level))
zoom_region.shift(left_center).shift((point.real, point.imag, 0))
zoom_in = ComplexFunction(
lambda z : bts_ratio * (z - point - complex(left_center[0], left_center[1])) + \
complex(right_center[0], right_center[1]),
zoom_region
).set_run_time(DEFAULT_TRANSFORM_RUN_TIME)
grow_local_grid = ShowCreation(
Grid().filter_out(lambda p : np.linalg.norm(p) > big_radius),
run_time = 1.0
).with_background(big_circle)
def out_of_circle(points):
return np.apply_along_axis(np.linalg.norm, 1, points) > big_radius
local_function.filter_out(out_of_circle)
for anim in global_function, outer_to_inner, circle_following:
anim.shift(left_center).restrict_width(half_width).with_background(left_divider)
for anim in local_function, grow_local_grid, one_following:
anim.shift(right_center).with_background(right_divider, big_circle)
for anim in outer_to_inner, zoom_in, grow_local_grid:
anim.set_dither(0)
#Kind of hacky...one day there will be a better way of doing this.
show_left_grid = Animation(Mobject().add_points(*global_function.get_points_and_rgbs()))
show_left_grid.with_background(copy.deepcopy(inner_circle).shift(left_center))
return outer_to_inner.then(
zoom_in.while_also(show_left_grid),
).then(
grow_local_grid.while_also(show_left_grid).while_also(zoom_in)
).then(
global_function.while_also(
local_function
).while_also(
circle_following
).while_also(
one_following
)
)
def z_squared_derivative_example(point, zoom_level):
point = complex(point)
point_coords = np.array((point.real, point.imag, 0))
circle = Circle().scale(zoom_level).shift(point_coords)
z = Cross(color = Circle.DEFAULT_COLOR).shift(point_coords)
two_z = Cross(color = MULTIPLIER_COLOR).shift(2*point_coords)
zero = Cross()
one = Cross(color = ONE_COLOR).shift((1, 0, 0))
plane = Grid()
return Transform(circle, z).with_background(plane, zero).then(
Transform(z, two_z).with_background(
plane, zero
).while_also(
Reveal(one).with_background(
two_z, plane, zero
)
).set_dither(0)
).then(
ComplexFunction(lambda c : c * 2 * point, plane).with_background(
two_z, zero
).while_also(
ComplexFunction(lambda c : c - 1 + 2 * point, one)
)
)
def z_squared_derivative_2z(zoom_level):
circles = Mobject()
crosses = Mobject()
mini_maps = []
mini_grids = Mobject()
example_range = range(-3, 4, 2)
for x in example_range:
for y in example_range:
circle = Circle().scale(zoom_level).shift((x, y, 0))
cross = Cross().shift((2*x, 2*y, 0))
mini_grid = Grid(
radius = zoom_level,
subinterval_size = 0.25
).filter_out(
lambda point : np.linalg.norm(point) > zoom_level
)
mini_maps.append(
ComplexFunction(
lambda z : (z + complex(x, y))**2 - complex(x, y)**2,
mini_grid
).filter_out(
lambda points : np.apply_along_axis(np.linalg.norm, 1, points) \
> zoom_level
).with_background(
Circle().scale(zoom_level)
).shift((x, y, 0)).set_dither(0)
)#generate frames so that lambda doesn't change
mini_grids.add(copy.deepcopy(mini_grid).shift((x, y, 0)))
Mobject.align_data(circle, cross)
circles.add(circle)
crosses.add(cross)
all_mini_maps = reduce(Animation.while_also, mini_maps)
crosses.highlight(MULTIPLIER_COLOR)
return FadeOut(Grid()).while_also(
Animation(CompoundMobject(circles, mini_grids))
).then(
all_mini_maps.with_background(circles)
).then(
ComplexFunction(lambda z : 2*z).while_also(
Transform(
circles, crosses,
run_time = DEFAULT_ANIMATION_RUN_TIME
)
)
)
def visualize_exp():
kwargs1 = {"run_time" : 2.0, "dither_time" : 1.0}
kwargs2 = {"run_time" : 2.0, "dither_time" : 0.0}
cylinder = Grid().apply_function(
lambda (x, y, z) : (x, np.sin(y), np.cos(y))
).rotate(np.pi/9, [0, 1, 0])
exp_plane = Grid().apply_complex_function(np.exp)
rotating_cyl = Rotating(cylinder, radians = np.pi/5, run_time = 5.0)
return Transform(Grid(), cylinder, **kwargs1).then(
Transform(cylinder, exp_plane, **kwargs2)
)
def derivative_definition():
base, inner_e_to_x, plus_h, e_to_h, one, outer_e_to_x = [
ImageMobject(name_to_image["deriv_def_" + name])
for name in [
"base",
"inner_e_to_x",
"plus_h",
"e_to_h",
"one",
"outer_e_to_x",
]
]
shift = (-0.2, -0.2, 0)
base.shift(shift)
outer_e_to_x.shift(shift)
limit_explanation = ImageMobject(name_to_image["limit_explanation"])
limit_explanation.shift((1, -2, 0))
return Transform(plus_h, e_to_h).with_background(
base, inner_e_to_x
).then(
Transform(inner_e_to_x, outer_e_to_x).while_also(
Reveal(one, dither_time = 1.0, run_time = 1.0)
).with_background(base, e_to_h)
).then(
Reveal(limit_explanation).with_background(
base, outer_e_to_x, one, e_to_h
)
)
def take_derivative_of_series():
series_terms, series_exponents, series_exponents_minus_1, \
d_series_coefficients, one_plus, d_series_simple = [
ImageMobject(name_to_image[name])
for name in [
"series_terms",
"series_exponents",
"series_exponents_minus_1",
"d_series_coefficients",
"one_plus",
"d_series_simple",
]
]
coefficients = copy.deepcopy(series_exponents)
fraction_bars = copy.deepcopy(series_exponents)
coefficients.filter_out(lambda (x, y, z) : y < 0.5)
series_exponents_minus_1.filter_out(lambda (x, y, z) : y < 0.5)
fraction_bars.filter_out(lambda (x, y, z) : y > 0.5)
exponenets_to_coefficients = Homotopy(
lambda (x, y, z, t) : (x - 0.5*t, y - 0.5*t + np.sin(np.pi * t), z),
coefficients
).with_background(fraction_bars)
d_series_non_simple = CompoundMobject(
series_terms,
copy.deepcopy(coefficients).shift((-0.5, -0.5, 0)),
series_exponents_minus_1,
series_exponents
)
d_series_simple.center().shift((1, 0, 0))
#Yeah, this is totally good programming...
fdxc = [-2, -0.4, 1.3] #first dividing x coordinates
sdxc = [-1.7, -0.8, .3] #second dividing x coordinates
broken_series_terms = [
copy.deepcopy(d_series_non_simple).filter_out(
lambda (x, y, z) : x > fdxc[0]),
copy.deepcopy(d_series_non_simple).filter_out(
lambda (x, y, z) : x > fdxc[1] or x < fdxc[0]),
copy.deepcopy(d_series_non_simple).filter_out(
lambda (x, y, z) : x > fdxc[2] or x < fdxc[1]),
copy.deepcopy(d_series_non_simple).filter_out(
lambda (x, y, z) : x < fdxc[2]),
]
broken_dseries_terms = [
copy.deepcopy(d_series_simple).filter_out(
lambda (x, y, z) : x > sdxc[0]),
copy.deepcopy(d_series_simple).filter_out(
lambda (x, y, z) : x > sdxc[1] or x < sdxc[0]),
copy.deepcopy(d_series_simple).filter_out(
lambda (x, y, z) : x > sdxc[2] or x < sdxc[1]),
copy.deepcopy(d_series_simple).filter_out(
lambda (x, y, z) : x < sdxc[2]),
]
simplify = None
for term1, term2 in zip(broken_series_terms, broken_dseries_terms):
anim = Transform(term1, term2)
if simplify:
simplify.while_also(anim)
else:
simplify = anim
series_terms.add(series_exponents)
return exponenets_to_coefficients.while_also(
Reveal(series_exponents_minus_1).with_background(
series_terms, series_exponents
)
).while_also(
Transform(one_plus, Point(one_plus.get_center()))
).set_dither(1.0, True).set_run_time(1.0, True).then(
simplify
)
def e_to_x_takes_adder_to_multiplier(point):
point = complex(point)
point_coords = (point.real, point.imag, 0)
image = np.exp(point)
image_coords = (image.real, image.imag, 0)
adder_cross = Cross().shift(point_coords).highlight(ADDER_COLOR)
multi_cross = Cross().shift(image_coords).highlight(MULTIPLIER_COLOR)
zero = Cross()
one = Cross().shift((1, 0, 0)).highlight(ONE_COLOR)
adder = ComplexFunction(
lambda z : z + point,
CompoundMobject(Grid(radius = SPACE_WIDTH + SPACE_HEIGHT), zero)
).with_background(adder_cross)
e_to_x = ComplexFunction(np.exp).while_also(
Transform(adder_cross, multi_cross, run_time = DEFAULT_ANIMATION_RUN_TIME)
)
multiplier = ComplexFunction(lambda z : image * z).while_also(
Transform(one, multi_cross, run_time = DEFAULT_ANIMATION_RUN_TIME)
).with_background(zero, multi_cross)
return adder.then(e_to_x).then(multiplier)
def e_to_x_derivative_zoom(point, zoom_level):
point = complex(point)
image_point = np.exp(point)
both_same_point = ImageMobject(
name_to_image["both_same_point"]
).shift((image_point.real, SPACE_HEIGHT - 1, 0))
left_point = (image_point.real - SPACE_WIDTH/2, image_point.imag, 0)
right_point = (image_point.real + SPACE_WIDTH/2, image_point.imag, 0)
left_arrow = Arrow(left_point, (-1, -1, 0)).highlight("white")
right_arrow = Arrow(right_point, (1, -1, 0)).highlight("white")
e_deriv_anim = zoom_in_on_map(np.exp, False, point, zoom_level)
#SUPER HACKY, YOU MUST MAKE A BETTER WAY TO DO THIS IN FUTURE
last_anim = e_deriv_anim.generate_frames().following_animations[-1]
background = Mobject()
for anim in [last_anim] + last_anim.concurrent_animations:
background.add_points(*anim.get_points_and_rgbs())
# background.display()
return e_deriv_anim.then(
Reveal(CompoundMobject(
left_arrow, right_arrow, both_same_point
)).with_background(background)
)
def other_possible_functions():
phrases = [
ImageMobject(name_to_image[name]).center().shift((0, -2, 0))
for name in ["maybe_like_this", "or_this"]
]
return ComplexFunction(np.sin).with_background(
phrases[0]
).then(
ComplexFunction(np.sinc).with_background(
phrases[1]
)
)
def setup_velocity_vector_discussion(zoom_level):
def homotopy((x, y, z, t)):
t = 3*t - 1.5
return (
x + t,
y + t**3 - t,
z
)
big_radius = SPACE_HEIGHT
def out_of_circle(points):
return np.apply_along_axis(np.linalg.norm, 1, points) > big_radius
landing_point = homotopy((0, 0, 0, 1))
cross = Cross().highlight(Circle.DEFAULT_COLOR)
small_circle = Circle().scale(2*zoom_level)
big_circle = Circle().scale(big_radius)
new_cross = copy.deepcopy(cross)
for mob in new_cross, small_circle:
mob.shift(landing_point)
wandering = Homotopy(homotopy, cross)
one = Cross().highlight(ONE_COLOR).shift((1, 0, 0))
multiply = ComplexFunction(
lambda z : z * complex(landing_point[0], landing_point[1])
).filter_out(out_of_circle)
return wandering.then(
Transform(small_circle, big_circle).with_background(
new_cross
)
).then(
multiply.with_background(big_circle).while_also(
Transform(one, new_cross, run_time = DEFAULT_ANIMATION_RUN_TIME)
).with_background(
new_cross
)
)
def traced_path():
path = ParametricFunction(
lambda t : (
np.sin(2 * np.pi * t),
(t-1)**2 + 1,
0
)
)
new_path = copy.deepcopy(path).apply_complex_function(np.exp)
return ShowCreation(path).then(
Transform(path, new_path)
)
def walking_north(start_point, vector_len):
vv_explanation = ImageMobject(name_to_image["velocity_vector_explanation"])
vv_explanation.scale(0.75).shift((0, -3, 0))
walk_kwargs = {"alpha_func" : None, "run_time" : 5.0}
start_point = complex(start_point)
end_point = start_point + complex(0, 4)
start_coords = (start_point.real, start_point.imag, 0)
end_coords = (end_point.real, end_point.imag, 0)
vector = Vector((0, vector_len, 0)).shift(start_coords)
vector.add(vv_explanation)
start_cross = Cross().shift(start_coords)
end_cross = Cross().shift(end_coords)
path = Line(start_coords, end_coords).highlight(start_cross.DEFAULT_COLOR)
return Transform(
start_cross, end_cross, **walk_kwargs
).with_background(Grid()).while_also(
ShowCreation(path, **walk_kwargs)
).then(
Animation(vector), True
)
def map_north_vector(start_point, vector_len, zoom_level):
question_mark = ImageMobject(name_to_image["question_mark"]).center()
kwargs = {
"run_time" : DEFAULT_ANIMATION_RUN_TIME,
"dither_time" : DEFAULT_DITHER_TIME
}
start_point = complex(start_point)
image_point = np.exp(start_point)
start_coords = np.array((start_point.real, start_point.imag, 0))
image_coords = np.array((image_point.real, image_point.imag, 0))
vector = Vector((0, vector_len, 0)).add(Circle().scale(zoom_level))
vimage = copy.deepcopy(vector)
image_len = np.linalg.norm(image_coords)
image_arg = np.log(image_point).imag
stretched = copy.deepcopy(vimage).scale(image_len)
vector.shift(start_coords)
for vect in vimage, stretched:
vect.shift(image_coords)
question_mark.shift(image_coords + (0.3, 0, 0))
line_to_image = Line((0, 0, 0), image_coords)
line_along_horiz = Line((0, 0, 0), (image_len, 0, 0)).highlight(Grid.DEFAULT_COLOR)
return Transform(vector, vimage).then(
Reveal(question_mark, dither_time = 0).with_background(vimage)
).then(
Transform(vimage, stretched, **kwargs).while_also(
ShowCreation(line_to_image, **kwargs)
)
).with_background(Grid()).then(
RotationAsTransform(
copy.deepcopy(stretched).shift(-image_coords),
radians = image_arg, **kwargs
).shift(image_coords).while_also(
RotationAsTransform(
line_along_horiz, radians = image_arg, **kwargs
).with_background(Grid(), line_to_image)
)
)
def all_possible_vectors(vector_len):
tvkwargs = {"run_time" : 2.0, "dither_time" : 0}
turn_vectors = Animation(Grid(), **tvkwargs)
prototype = Vector((0, vector_len, 0))
start_vectors = Mobject()
final_vectors = Mobject()
radii = []
example_range = range(-3, 4)
for x, y in it.product(*[example_range]*2):
length = np.linalg.norm((x, y))
arg = np.log(complex(x, y)).imag
new = copy.deepcopy(prototype)
turn_vectors.while_also(
ComplexFunction(
lambda z : z * complex(x, y),
new, **tvkwargs
).shift((x, y, 0))
)
if length > 0:
radii.append(length)
start_vectors.add(copy.deepcopy(new).shift((x, y, 0)))
final_vectors.add(copy.deepcopy(new).scale(length).rotate(arg).shift((x, y, 0)))
to_vectors = Transform(prototype, start_vectors, **tvkwargs).with_background(Grid())
turn_vectors.set_name("TurnVectors")
radii = sorted(set(radii))
show_all_circles = Reveal(CompoundMobject(*[
Circle().scale(radius) for radius in radii
]), dither_time = 2.0)
show_all_circles.with_background(Grid(), final_vectors)
show_all_circles.set_name("ShowAllCircles")
return to_vectors.then(turn_vectors).then(show_all_circles)
class VelocityVectorsOfPath(Animation):
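    #Slides a velocity vector along `path`: each frame the vector is rotated to
    #point along the local direction of the path and scaled by the local speed
    #relative to the path's average step length.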
def __init__(self, path, vector_len = 1, alpha_func = None, *args, **kwargs):
self.path = path.points
diffs = path.points[1:,:] - path.points[:-1, :]
self.unit_distance = np.mean(np.apply_along_axis(np.linalg.norm, 1, diffs))
Animation.__init__(
self, Vector(point = (vector_len, 0, 0)), alpha_func = alpha_func
)
self.with_background(path)
def update_mobject(self, alpha):
index = int(alpha * self.path.shape[0])
if index >= self.path.shape[0] - 1:
return
point1, point2 = self.path[index, :], self.path[index + 1, :]
diff = (point2 - point1)
distance = np.linalg.norm(diff)
arg = np.log(complex(diff[0], diff[1])).imag
self.mobject.points = self.starting_mobject.points * (distance / self.unit_distance)
self.mobject.rotate(arg).shift(point1)
def map_trajectories(vector_len, path_func):
half_width = SPACE_WIDTH / 2
left_center = np.array((-half_width, 0, 0))
right_center = np.array((half_width, 0, 0))
dividing_line = Line((half_width, SPACE_HEIGHT, 0), (half_width, -SPACE_HEIGHT, 0))
left_grid = Grid(radius = SPACE_HEIGHT)
left_path = ParametricFunction(path_func).highlight("white")
right_path = copy.deepcopy(left_path).apply_complex_function(np.exp)
right_grid = copy.deepcopy(left_grid).apply_complex_function(np.exp)
for grid in left_grid, right_grid:
grid.filter_out(
lambda (x, y, z) : abs(x) > half_width
)
left_start = Cross().shift(left_path.points[0, :])
right_start = Cross().shift(right_path.points[0, :])
apply_function = ComplexFunction(
np.exp, copy.deepcopy(left_grid)
).restrict_width(half_width + 0.1) #STUPID HACK
move_right_grid = ComplexFunction(
lambda z : z + SPACE_WIDTH,
copy.deepcopy(right_grid).shift(left_center),
run_time = DEFAULT_TRANSFORM_RUN_TIME
).with_background(copy.deepcopy(left_grid).shift(left_center))
draw_left_path = ShowCreation(left_path)
draw_right_path = ShowCreation(right_path)
show_left_start = Reveal(left_start)
show_right_start = Reveal(right_start)
left_vectors = VelocityVectorsOfPath(left_path, vector_len)
right_vectors = VelocityVectorsOfPath(right_path, vector_len)
for anim in apply_function, draw_left_path, show_left_start, left_vectors:
anim.shift(left_center).with_background(
left_grid, dividing_line
)
for anim in draw_right_path, show_right_start, right_vectors:
anim.shift(right_center).with_background(right_grid)
for anim in draw_left_path, draw_right_path, show_left_start, show_right_start:
anim.set_dither(0)
for anim, bg in (show_left_start, left_path), (show_right_start, right_path):
anim.set_alpha_func(there_and_back).with_background(bg)
left_vectors.with_background(left_path)
right_vectors.with_background(right_path)
return apply_function.then(move_right_grid).then(
draw_left_path.while_also(draw_right_path)
).then(
show_left_start.while_also(show_right_start)
).then(
left_vectors.while_also(right_vectors)
)
if __name__ == '__main__':
example_complex = complex(1, 1)
example_complex2 = complex(2, -1)
zoom_level = 0.5
strong_zoom_level = 0.1
vector_len = 0.5
def square_homotopy((z, t)):
return z**(1 + t)
def example_walk(t):
return ((t + 1)/2, ((t + 1)/2)**2, 0)
def walk_imaginary_axis(t):
return (0, np.pi * (t + 1), 0)
functions = [
# function_of_numbers,
# real_function_graph,
# two_grids,
# z_squared,
# z_squared_marked,
# (multiplier_in_the_wild, [example_complex2]),
# random_looking_function,
# (zoom_in_on_map, [square_homotopy, True, example_complex, zoom_level]),
# (zoom_in_on_map, [square_homotopy, True, example_complex, strong_zoom_level]),
# (z_squared_derivative_example, [example_complex, zoom_level]),
# (z_squared_derivative_2z, [zoom_level]),
# visualize_exp,
# take_derivative_of_series,
# derivative_definition,
# (e_to_x_takes_adder_to_multiplier, [example_complex]),
# (e_to_x_derivative_zoom, [example_complex, strong_zoom_level]),
# other_possible_functions,
# (setup_velocity_vector_discussion, [strong_zoom_level]),
# traced_path,
# (walking_north, [example_complex, vector_len]),
# (map_north_vector, [example_complex, vector_len, strong_zoom_level]),
# (all_possible_vectors, [vector_len]),
# (map_trajectories, [vector_len, example_walk]),
# (map_trajectories, [vector_len, walk_imaginary_axis])
]
full_path = os.path.join(MOVIE_DIR, CCCD_MOVIE_DIR)
if not os.path.exists(full_path):
os.mkdir(full_path)
for func in functions:
args = []
if isinstance(func, tuple):
func, args = func
name = os.path.join(
CCCD_MOVIE_DIR,
to_cammel_case(func.__name__) + hash_args(args)
)
func(*args).write_to_movie(name)
for anim in [
# ComplexFunction(lambda z : 0.1*(z**3 - z**2 + 3)),
# ComplexFunction(np.exp, Grid(radius = SPACE_HEIGHT)),
]:
anim.write_to_movie(os.path.join(CCCD_MOVIE_DIR, str(anim)))
for name in [
# "complex_derivative_title",
# "why_vectors",
# "pause_and_ponder",
# "think_in_pictures",
# "remember_this",
# "deriv_x_squared",
# "deriv_e_to_x",
# "multiplication_function",
]:
ImageMobject(name_to_image[name]).center().save_image(
os.path.join(CCCD_MOVIE_DIR, to_cammel_case(name))
)

@@ -0,0 +1,16 @@
from PIL import Image
import itertools as it
from constants import *
from helpers import invert_image
from tex_image_utils import load_pdf_images
if __name__ == "__main__":
folder = os.path.join(MOVIE_DIR, "dexp")
if not os.path.exists(folder):
os.makedirs(folder)
images = load_pdf_images("discover_exp.pdf", regen_if_exists = False)
for image, count in zip(images, it.count()):
filepath = os.path.join(folder, "dexp-%d.png"%count)
invert_image(image).save(filepath)

1141 epii/epii_animations.py Normal file
File diff suppressed because it is too large

298 epii/poem_animations.py Normal file
@@ -0,0 +1,298 @@
#!/usr/bin/env python
from PIL import Image
from animate import *
from mobject import *
from constants import *
from helpers import *
from tex_image_utils import load_pdf_images
from displayer import *
import itertools as it
import os
import numpy as np
from copy import deepcopy
from epii_animations import name_to_image
PI_COLOR = "red"
E_COLOR = "skyblue"
I_COLOR = "green"
ADDER_COLOR = "limegreen"
MULTIPLIER_COLOR = "yellow"
ONE_COLOR = "skyblue"
POEM_MOVIE_DIR = "poem"
symbol_images = load_pdf_images("epii_poem.pdf", regen_if_exists = False)
RUN_TIMES = [
0.4,
0.4,
0.4,
0.4,
0.4,
0.4,
0.4,
0.4,
]
DITHER_TIMES = [
0,
0.1,
0,
0.1,
0,
0.05,
0.0,
0.1,
]
LAST_FRAME_REST_KWARGS = {"run_time" : 1.0, "dither_time" : 0}
LINE_KWARGS = [
{"run_time" : run_time, "dither_time" : dither_time}
for run_time, dither_time in zip(RUN_TIMES, DITHER_TIMES)
]
LINES_PER_VERSE = 8
LINES_PER_LAST_VERSE = 4
VERSES = 10
def get_text_transitions(verse):
num_lines = LINES_PER_LAST_VERSE if (verse == VERSES - 1) else LINES_PER_VERSE
lines = [
ImageMobject(symbol_images[LINES_PER_VERSE * verse + x])
for x in range(num_lines)
]
lines[2].shift((-1, 0, 0))
transitions = []
for x in range(num_lines):
if x == 0:
transition = Animation(lines[x], **LINE_KWARGS[x])
elif x == 1:
transition = Reveal(lines[x], **LINE_KWARGS[x])
elif x in range(2, num_lines-1):
transition = Transform(lines[x-2], lines[x], **LINE_KWARGS[x])
else:
transition = Transform(
CompoundMobject(lines[x-2], lines[x-1]), lines[x],
**LINE_KWARGS[x]
)
if x in range(1, num_lines-1):
transition.with_background(lines[x - 1])
transitions.append(transition)
return transitions
def augment_verse_0(transitions):
mobs = [e, pi, i, equals_neg1] = [
ImageMobject(name_to_image[name])
for name in ["e", "pi", "i", "equals_neg1"]
]
center = CompoundMobject(*mobs).get_center()
for mob in mobs:
mob.shift(-center)
for x, mob in zip([1, 2, 3, 7], mobs):
transitions[x].while_also(ShowCreation(mob, **LINE_KWARGS[x]))
for y in range(x + 1, LINES_PER_VERSE):
transitions[y].with_background(mob)
def augment_verse_1(transitions):
e, pi, i, e_by_e_pi_i_times = [
ImageMobject(name_to_image[name])
for name in ["e", "pi", "i", "e_by_e_pi_i_times"]
]
epii = CompoundMobject(e, pi, i).center()
for x in range(4):
transitions[x].with_background(epii)
transitions[4].while_also(Transform(epii, e_by_e_pi_i_times, **LINE_KWARGS[4]))
for x in range(5, LINES_PER_VERSE):
transitions[x].with_background(e_by_e_pi_i_times)
def augment_verse_2(transitions):
e, pi, i, pi_question, i_question = [
ImageMobject(name_to_image[name])
for name in ["e", "pi", "i", "pi_question", "i_question"]
]
center = CompoundMobject(e, pi, i).get_center()
for mob in e, pi, i:
mob.shift(-center)
pi.highlight(PI_COLOR)
pi_question.highlight(PI_COLOR).shift((-1, -1, 0))
i.highlight(I_COLOR)
i_question.highlight(I_COLOR).shift((1, 1, 0))
for x in [2, 3]:
transitions[x].with_background(pi_question, i_question)
transitions[4].while_also(
Transform(pi_question, pi, **LINE_KWARGS[4])
).with_background(i_question)
transitions[5].while_also(
Transform(i_question, i, **LINE_KWARGS[5])
).with_background(pi)
for x in [6, 7]:
transitions[x].with_background(pi, i)
transitions[7].while_also(Reveal(e, **LINE_KWARGS[7]))
def augment_verse_3(transitions):
one, i, minus, two, three_point_five = [
ImageMobject(name_to_image[name])
for name in ["one", "i", "minus", "two", "three_point_five"]
]
minus.shift((-0.8, 0.25, 0))
minus_two = CompoundMobject(minus, two)
nums = [one, i, minus_two, three_point_five]
for num in nums:
num.center().shift((0.5, 0, 0)).highlight(ADDER_COLOR)
i.scale(2)
plane = Grid(radius = SPACE_WIDTH + SPACE_HEIGHT)
transitions[3].while_also(ShowCreation(plane, **LINE_KWARGS[3]))
for x, c, num in zip([4, 5, 6, 7], [1, complex(0, 1), -2, 3.5], nums):
transitions[x].while_also(
ComplexFunction(lambda z : z + c, plane, **LINE_KWARGS[x])
).with_background(num)
def augment_verse_4(transitions):
i, two = [
ImageMobject(name_to_image[name])
for name in ["i", "two"]
]
for num in i, two:
num.center().shift((0.5, 0, 0)).highlight(MULTIPLIER_COLOR)
plane = Grid(radius = SPACE_WIDTH + SPACE_HEIGHT)
for x in [0, 1, 2, 3, 6, 7]:
transitions[x].with_background(plane)
transitions[4].while_also(
RotationAsTransform(plane, np.pi/2, **LINE_KWARGS[4])
).with_background(i)
transitions[5].while_also(
ComplexFunction(lambda z : 2*z, plane, **LINE_KWARGS[5])
).with_background(two)
def augment_verse_5(transitions):
e_to_x = ImageMobject(name_to_image["e_to_x"]).center()
for transition in transitions:
transition.with_background(e_to_x)
def augment_verse_6(transitions):
e_to_x, e_by_e_x_times, not_what_is_happening, two, e_to_2 = [
ImageMobject(name_to_image[name])
for name in ["e_to_x", "e_by_e_x_times", "not_what_is_happening",
"two", "e_to_2_value"]
]
two.center().shift((-2, 0, 0)).highlight(ADDER_COLOR)
e_to_2.center().shift((2, 0, 0)).highlight(MULTIPLIER_COLOR)
e_to_x.center()
point = Point()
not_what_is_happening.rotate(np.pi/7).highlight("red")
transitions[0].while_also(Transform(e_to_x, e_by_e_x_times, **LINE_KWARGS[0]))
transitions[1].while_also(
Reveal(not_what_is_happening, **LINE_KWARGS[1])
).with_background(e_by_e_x_times)
for x in [2, 3]:
transitions[x].with_background(e_by_e_x_times, not_what_is_happening)
transitions[4].with_background(two, e_to_x)
transitions[5].while_also(
Transform(two, point, **LINE_KWARGS[5])
).with_background(e_to_x)
transitions[6].while_also(
Transform(point, e_to_2, **LINE_KWARGS[6])
).with_background(e_to_x)
transitions[7].with_background(e_to_2, e_to_x)
def augment_verse_7(transitions):
plane = Grid(SPACE_HEIGHT + SPACE_WIDTH)
for x, c in zip([0, 1, 4, 5], [1, -1, complex(0, 1), complex(0, -1)]):
transitions[x].while_also(
ComplexFunction(lambda z : z + c, plane, **LINE_KWARGS[x])
)
big_plane = copy.deepcopy(plane).scale(2)
for x, c, mob in zip([2, 3], [2, 0.5], [plane, big_plane]):
transitions[x].while_also(
ComplexFunction(lambda z : z*c, mob, **LINE_KWARGS[x])
)
rotated_plane = copy.deepcopy(plane).rotate(np.pi / 4)
for x, r, mob in zip([6, 7], [np.pi/4, -np.pi/4], [plane, rotated_plane]):
transitions[x].while_also(
RotationAsTransform(mob, r, **LINE_KWARGS[x])
)
def augment_verse_8(transitions):
pi, i, neg_1 = [
ImageMobject(name_to_image[name])
for name in ["pi", "i", "neg_1"]
]
pi_i = CompoundMobject(pi, i).center()
pi_i.shift((-0.3, np.pi, 0))
neg_1.center().shift((-1.2, 0, 0))
imaginaries = ParametricFunction(
lambda t : (0, np.pi * t, 0),
color = ADDER_COLOR
)
plane = Grid()
circle = Circle(color = MULTIPLIER_COLOR)
transitions[0].with_background(plane)
transitions[1].while_also(
Reveal(pi_i, **LINE_KWARGS[1])
).with_background(plane)
transitions[2].while_also(
ComplexFunction(lambda z : z + np.pi * complex(0, 1), **LINE_KWARGS[2])
).with_background(pi_i)
transitions[3].with_background(plane, pi_i)
transitions[4].while_also(
Transform(imaginaries, circle, **LINE_KWARGS[4])
).while_also(
Transform(pi, neg_1, **LINE_KWARGS[4])
).with_background(plane)
transitions[5].with_background(plane, neg_1, circle)
transitions[6].while_also(
RotationAsTransform(plane, np.pi, **LINE_KWARGS[6])
).with_background(circle, neg_1)
transitions[7].with_background(plane, circle, neg_1)
def augment_verse_9(transitions):
mobs = [e, pi, i, equals_neg1] = [
ImageMobject(name_to_image[name])
for name in ["e", "pi", "i", "equals_neg1"]
]
epii_neg1 = CompoundMobject(*mobs).center()
for transition in transitions:
transition.with_background(epii_neg1)
if __name__ == '__main__':
augment_verse = [
augment_verse_0,
augment_verse_1,
augment_verse_2,
augment_verse_3,
augment_verse_4,
augment_verse_5,
augment_verse_6,
augment_verse_7,
augment_verse_8,
augment_verse_9,
]
for verse in range(VERSES):
transitions = get_text_transitions(verse)
augment_verse[verse](transitions)
name = os.path.join(POEM_MOVIE_DIR, "Verse%d"%verse)
reduce(Animation.then, transitions).write_to_movie(name)

44 generate_logo.py Normal file
@@ -0,0 +1,44 @@
#!/usr/bin/env python
from PIL import Image
from animate import *
from mobject import *
from constants import *
from helpers import *
import itertools as it
import os
import numpy as np
DARK_BLUE = "#236B8E"
DARK_BROWN = "#8B4513"
LIGHT_BROWN = "#CD853F"
size = 1.5
circle = Circle(color = 'skyblue').repeat(4).scale(size)
sphere = Sphere(density = 100, color = DARK_BLUE).scale(size)
sphere.rotate(-np.pi / 7, [1, 0, 0])
sphere.rotate(-np.pi / 7)
alpha = 0.3
iris = Mobject()
Mobject.interpolate(circle, sphere, iris, alpha)
for mob, color in [(iris, LIGHT_BROWN), (circle, DARK_BROWN)]:
mob.highlight(color, lambda (x, y, z) : x < 0 and y > 0)
mob.highlight("black", lambda point: np.linalg.norm(point) < size/2)
name = ImageMobject(NAME_TO_IMAGE_FILE["3Blue1Brown"]).center()
name.highlight("gray")
# name.highlight(DARK_BROWN, lambda (x, y, z) : x < 0 and y > 0)
name.shift((0, -2, 0))
create_eye = Transform(
circle, iris,
run_time = DEFAULT_ANIMATION_RUN_TIME,
name = "LogoGeneration"
).then(
Animation(name, dither_time = 0)
).drag_pixels()
create_eye.write_to_movie()
index = int(DEFAULT_ANIMATION_RUN_TIME / DEFAULT_ANIMATION_PAUSE_TIME)
create_eye.frames[index].save(LOGO_PATH)

140 helpers.py Normal file
@@ -0,0 +1,140 @@
import numpy as np
import itertools as it
from PIL import Image
from colour import Color
from random import random
import string
from constants import *
def hash_args(args):
args = map(lambda arg : arg.__name__ if type(arg) == type(hash_args) else arg, args)
return str(hash(str(args))%1000) if args else ""
def random_color():
color = Color()
color.set_rgb([1 - 0.5 * random() for x in range(3)])
return color
def to_cammel_case(name):
parts = name.split("_")
parts = [
filter(
lambda c : c not in string.punctuation + string.whitespace, part
).capitalize()
for part in parts
]
return "".join(parts)
def drag_pixels(images):
curr = np.array(images[0])
new_images = []
for image in images:
curr += (curr == 0) * np.array(image)
new_images.append(Image.fromarray(curr))
return new_images
def invert_image(image):
arr = np.array(image)
arr = (255 * np.ones(arr.shape)).astype(arr.dtype) - arr
return Image.fromarray(arr)
def make_even(iterable_1, iterable_2):
list_1, list_2 = list(iterable_1), list(iterable_2)
length = max(len(list_1), len(list_2))
return (
[list_1[(n * len(list_1)) / length] for n in xrange(length)],
[list_2[(n * len(list_2)) / length] for n in xrange(length)]
)
def make_even_by_cycling(iterable_1, iterable_2):
length = max(len(iterable_1), len(iterable_2))
cycle1 = it.cycle(iterable_1)
cycle2 = it.cycle(iterable_2)
return (
[cycle1.next() for x in range(length)],
[cycle2.next() for x in range(length)]
)
def sigmoid(x):
return 1.0/(1 + np.exp(-x))
def high_inflection_0_to_1(t, inflection = 10.0):
error = sigmoid(-inflection / 2)
return (sigmoid(inflection*(t - 0.5)) - error) / (1 - 2*error)
def there_and_back(t, inflection = 10.0):
new_t = 2*t if t < 0.5 else 2*(1 - t)
return high_inflection_0_to_1(new_t, inflection)
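#Both functions above map [0, 1] to [0, 1]: high_inflection_0_to_1 sends
#0 -> 0, 0.5 -> 0.5 and 1 -> 1 along a smoothed S-curve (slow start and stop)
#and is the default alpha_func for Animation, while there_and_back rises to 1
#at t = 0.5 and returns to 0, as used for the pulse-like Reveal calls in
#cccd_animations.map_trajectories.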
def composition(func_list):
"""
func_list should contain elements of the form (f, args)
"""
    return reduce(
        lambda g, (f, args) : (lambda x : g(f(x, *args))),
        func_list,
        lambda x : x
    )
def remove_nones(sequence):
return filter(lambda x : x, sequence)
#Matrix operations
def rotation_matrix(angle, axis):
"""
    Rotation in R^3 about a specified axis of rotation.
"""
about_z = rotation_about_z(angle)
z_to_axis = z_to_vector(axis)
axis_to_z = np.linalg.inv(z_to_axis)
return reduce(np.dot, [z_to_axis, about_z, axis_to_z])
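#Sanity check on the construction above: for the z-axis itself z_to_vector
#returns the identity, so rotation_matrix(np.pi / 2, [0, 0, 1]) reduces to
#rotation_about_z(np.pi / 2) and maps (1, 0, 0) to (0, 1, 0).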
def rotation_about_z(angle):
return [
[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]
]
def z_to_vector(vector):
"""
Returns some matrix in SO(3) which takes the z-axis to the
(normalized) vector provided as an argument
"""
norm = np.linalg.norm(vector)
if norm == 0:
return np.identity(3)
v = np.array(vector) / norm
phi = np.arccos(v[2])
if any(v[:2]):
#projection of vector to {x^2 + y^2 = 1}
axis_proj = v[:2] / np.linalg.norm(v[:2])
theta = np.arccos(axis_proj[0])
if axis_proj[1] < 0:
theta = -theta
else:
theta = 0
phi_down = np.array([
[np.cos(phi), 0, np.sin(phi)],
[0, 1, 0],
[-np.sin(phi), 0, np.cos(phi)]
])
return np.dot(rotation_about_z(theta), phi_down)
def rotate_vector(vector, angle, axis):
#Slightly hacky, changes vector in place
vector[:3] = np.dot(rotation_matrix(angle, axis), vector)
def angle_between(v1, v2):
return np.arccos(np.dot(
v1 / np.linalg.norm(v1),
v2 / np.linalg.norm(v2)
))

845 images2gif.py Normal file
@@ -0,0 +1,845 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2010, Almar Klein, Ant1, Marius van Voorden
#
# This code is subject to the (new) BSD license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Module images2gif
Provides functionality for reading and writing animated GIF images.
Use writeGif to write a series of numpy arrays or PIL images as an
animated GIF. Use readGif to read an animated gif as a series of numpy
arrays.
Acknowledgements
----------------
Many thanks to Ant1 for:
* noting the use of "palette=PIL.Image.ADAPTIVE", which significantly
improves the results.
* the modifications to save each image with its own palette, or optionally
the global palette (if it's the same).
Many thanks to Marius van Voorden for porting the NeuQuant quantization
algorithm of Anthony Dekker to Python (See the NeuQuant class for its
license).
This code is based on gifmaker (in the scripts folder of the source
distribution of PIL)
Some implementation details are based on the gif file structure as provided
by wikipedia.
"""
import os
import progressbar
try:
import PIL
from PIL import Image, ImageChops
from PIL.GifImagePlugin import getheader, getdata
except ImportError:
PIL = None
try:
import numpy as np
except ImportError:
np = None
try:
from scipy.spatial import cKDTree
except ImportError:
cKDTree = None
# getheader gives an 87a header and a color palette (two elements in a list).
# getdata()[0] gives the Image Descriptor up to (including) "LZW min code size".
# getdata()[1:] is the image data itself in chunks of 256 bytes (well
# technically the first byte says how many bytes follow, after which that
# amount (max 255) follows).
def checkImages(images):
""" checkImages(images)
Check numpy images and correct intensity range etc.
The same for all movie formats.
"""
# Init results
images2 = []
for im in images:
if PIL and isinstance(im, PIL.Image.Image):
# We assume PIL images are all right
images2.append(im)
elif np and isinstance(im, np.ndarray):
# Check and convert dtype
if im.dtype == np.uint8:
images2.append(im) # Ok
elif im.dtype in [np.float32, np.float64]:
im = im.copy()
im[im<0] = 0
im[im>1] = 1
im *= 255
images2.append( im.astype(np.uint8) )
else:
im = im.astype(np.uint8)
images2.append(im)
# Check size
if im.ndim == 2:
pass # ok
elif im.ndim == 3:
if im.shape[2] not in [3,4]:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('Invalid image type: ' + str(type(im)))
# Done
return images2
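# Minimal usage sketch for checkImages (comment-only; assumes numpy is available):
# >>> checked = checkImages([np.linspace(0, 1, 16).reshape(4, 4)])
# >>> checked[0].dtype   # float input in [0, 1] is rescaled to uint8 in [0, 255]
# dtype('uint8')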
def intToBin(i):
""" Integer to two bytes """
# divide into two parts (bytes)
i1 = i % 256
i2 = int( i/256)
# make string (little endian)
return chr(i1) + chr(i2)
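# e.g. intToBin(1) == '\x01\x00' and intToBin(256) == '\x00\x01'
# (two bytes, little endian, as the GIF format expects)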
def getheaderAnim(im):
""" Animation header. To replace the getheader()[0] """
bb = "GIF89a"
bb += intToBin(im.size[0])
bb += intToBin(im.size[1])
bb += "\x87\x00\x00"
return bb
def getImageDescriptor(im):
""" Used for the local color table properties per image.
Otherwise the global color table applies to all frames, irrespective of
whether additional colours come into play that would require a redefined palette.
Still a maximum of 256 colors per frame, obviously.
Written by Ant1 on 2010-08-22
"""
bb = '\x2C' # Image separator,
bb += intToBin( 0 ) # Left position
bb += intToBin( 0 ) # Top position
bb += intToBin( im.size[0] ) # image width
bb += intToBin( im.size[1] ) # image height
bb += '\x87' # packed field : local color table flag1, interlace0, sorted table0, reserved00, lct size111=7=2^(7+1)=256.
# LZW minimum size code now comes later, beginning of [image data] blocks
return bb
#def getAppExt(loops=float('inf')):
#compile error commented by zcwang
def getAppExt(loops=float(0)):
""" Application extention. Part that specifies amount of loops.
If loops is inf, it goes on infinitely.
"""
if loops == 0:
loops = 2**16-1
#bb = "" # application extension should not be used
# (the extension interprets zero loops
# to mean an infinite number of loops)
# Mmm, does not seem to work
if True:
bb = "\x21\xFF\x0B" # application extension
bb += "NETSCAPE2.0"
bb += "\x03\x01"
# if loops == float('inf'):
if loops == float(0):
loops = 2**16-1
bb += intToBin(loops)
bb += '\x00' # end
return bb
def getGraphicsControlExt(duration=0.1):
""" Graphics Control Extension. A sort of header at the start of
each image. Specifies transparency and duration. """
bb = '\x21\xF9\x04'
bb += '\x08' # no transparency
bb += intToBin( int(duration*100) ) # in 100th of seconds
bb += '\x00' # no transparent color
bb += '\x00' # end
return bb
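# e.g. getGraphicsControlExt(0.5) == '\x21\xF9\x04\x08\x32\x00\x00'
# (0.5 s -> 50 hundredths of a second -> intToBin(50) == '\x32\x00')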
def _writeGifToFile(fp, images, durations, loops):
""" Given a set of images writes the bytes to the specified stream.
"""
# Obtain palette for all images and count each occurrence
palettes, occur = [], []
for im in images:
palettes.append( getheader(im)[1] )
for palette in palettes:
occur.append( palettes.count( palette ) )
# Select most-used palette as the global one (or first in case no max)
globalPalette = palettes[ occur.index(max(occur)) ]
# Init
frames = 0
firstFrame = True
for im, palette in zip(images, palettes):
if firstFrame:
# Write header
# Gather info
header = getheaderAnim(im)
appext = getAppExt(loops)
# Write
fp.write(header)
fp.write(globalPalette)
fp.write(appext)
# Next frame is not the first
firstFrame = False
if True:
# Write palette and image data
# Gather info
data = getdata(im)
imdes, data = data[0], data[1:]
graphext = getGraphicsControlExt(durations[frames])
# Make image descriptor suitable for using 256 local color palette
lid = getImageDescriptor(im)
# Write local header
if palette != globalPalette:
# Use local color palette
fp.write(graphext)
fp.write(lid) # write suitable image descriptor
fp.write(palette) # write local color table
fp.write('\x08') # LZW minimum size code
else:
# Use global color palette
fp.write(graphext)
fp.write(imdes) # write suitable image descriptor
# Write image data
for d in data:
fp.write(d)
# Prepare for next round
frames = frames + 1
fp.write(";") # end gif
return frames
## Exposed functions
def writeGif(filename, images, duration=0.1, repeat=True, dither=False, nq=0):
""" writeGif(filename, images, duration=0.1, repeat=True, dither=False)
Write an animated gif from the specified images.
Parameters
----------
filename : string
The name of the file to write the image to.
images : list
Should be a list consisting of PIL images or numpy arrays.
The latter should be between 0 and 255 for integer types, and
between 0 and 1 for float types.
duration : scalar or list of scalars
The duration for all frames, or (if a list) for each frame.
repeat : bool or integer
The number of loops. If True, it loops infinitely.
dither : bool
Whether to apply dithering
nq : integer
If nonzero, applies the NeuQuant quantization algorithm to create
the color palette. This algorithm is superior, but slower than
the standard PIL algorithm. The value of nq is the quality
parameter. 1 represents the best quality. 10 is in general a
good tradeoff between quality and speed.
"""
progress_bar = progressbar.ProgressBar(maxval=len(images))
progress_bar.start()
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to write animated gif files.")
# Check images
images = checkImages(images)
# Check loops
if repeat is False:
loops = 1
elif repeat is True:
loops = 0 # zero means infinite
else:
loops = int(repeat)
# Convert to PIL images
images2 = []
for im in images:
if isinstance(im, Image.Image):
images2.append(im)
elif np and isinstance(im, np.ndarray):
if im.ndim==3 and im.shape[2]==3:
im = Image.fromarray(im,'RGB')
elif im.ndim==2:
im = Image.fromarray(im,'L')
images2.append(im)
# Convert to paletted PIL images
images, images2 = images2, []
if nq >= 1:
# NeuQuant algorithm
for im in images:
im = im.convert("RGBA") # NQ assumes RGBA
NQ = NeuQuant(im, int(nq)) # Learn colors from image
if dither:
im = im.convert("RGB").quantize(palette=NQ.paletteImage())
else:
im = NQ.quantize(im) # Use to quantize the image itself
images2.append(im)
else:
# Adaptive PIL algorithm
AD = Image.ADAPTIVE
count = 0
for im in images:
progress_bar.update(count)
count += 1
im = im.convert('P', palette=AD, dither=dither)
images2.append(im)
# Check duration
if hasattr(duration, '__len__'):
if len(duration) == len(images2):
durations = [d for d in duration]
else:
raise ValueError("len(duration) doesn't match amount of images.")
else:
durations = [duration for im in images2]
# Open file
fp = open(filename, 'wb')
# Write
try:
n = _writeGifToFile(fp, images2, durations, loops)
finally:
fp.close()
progress_bar.finish()
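# Minimal usage sketch for writeGif (comment-only; 'out.gif' is a hypothetical
# file name and the frames are random uint8 arrays):
# >>> frames = [(255 * np.random.rand(64, 64)).astype(np.uint8) for _ in range(10)]
# >>> writeGif('out.gif', frames, duration=0.1, repeat=True)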
def readGif(filename, asNumpy=True):
""" readGif(filename, asNumpy=True)
Read images from an animated GIF file. Returns a list of numpy
arrays, or, if asNumpy is false, a list of PIL images.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to read animated gif files.")
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy to read animated gif files.")
# Check whether it exists
if not os.path.isfile(filename):
raise IOError('File not found: '+str(filename))
# Load file using PIL
pilIm = PIL.Image.open(filename)
pilIm.seek(0)
# Read all images inside
images = []
try:
while True:
# Get image as numpy array
tmp = pilIm.convert() # Make without palette
a = np.asarray(tmp)
if len(a.shape)==0:
raise MemoryError("Too little memory to convert PIL image to array")
# Store, and next
images.append(a)
pilIm.seek(pilIm.tell()+1)
except EOFError:
pass
# Convert to normal PIL images if needed
if not asNumpy:
images2 = images
images = []
for im in images2:
images.append( PIL.Image.fromarray(im) )
# Done
return images
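# Minimal usage sketch for readGif (comment-only; 'out.gif' is a hypothetical
# existing GIF file):
# >>> arrays = readGif('out.gif', asNumpy=True)
# >>> len(arrays), arrays[0].shape   # number of frames, (height, width, channels)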
class NeuQuant:
""" NeuQuant(image, samplefac=10, colors=256)
samplefac should be an integer of 1 or higher; 1 gives
the highest quality but the slowest performance. With
a value of 10, one tenth of all pixels are used during
training. This value seems a nice tradeoff between speed
and quality.
colors is the number of colors to reduce the image to. This
should ideally be a power of two.
See also:
http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
License of the NeuQuant Neural-Net Quantization Algorithm
---------------------------------------------------------
Copyright (c) 1994 Anthony Dekker
Ported to python by Marius van Voorden in 2010
NEUQUANT Neural-Net quantization algorithm by Anthony Dekker, 1994.
See "Kohonen neural networks for optimal colour quantization"
in "network: Computation in Neural Systems" Vol. 5 (1994) pp 351-367.
for a discussion of the algorithm.
See also http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
Any party obtaining a copy of these files from the author, directly or
indirectly, is granted, free of charge, a full and unrestricted irrevocable,
world-wide, paid up, royalty-free, nonexclusive right and license to deal
in this software and documentation files (the "Software"), including without
limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons who receive
copies from any such party to do so, with the only requirement being
that this copyright notice remain intact.
"""
NCYCLES = None # Number of learning cycles
NETSIZE = None # Number of colours used
SPECIALS = None # Number of reserved colours used
BGCOLOR = None # Reserved background colour
CUTNETSIZE = None
MAXNETPOS = None
INITRAD = None # For 256 colours, radius starts at 32
RADIUSBIASSHIFT = None
RADIUSBIAS = None
INITBIASRADIUS = None
RADIUSDEC = None # Factor of 1/30 each cycle
ALPHABIASSHIFT = None
INITALPHA = None # biased by 10 bits
GAMMA = None
BETA = None
BETAGAMMA = None
network = None # The network itself
colormap = None # The color map derived from the network
netindex = None # For network lookup - really 256
bias = None # Bias and freq arrays for learning
freq = None
pimage = None
# Four primes near 500 - assume no image has a length so large
# that it is divisible by all four primes
PRIME1 = 499
PRIME2 = 491
PRIME3 = 487
PRIME4 = 503
MAXPRIME = PRIME4
pixels = None
samplefac = None
a_s = None
def setconstants(self, samplefac, colors):
self.NCYCLES = 100 # Number of learning cycles
self.NETSIZE = colors # Number of colours used
self.SPECIALS = 3 # Number of reserved colours used
self.BGCOLOR = self.SPECIALS-1 # Reserved background colour
self.CUTNETSIZE = self.NETSIZE - self.SPECIALS
self.MAXNETPOS = self.NETSIZE - 1
self.INITRAD = self.NETSIZE/8 # For 256 colours, radius starts at 32
self.RADIUSBIASSHIFT = 6
self.RADIUSBIAS = 1 << self.RADIUSBIASSHIFT
self.INITBIASRADIUS = self.INITRAD * self.RADIUSBIAS
self.RADIUSDEC = 30 # Factor of 1/30 each cycle
self.ALPHABIASSHIFT = 10 # Alpha starts at 1
self.INITALPHA = 1 << self.ALPHABIASSHIFT # biased by 10 bits
self.GAMMA = 1024.0
self.BETA = 1.0/1024.0
self.BETAGAMMA = self.BETA * self.GAMMA
self.network = np.empty((self.NETSIZE, 3), dtype='float64') # The network itself
self.colormap = np.empty((self.NETSIZE, 4), dtype='int32') # The color map derived from the network
self.netindex = np.empty(256, dtype='int32') # For network lookup - really 256
self.bias = np.empty(self.NETSIZE, dtype='float64') # Bias and freq arrays for learning
self.freq = np.empty(self.NETSIZE, dtype='float64')
self.pixels = None
self.samplefac = samplefac
self.a_s = {}
def __init__(self, image, samplefac=10, colors=256):
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy for the NeuQuant algorithm.")
# Check image
if image.size[0] * image.size[1] < NeuQuant.MAXPRIME:
raise IOError("Image is too small")
assert image.mode == "RGBA"
# Initialize
self.setconstants(samplefac, colors)
self.pixels = np.fromstring(image.tostring(), np.uint32)
self.setUpArrays()
self.learn()
self.fix()
self.inxbuild()
def writeColourMap(self, rgb, outstream):
for i in range(self.NETSIZE):
bb = self.colormap[i,0]
gg = self.colormap[i,1]
rr = self.colormap[i,2]
outstream.write(rr if rgb else bb)
outstream.write(gg)
outstream.write(bb if rgb else rr)
return self.NETSIZE
def setUpArrays(self):
self.network[0,0] = 0.0 # Black
self.network[0,1] = 0.0
self.network[0,2] = 0.0
self.network[1,0] = 255.0 # White
self.network[1,1] = 255.0
self.network[1,2] = 255.0
# RESERVED self.BGCOLOR # Background
for i in range(self.SPECIALS):
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
for i in range(self.SPECIALS, self.NETSIZE):
p = self.network[i]
p[:] = (255.0 * (i-self.SPECIALS)) / self.CUTNETSIZE
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
# Omitted: setPixels
def altersingle(self, alpha, i, b, g, r):
"""Move neuron i towards biased (b,g,r) by factor alpha"""
n = self.network[i] # Alter hit neuron
n[0] -= (alpha*(n[0] - b))
n[1] -= (alpha*(n[1] - g))
n[2] -= (alpha*(n[2] - r))
def geta(self, alpha, rad):
try:
return self.a_s[(alpha, rad)]
except KeyError:
length = rad*2-1
mid = length/2
q = np.array(range(mid-1,-1,-1)+range(-1,mid))
a = alpha*(rad*rad - q*q)/(rad*rad)
a[mid] = 0
self.a_s[(alpha, rad)] = a
return a
def alterneigh(self, alpha, rad, i, b, g, r):
if i-rad >= self.SPECIALS-1:
lo = i-rad
start = 0
else:
lo = self.SPECIALS-1
start = (self.SPECIALS-1 - (i-rad))
if i+rad <= self.NETSIZE:
hi = i+rad
end = rad*2-1
else:
hi = self.NETSIZE
end = (self.NETSIZE - (i+rad))
a = self.geta(alpha, rad)[start:end]
p = self.network[lo+1:hi]
p -= np.transpose(np.transpose(p - np.array([b, g, r])) * a)
#def contest(self, b, g, r):
# """ Search for biased BGR values
# Finds closest neuron (min dist) and updates self.freq
# finds best neuron (min dist-self.bias) and returns position
# for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
# self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
#
# i, j = self.SPECIALS, self.NETSIZE
# dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
# bestpos = i + np.argmin(dists)
# biasdists = dists - self.bias[i:j]
# bestbiaspos = i + np.argmin(biasdists)
# self.freq[i:j] -= self.BETA * self.freq[i:j]
# self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
# self.freq[bestpos] += self.BETA
# self.bias[bestpos] -= self.BETAGAMMA
# return bestbiaspos
def contest(self, b, g, r):
""" Search for biased BGR values
Finds closest neuron (min dist) and updates self.freq
finds best neuron (min dist-self.bias) and returns position
for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
i, j = self.SPECIALS, self.NETSIZE
dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
bestpos = i + np.argmin(dists)
biasdists = dists - self.bias[i:j]
bestbiaspos = i + np.argmin(biasdists)
self.freq[i:j] *= (1-self.BETA)
self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
self.freq[bestpos] += self.BETA
self.bias[bestpos] -= self.BETAGAMMA
return bestbiaspos
def specialFind(self, b, g, r):
for i in range(self.SPECIALS):
n = self.network[i]
if n[0] == b and n[1] == g and n[2] == r:
return i
return -1
def learn(self):
biasRadius = self.INITBIASRADIUS
alphadec = 30 + ((self.samplefac-1)/3)
lengthcount = self.pixels.size
samplepixels = lengthcount / self.samplefac
delta = samplepixels / self.NCYCLES
alpha = self.INITALPHA
i = 0;
rad = biasRadius >> self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
print "Beginning 1D learning: samplepixels =",samplepixels," rad =", rad
step = 0
pos = 0
if lengthcount%NeuQuant.PRIME1 != 0:
step = NeuQuant.PRIME1
elif lengthcount%NeuQuant.PRIME2 != 0:
step = NeuQuant.PRIME2
elif lengthcount%NeuQuant.PRIME3 != 0:
step = NeuQuant.PRIME3
else:
step = NeuQuant.PRIME4
i = 0
printed_string = ''
while i < samplepixels:
if i%100 == 99:
tmp = '\b'*len(printed_string)
printed_string = str((i+1)*100/samplepixels)+"%\n"
print tmp + printed_string,
p = self.pixels[pos]
r = (p >> 16) & 0xff
g = (p >> 8) & 0xff
b = (p ) & 0xff
if i == 0: # Remember background colour
self.network[self.BGCOLOR] = [b, g, r]
j = self.specialFind(b, g, r)
if j < 0:
j = self.contest(b, g, r)
if j >= self.SPECIALS: # Don't learn for specials
a = (1.0 * alpha) / self.INITALPHA
self.altersingle(a, j, b, g, r)
if rad > 0:
self.alterneigh(a, rad, j, b, g, r)
pos = (pos+step)%lengthcount
i += 1
if i%delta == 0:
alpha -= alpha / alphadec
biasRadius -= biasRadius / self.RADIUSDEC
rad = biasRadius >> self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
print "Finished 1D learning: final alpha =",(1.0*alpha)/self.INITALPHA,"!"
def fix(self):
for i in range(self.NETSIZE):
for j in range(3):
x = int(0.5 + self.network[i,j])
x = max(0, x)
x = min(255, x)
self.colormap[i,j] = x
self.colormap[i,3] = i
def inxbuild(self):
previouscol = 0
startpos = 0
for i in range(self.NETSIZE):
p = self.colormap[i]
q = None
smallpos = i
smallval = p[1] # Index on g
# Find smallest in i..self.NETSIZE-1
for j in range(i+1, self.NETSIZE):
q = self.colormap[j]
if q[1] < smallval: # Index on g
smallpos = j
smallval = q[1] # Index on g
q = self.colormap[smallpos]
# Swap p (i) and q (smallpos) entries
if i != smallpos:
p[:],q[:] = q, p.copy()
# smallval entry is now in position i
if smallval != previouscol:
self.netindex[previouscol] = (startpos+i) >> 1
for j in range(previouscol+1, smallval):
self.netindex[j] = i
previouscol = smallval
startpos = i
self.netindex[previouscol] = (startpos+self.MAXNETPOS) >> 1
for j in range(previouscol+1, 256): # Really 256
self.netindex[j] = self.MAXNETPOS
def paletteImage(self):
""" PIL weird interface for making a paletted image: create an image which
already has the palette, and use that in Image.quantize. This function
returns this palette image. """
if self.pimage is None:
palette = []
for i in range(self.NETSIZE):
palette.extend(self.colormap[i][:3])
palette.extend([0]*(256-self.NETSIZE)*3)
# a palette image to use for quant
self.pimage = Image.new("P", (1, 1), 0)
self.pimage.putpalette(palette)
return self.pimage
def quantize(self, image):
""" Use a kdtree to quickly find the closest palette colors for the pixels """
if cKDTree:
return self.quantize_with_scipy(image)
else:
print 'Scipy not available, falling back to slower version.'
return self.quantize_without_scipy(image)
def quantize_with_scipy(self, image):
w,h = image.size
px = np.asarray(image).copy()
px2 = px[:,:,:3].reshape((w*h,3))
kdtree = cKDTree(self.colormap[:,:3],leafsize=10)
result = kdtree.query(px2)
colorindex = result[1]
print "Distance:", (result[0].sum()/(w*h))
px2[:] = self.colormap[colorindex,:3]
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def quantize_without_scipy(self, image):
"""" This function can be used if no scipy is availabe.
It's 7 times slower though.
"""
w,h = image.size
px = np.asarray(image).copy()
memo = {}
for j in range(w):
for i in range(h):
key = (px[i,j,0],px[i,j,1],px[i,j,2])
try:
val = memo[key]
except KeyError:
val = self.convert(key)
memo[key] = val
px[i,j,0],px[i,j,1],px[i,j,2] = val
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def convert(self, (r, g, b)):
i = self.inxsearch(r, g, b)
return self.colormap[i,:3]
def inxsearch(self, r, g, b):
"""Search for BGR values 0..255 and return colour index"""
dists = (self.colormap[:,:3] - np.array([r,g,b]))
a= np.argmin((dists*dists).sum(1))
return a
if __name__ == '__main__':
im = np.zeros((200,200), dtype=np.uint8)
im[10:30,:] = 100
im[:,80:120] = 255
im[-50:-40,:] = 50
images = [im*1.0, im*0.8, im*0.6, im*0.4, im*0]
writeGif('lala3.gif',images, duration=0.5, dither=0)

555
mobject.py Normal file
View file

@@ -0,0 +1,555 @@
import numpy as np
import itertools as it
import os
from PIL import Image
from random import random
from animate import *
from tex_image_utils import NAME_TO_IMAGE_FILE
import displayer as disp
class Mobject(object):
"""
Mathematical Object
"""
#Number of numbers used to describe a point (3 for pos, 3 for normal vector)
DIM = 3
DEFAULT_COLOR = Color("skyblue")
SHOULD_BUFF_POINTS = GENERALLY_BUFF_POINTS
def __init__(self,
color = None,
name = None,
center = None,
):
self.color = Color(color) if color else Color(self.DEFAULT_COLOR)
if not hasattr(self, "name"):
self.name = name or self.__class__.__name__
self.has_normals = hasattr(self, 'unit_normal')
self.points = np.zeros((0, 3))
self.rgbs = np.zeros((0, 3))
if self.has_normals:
self.unit_normals = np.zeros((0, 3))
self.generate_points()
if center:
self.center().shift(center)
def __str__(self):
return self.name
def display(self):
disp.get_image(self.points, self.rgbs).show()
def save_image(self, name = None):
disp.get_image(self.points, self.rgbs).save(
os.path.join(MOVIE_DIR, (name or str(self)) + ".png")
)
def add_points(self, points, rgbs = None, color = None):
"""
points must be an Nx3 numpy array, as must rgbs if it is not None
"""
points = np.array(points)
num_new_points = points.shape[0]
self.points = np.append(self.points, points)
self.points = self.points.reshape((self.points.size / 3, 3))
if rgbs is None:
color = Color(color) if color else self.color
rgbs = np.array([color.get_rgb()] * num_new_points)
else:
if rgbs.shape != points.shape:
raise Exception("points and rgbs must have same shape")
self.rgbs = np.append(self.rgbs, rgbs).reshape(self.points.shape)
if self.has_normals:
self.unit_normals = np.append(
self.unit_normals,
np.array([self.unit_normal(point) for point in points])
).reshape(self.points.shape)
return self
def rotate(self, angle, axis = [0, 0, 1]):
t_rotation_matrix = np.transpose(rotation_matrix(angle, axis))
self.points = np.dot(self.points, t_rotation_matrix)
if self.has_normals:
self.unit_normals = np.dot(self.unit_normals, t_rotation_matrix)
return self
def shift(self, vector):
cycle = it.cycle(vector)
v = np.array([cycle.next() for x in range(self.points.size)]).reshape(self.points.shape)
self.points += v
return self
def center(self):
self.shift(-self.get_center())
return self
def get_center(self):
return np.apply_along_axis(np.mean, 0, self.points)
def scale(self, scale_factor):
self.points *= scale_factor
return self
def add(self, *mobjects):
for mobject in mobjects:
self.add_points(mobject.points, mobject.rgbs)
return self
def get_num_points(self):
return self.points.shape[0]
def pose_at_angle(self):
self.rotate(np.pi / 7)
self.rotate(np.pi / 7, [1, 0, 0])
return self
def apply_function(self, function):
self.points = np.apply_along_axis(function, 1, self.points)
return self
def apply_complex_function(self, function):
def point_map((x, y, z)):
result = function(complex(x, y))
return (result.real, result.imag, 0)
return self.apply_function(point_map)
def highlight(self, color = "red", condition = lambda x : True):
"""
Condition is a function which takes in one argument, (x, y, z).
"""
to_change = np.apply_along_axis(condition, 1, self.points)
self.rgbs[to_change, :] *= 0
self.rgbs[to_change, :] += Color(color).get_rgb()
return self
def fade(self, amount = 0.5):
self.rgbs += amount
return self
def filter_out(self, condition):
to_eliminate = ~np.apply_along_axis(condition, 1, self.points)
self.points = self.points[to_eliminate]
self.rgbs = self.rgbs[to_eliminate]
return self
def generate_points(self):
#Typically implemented in subclass, unless purposefully left blank
pass
### Static Methods ###
def align_data(mobject1, mobject2):
count1, count2 = mobject1.get_num_points(), mobject2.get_num_points()
if count1 == 0:
mobject1.add_points([(0, 0, 0)])
if count2 == 0:
mobject2.add_points([(0, 0, 0)])
if count1 == count2:
return
for attr in ['points', 'rgbs']:
new_arrays = make_even(getattr(mobject1, attr), getattr(mobject2, attr))
for array, mobject in zip(new_arrays, [mobject1, mobject2]):
setattr(mobject, attr, np.array(array))
def interpolate(mobject1, mobject2, target_mobject, alpha):
"""
Turns target_mobject into an interpolation between mobject1
and mobject2.
"""
Mobject.align_data(mobject1, mobject2)
for attr in ['points', 'rgbs']:
new_array = (1 - alpha) * getattr(mobject1, attr) + \
alpha * getattr(mobject2, attr)
setattr(target_mobject, attr, new_array)
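# Minimal usage sketch for the Mobject API above (comment-only; the colors and
# coordinates are illustrative):
# >>> m1 = Mobject(color = "white").add_points([(0, 0, 0), (1, 0, 0)])
# >>> m2 = Mobject(color = "red").add_points([(0, 1, 0), (1, 1, 0)])
# >>> target = Mobject()
# >>> Mobject.interpolate(m1, m2, target, 0.5)   # target is now the halfway blend
# >>> m1.highlight("yellow", condition = lambda (x, y, z) : x > 0.5)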
class Mobject1D(Mobject):
def __init__(self, density = DEFAULT_POINT_DENSITY_1D, *args, **kwargs):
self.epsilon = 1.0 / density
Mobject.__init__(self, *args, **kwargs)
class Mobject2D(Mobject):
def __init__(self, density = DEFAULT_POINT_DENSITY_2D, *args, **kwargs):
self.epsilon = 1.0 / density
Mobject.__init__(self, *args, **kwargs)
class CompoundMobject(Mobject):
def __init__(self, *mobjects):
Mobject.__init__(self)
for mobject in mobjects:
self.add_points(mobject.points, mobject.rgbs)
###### Concrete Mobjects ########
class Stars(Mobject):
DEFAULT_COLOR = "white"
SHOULD_BUFF_POINTS = False
def __init__(self, num_points = DEFAULT_NUM_STARS,
*args, **kwargs):
self.num_points = num_points
Mobject.__init__(self, *args, **kwargs)
def generate_points(self):
self.add_points([
(
r * np.sin(phi)*np.cos(theta),
r * np.sin(phi)*np.sin(theta),
r * np.cos(phi)
)
for x in range(self.num_points)
for r, phi, theta in [[
max(SPACE_HEIGHT, SPACE_WIDTH) * random(),
np.pi * random(),
2 * np.pi * random(),
]]
])
class Point(Mobject):
def __init__(self, point = (0, 0, 0), *args, **kwargs):
Mobject.__init__(self, *args, **kwargs)
self.points = np.array(point).reshape(1, 3)
self.rgbs = np.array(self.color.get_rgb()).reshape(1, 3)
class Arrow(Mobject1D):
NUNGE_DISTANCE = 0.1
def __init__(self, point = (0, 0, 0), direction = (-1, 1, 0),
length = 1, tip_length = 0.25,
normal = (0, 0, 1), *args, **kwargs):
self.point = np.array(point)
self.direction = np.array(direction) / np.linalg.norm(direction)
self.normal = np.array(normal)
self.length = length
self.tip_length = tip_length
Mobject1D.__init__(self, *args, **kwargs)
def generate_points(self):
self.add_points([
[x, x, x] * self.direction + self.point
for x in np.arange(-self.length, 0, self.epsilon)
])
tips_dir = np.array(-self.direction), np.array(-self.direction)
for i, sgn in zip([0, 1], [-1, 1]):
rotate_vector(tips_dir[i], sgn * np.pi / 4, self.normal)
self.add_points([
[x, x, x] * tips_dir[i] + self.point
for x in np.arange(0, self.tip_length, self.epsilon)
for i in [0, 1]
])
def nudge(self):
return self.shift(-self.direction * self.NUNGE_DISTANCE)
class Vector(Arrow):
def __init__(self, point = (1, 0, 0), *args, **kwargs):
length = np.linalg.norm(point)
Arrow.__init__(self, point = point, direction = point,
length = length, tip_length = 0.2 * length,
*args, **kwargs)
class Dot(Mobject1D):
DEFAULT_COLOR = "white"
def __init__(self, center = (0, 0, 0), radius = 0.05, *args, **kwargs):
self.center = center
self.radius = radius
Mobject1D.__init__(self, *args, **kwargs)
def generate_points(self):
self.add_points([
np.array((t*np.cos(theta), t*np.sin(theta), 0)) + self.center
for t in np.arange(0, self.radius, self.epsilon)
for theta in np.arange(0, 2 * np.pi, self.epsilon)
])
class Cross(Mobject1D):
RADIUS = 0.3
DEFAULT_COLOR = "white"
def generate_points(self):
self.add_points([
(sgn * x, x, 0)
for x in np.arange(-self.RADIUS / 2, self.RADIUS/2, self.epsilon)
for sgn in [-1, 1]
])
class Line(Mobject1D):
def __init__(self, start, end, density = DEFAULT_POINT_DENSITY_1D, *args, **kwargs):
self.start = np.array(start)
self.end = np.array(end)
density *= np.linalg.norm(self.start - self.end)
Mobject1D.__init__(self, density = density, *args, **kwargs)
def generate_points(self):
self.add_points([
t * self.end + (1 - t) * self.start
for t in np.arange(0, 1, self.epsilon)
])
class Cube(Mobject2D):
def generate_points(self):
self.add_points([
sgn * np.array(coords)
for x in np.arange(-1, 1, self.epsilon)
for y in np.arange(x, 1, self.epsilon)
for coords in it.permutations([x, y, 1])
for sgn in [-1, 1]
])
self.pose_at_angle()
def unit_normal(self, coords):
return np.array(map(lambda x : 1 if abs(x) == 1 else 0, coords))
class CubeShell(Mobject1D):
DEFAULT_COLOR = "yellow"
def generate_points(self):
self.add_points([
([a, b, c][p[0]], [a, b, c][p[1]], [a, b, c][p[2]])
for p in [(0, 1, 2), (2, 0, 1), (1, 2, 0)]
for a, b, c in it.product([-1, 1], [-1, 1], np.arange(-1, 1, self.epsilon))
])
self.pose_at_angle()
class Sphere(Mobject2D):
def generate_points(self):
self.add_points([
(
np.sin(phi) * np.cos(theta),
np.sin(phi) * np.sin(theta),
np.cos(phi)
)
for phi in np.arange(self.epsilon, np.pi, self.epsilon)
for theta in np.arange(0, 2 * np.pi, 2 * self.epsilon / np.sin(phi))
])
def unit_normal(self, coords):
return np.array(coords) / np.linalg.norm(coords)
class Circle(Mobject1D):
DEFAULT_COLOR = "red"
def generate_points(self):
self.add_points([
(np.cos(theta), np.sin(theta), 0)
for theta in np.arange(0, 2 * np.pi, self.epsilon)
])
def repeat(self, count):
#Can make transition animations quite pretty
for x in range(count - 1):
self.add_points(self.points)
return self
class FunctionGraph(Mobject1D):
DEFAULT_COLOR = "lightblue"
def __init__(self, function, x_range = [-10, 10], *args, **kwargs):
self.function = function
self.x_min = x_range[0] / SPACE_WIDTH
self.x_max = x_range[1] / SPACE_WIDTH
Mobject1D.__init__(self, *args, **kwargs)
def generate_points(self):
scale_factor = 2.0 * SPACE_WIDTH / (self.x_max - self.x_min)
self.epsilon /= scale_factor
self.add_points([
np.array([x, self.function(x), 0])
for x in np.arange(self.x_min, self.x_max, self.epsilon)
])
self.scale(scale_factor)
class ParametricFunction(Mobject):
DEFAULT_COLOR = "lightblue"
def __init__(self,
function,
dim = 1,
expected_measure = 10.0,
density = None,
*args,
**kwargs):
self.function = function
self.dim = dim
self.expected_measure = expected_measure
if density:
self.epsilon = 1.0 / density
elif self.dim == 1:
self.epsilon = 1.0 / expected_measure / DEFAULT_POINT_DENSITY_1D
else:
self.epsilon = 1.0 / np.sqrt(expected_measure) / DEFAULT_POINT_DENSITY_2D
Mobject.__init__(self, *args, **kwargs)
def generate_points(self):
if self.dim == 1:
self.add_points([
self.function(t)
for t in np.arange(-1, 1, self.epsilon)
])
if self.dim == 2:
self.add_points([
self.function(s, t)
for t in np.arange(-1, 1, self.epsilon)
for s in np.arange(-1, 1, self.epsilon)
])
class Grid(Mobject1D):
DEFAULT_COLOR = "green"
def __init__(self,
radius = max(SPACE_HEIGHT, SPACE_WIDTH),
interval_size = 1.0,
subinterval_size = 0.5,
*args, **kwargs):
self.radius = radius
self.interval_size = interval_size
self.subinterval_size = subinterval_size
Mobject1D.__init__(self, *args, **kwargs)
def generate_points(self):
self.add_points([
(sgns[0] * x, sgns[1] * y, 0)
for beta in np.arange(0, self.radius, self.interval_size)
for alpha in np.arange(0, self.radius, self.epsilon)
for sgns in it.product((-1, 1), (-1, 1))
for x, y in [(alpha, beta), (beta, alpha)]
])
if self.subinterval_size:
si = self.subinterval_size
color = Color(self.color)
color.set_rgb([x/2 for x in color.get_rgb()])
self.add_points([
(sgns[0] * x, sgns[1] * y, 0)
for beta in np.arange(0, self.radius, si)
if abs(beta % self.interval_size) > self.epsilon
for alpha in np.arange(0, self.radius, self.epsilon)
for sgns in it.product((-1, 1), (-1, 1))
for x, y in [(alpha, beta), (beta, alpha)]
], color = color)
class NumberLine(Mobject1D):
def __init__(self,
radius = SPACE_WIDTH,
interval_size = 0.5, tick_size = 0.1,
with_numbers = False, *args, **kwargs):
self.radius = int(radius) + 1
self.interval_size = interval_size
self.tick_size = tick_size
self.with_numbers = with_numbers
Mobject1D.__init__(self, *args, **kwargs)
def generate_points(self):
self.add_points([
(x, 0, 0)
for x in np.arange(-self.radius, self.radius, self.epsilon)
])
self.add_points([
(0, y, 0)
for y in np.arange(-2*self.tick_size, 2*self.tick_size, self.epsilon)
])
self.add_points([
(x, y, 0)
for x in np.arange(-self.radius, self.radius, self.interval_size)
for y in np.arange(-self.tick_size, self.tick_size, self.epsilon)
])
if self.with_numbers: #TODO, make these numbers a separate object
vertical_displacement = -0.3
max_explicit_num = 3
num_to_name = dict(
(x, str(x))
for x in range(-max_explicit_num, max_explicit_num + 1)
)
num_to_name[max_explicit_num + 1] = "cdots"
num_to_name[-max_explicit_num - 1] = "cdots"
nums = CompoundMobject(*[
ImageMobject(
NAME_TO_IMAGE_FILE[num_to_name[x]]
).scale(0.6).center().shift(
[x * self.interval_size, vertical_displacement, 0]
)
for x in range(-max_explicit_num - 1, max_explicit_num + 2)
])
self.add_points(nums.points, nums.rgbs)
# class ComplexPlane(Grid):
# def __init__(self, *args, **kwargs):
# Grid.__init__(self, *args, **kwargs)
# self.add(Dot())
class ImageMobject(Mobject2D):
"""
Automatically filters out black pixels
"""
# SHOULD_BUFF_POINTS = False
def __init__(self,
image,
filter_color = "black",
invert = True,
*args, **kwargs):
#TODO, Make sure you always convert to RGB
self.filter_rgb = 255 * np.array(Color(filter_color).get_rgb()).astype('uint8')
if isinstance(image, str):
self.name = to_cammel_case(
os.path.split(image)[-1].split(".")[0]
)
possible_paths = [
image,
os.path.join(IMAGE_DIR, image),
os.path.join(IMAGE_DIR, image + ".jpg"),
os.path.join(IMAGE_DIR, image + ".png"),
]
found = False
for path in possible_paths:
if os.path.exists(path):
image = Image.open(path).convert('RGB')
found = True
if not found:
raise IOError("File not Found")
if invert:
image = invert_image(image)
self.image_array = np.array(image)
Mobject2D.__init__(self, *args, **kwargs)
def generate_points(self):
height, width = self.image_array.shape[:2]
#Flatten array, and find indices where rgb is not filter_rgb
array = self.image_array.reshape((height * width, 3))
ones = np.ones(height * width, dtype = 'bool')
for i in range(3):
ones *= (array[:,i] != self.filter_rgb[i])
indices = np.arange(height * width, dtype = 'int')[ones]
rgbs = array[indices, :].astype('float') / 255.0
points = np.array([
(
i%width - (width / 2.0),
-i/width + (height / 2.0), #flip y-axis
0
)
for i in indices
], dtype = 'float64')
height, width = map(float, (height, width))
if height / width > float(HEIGHT) / WIDTH:
points *= 2 * SPACE_HEIGHT / height
else:
points *= 2 * SPACE_WIDTH / width
self.add_points(points, rgbs = rgbs)
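# Minimal usage sketch for ImageMobject (comment-only; "some_image" is a
# hypothetical file name resolved against IMAGE_DIR):
# >>> pic = ImageMobject("some_image", invert = False)
# >>> pic.get_num_points()   # one point per pixel that survived the color filter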

70
tex_image_utils.py Normal file
View file

@@ -0,0 +1,70 @@
import os
from PIL import Image
from constants import PDF_DIR, IMAGE_DIR, WIDTH, HEIGHT, PDF_DENSITY
def load_pdf_images(filename, regen_if_exists = False):
"""
Converts a pdf, which potentially has multiple slides, into a
directory full of enumerated pngs corresponding with these slides.
Returns a list of PIL Image objects for these images, sorted as they
were in the pdf.
"""
#TODO, Handle case where there is one page in the pdf!
possible_paths = [
filename,
os.path.join(PDF_DIR, filename),
os.path.join(PDF_DIR, filename + ".pdf"),
]
for path in possible_paths:
if os.path.exists(path):
directory, filename = os.path.split(path)
name = filename.split(".")[0]
images_dir = os.path.join(IMAGE_DIR, name)
already_exists = os.path.exists(images_dir)
if not already_exists:
os.mkdir(images_dir)
if not already_exists or regen_if_exists:
commands = [
"convert",
"-density",
str(PDF_DENSITY),
path,
"-size",
str(WIDTH) + "x" + str(HEIGHT),
os.path.join(images_dir, name + ".png")
]
os.system(" ".join(commands))
image_paths = [
os.path.join(images_dir, name)
for name in os.listdir(images_dir)
if name.endswith(".png")
]
image_paths.sort(cmp_enumerated_files)
return [Image.open(path).convert('RGB') for path in image_paths]
raise IOError("File not Found")
def cmp_enumerated_files(name1, name2):
num1, num2 = [
int(name.split(".")[0].split("-")[-1])
for name in (name1, name2)
]
return num1 - num2
SYMBOL_IMAGES = load_pdf_images("symbols.pdf", regen_if_exists = False)
NAME_TO_IMAGE_FILE = dict(
zip([
"-3",
"-2",
"-1",
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"cdots",
"3Blue1Brown",
], SYMBOL_IMAGES)
)