One third of the way through the counting-in-binary project

This commit is contained in:
Grant Sanderson 2015-08-21 19:58:36 -07:00
parent 461da7049b
commit 2a49abc6f3
10 changed files with 621 additions and 58 deletions

View file

@@ -164,6 +164,11 @@ def remove_nones(sequence):
    return filter(lambda x : x, sequence)

#Matrix operations
def thick_diagonal(dim, thickness = 2):
    row_indices = np.arange(dim).repeat(dim).reshape((dim, dim))
    col_indices = np.transpose(row_indices)
    return (np.abs(row_indices - col_indices)<thickness).astype('uint8')
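For orientation, a minimal standalone sketch of what the new thick_diagonal helper returns (NumPy only; the printed matrix is computed here, not taken from the diff). buffer_pixels in the counting_in_binary script later in this commit multiplies such band matrices on either side of an edge-detected frame to smear edge pixels across neighboring rows and columns.

import numpy as np

def thick_diagonal(dim, thickness = 2):
    row_indices = np.arange(dim).repeat(dim).reshape((dim, dim))
    col_indices = np.transpose(row_indices)
    return (np.abs(row_indices - col_indices) < thickness).astype('uint8')

# A 0/1 band matrix: ones wherever |row - col| < thickness
print(thick_diagonal(4, 2))
# [[1 1 0 0]
#  [1 1 1 0]
#  [0 1 1 1]
#  [0 0 1 1]]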
def rotation_matrix(angle, axis):
    """
    Rotation in R^3 about a specified axis of rotation.

View file

@@ -35,26 +35,41 @@ class ImageMobject(Mobject2D):
        raise IOError("File not Found")

    def generate_points_from_file(self, path, invert):
        if self.read_in_cached_attrs(path, invert):
            return
        image = Image.open(path).convert('RGB')
        if invert:
            image = invert_image(image)
        self.generate_points_from_image_array(np.array(image))
        self.cache_attrs(path, invert)

    def get_cached_attr_files(self, path, invert, attrs):
        #Hash should be unique to (path, invert) pair
        dtype = 'float'
        unique_hash = str(hash(path+str(invert)))
        cached_points, cached_rgbs = [
            os.path.join(IMAGE_MOBJECT_DIR, unique_hash)+extension
            for extension in ".points", ".rgbs"
        return [
            os.path.join(IMAGE_MOBJECT_DIR, unique_hash)+"."+attr
            for attr in attrs
        ]
        if os.path.exists(cached_points) and os.path.exists(cached_rgbs):
            self.points = np.fromfile(cached_points, dtype = dtype)
            self.rgbs = np.fromfile(cached_rgbs, dtype = dtype)
            n_points = self.points.size/self.DIM
            self.points = self.points.reshape(n_points, self.DIM)
            self.rgbs = self.rgbs.reshape(n_points, 3)
        else:
            image = Image.open(path).convert('RGB')
            if invert:
                image = invert_image(image)
            self.generate_points_from_image_array(np.array(image))
            self.points.astype(dtype).tofile(cached_points)
            self.rgbs.astype(dtype).tofile(cached_rgbs)

    def read_in_cached_attrs(self, path, invert,
                             attrs = ("points", "rgbs"),
                             dtype = "float64"):
        cached_attr_files = self.get_cached_attr_files(path, invert, attrs)
        if all(map(os.path.exists, cached_attr_files)):
            for attr, cache_file in zip(attrs, cached_attr_files):
                arr = np.fromfile(cache_file, dtype = dtype)
                arr = arr.reshape(arr.size/self.DIM, self.DIM)
                setattr(self, attr, arr)
            return True
        return False

    def cache_attrs(self, path, invert,
                    attrs = ("points", "rgbs"),
                    dtype = "float64"):
        cached_attr_files = self.get_cached_attr_files(path, invert, attrs)
        for attr, cache_file in zip(attrs, cached_attr_files):
            getattr(self, attr).astype(dtype).tofile(cache_file)

    def generate_points_from_image_array(self, image_array):
        height, width = image_array.shape[:2]
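As a side note, a minimal standalone sketch of the flat-file caching pattern that read_in_cached_attrs and cache_attrs implement (the file name, array contents, and DIM value here are illustrative, not taken from the diff): tofile writes raw float64 values with no shape information, so the reader has to reimpose the (n, DIM) shape after fromfile.

import numpy as np

DIM = 3  # illustrative; ImageMobject points are rows of DIM coordinates

def cache_array(arr, cache_file, dtype = "float64"):
    # Raw dump: values only, no shape or dtype header
    arr.astype(dtype).tofile(cache_file)

def read_cached_array(cache_file, dtype = "float64"):
    arr = np.fromfile(cache_file, dtype = dtype)
    return arr.reshape(arr.size // DIM, DIM)

points = np.random.random((10, DIM))
cache_array(points, "/tmp/example.points")
assert np.allclose(points, read_cached_array("/tmp/example.points"))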

View file

@@ -132,7 +132,7 @@ class Mobject(object):
        Direction just needs to be a vector pointing towards side or
        corner in the 2d plane.
        """
        shift_val = ORIGIN
        shift_val = np.array(ORIGIN)
        space_dim = (SPACE_WIDTH, SPACE_HEIGHT)
        for i in [0, 1]:
            if direction[i] == 0:

View file

@@ -16,13 +16,22 @@ from script_wrapper import command_line_create_scene
class SampleScene(SceneFromVideo):
    def construct(self):
        path = os.path.join(MOVIE_DIR, "LogoGeneration.mp4")
        SceneFromVideo.construct(self, path)
        self.animate_over_time_range(
            0, 3,
            ApplyMethod(Dot().to_edge(LEFT).to_edge, RIGHT)
        )
        path = os.path.join(MOVIE_DIR, "EdgeDetectedCountingInBinary35-75.mp4")
        SceneFromVideo.construct(self, path, time_range = (3, 5))
        self.apply_gaussian_blur(sigmaX = 10)
        self.make_all_black_or_white()
        self.name = "BlurEdgeBlurBold"
        # self.animate_over_time_range(
        #     0, 3,
        #     ApplyMethod(Dot().to_edge(LEFT).to_edge, RIGHT)
        # )

    def make_all_black_or_white(self):
        self.frames = [
            255*(frame != 0).astype('uint8')
            for frame in self.frames
        ]

if __name__ == "__main__":

View file

@@ -1,3 +1,6 @@
from scene import *
from sub_scenes import *
from arithmetic_scenes import *
from counting_scene import *
from pascals_triangle import *
from scene_from_video import *

View file

@@ -6,6 +6,8 @@ from region import *
from constants import *
from helpers import *

DEFAULT_COUNT_NUM_OFFSET = (SPACE_WIDTH - 1, SPACE_HEIGHT - 1, 0)
DEFAULT_COUNT_RUN_TIME = 5.0

class CountingScene(Scene):
    def count(self, items, item_type = "mobject", *args, **kwargs):

View file

@@ -7,8 +7,6 @@ import time
import os
import copy
import progressbar
import inspect
import cv2
from helpers import *
from mobject import *
@@ -16,9 +14,6 @@ from animation import *
import displayer as disp
from tk_scene import TkSceneRoot
DEFAULT_COUNT_NUM_OFFSET = (SPACE_WIDTH - 1, SPACE_HEIGHT - 1, 0)
DEFAULT_COUNT_RUN_TIME = 5.0
class Scene(object):
    def __init__(self,
                 display_config = PRODUCTION_QUALITY_DISPLAY_CONFIG,
@@ -85,6 +80,20 @@ class Scene(object):
        )
        return self

    def highlight_region_over_time_range(self, region, time_range = None, color = "black"):
        if time_range:
            frame_range = map(lambda t : t / self.frame_duration, time_range)
            frame_range[0] = max(frame_range[0], 0)
            frame_range[1] = min(frame_range[1], len(self.frames))
        else:
            frame_range = (0, len(self.frames))
        for index in range(frame_range[0], frame_range[1]):
            self.frames[index] = disp.paint_region(
                region,
                image_array = self.frames[index],
                color = color
            )
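A quick check of the time-to-frame arithmetic above (the numbers are only an example, not from the diff): with frame_duration = 0.05 s, time_range = (1, 3) selects frame indices 20 through 60, since each index is t / frame_duration.

# Illustrative conversion from seconds to frame indices (values made up)
frame_duration = 0.05                     # seconds per frame
time_range = (1.0, 3.0)                   # seconds
frame_range = [int(t / frame_duration) for t in time_range]
print(frame_range)                        # [20, 60]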
    def reset_background(self):
        self.background = self.original_background
        return self
@@ -186,34 +195,6 @@
class SceneFromVideo(Scene):
    def construct(self, file_name, freeze_last_frame = True):
        cap = cv2.VideoCapture(file_name)
        self.shape = (
            int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)),
            int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
        )
        self.frame_duration = 1.0/cap.get(cv2.cv.CV_CAP_PROP_FPS)
        frame_count = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
        print "Reading in " + file_name + "..."
        progress_bar = progressbar.ProgressBar(maxval=frame_count)
        progress_bar.start()
        while(cap.isOpened()):
            returned, frame = cap.read()
            if not returned:
                break
            b, g, r = cv2.split(frame)
            self.frames.append(cv2.merge([r, g, b]))
            progress_bar.update(len(self.frames))
        cap.release()
        progress_bar.finish()
        if freeze_last_frame and len(self.frames) > 0:
            self.original_background = self.background = self.frames[-1]

scene/scene_from_video.py (new file, 62 lines)
View file

@@ -0,0 +1,62 @@
import numpy as np
import cv2
import itertools as it

from scene import *

class SceneFromVideo(Scene):
    def construct(self, file_name,
                  freeze_last_frame = True,
                  time_range = None):
        cap = cv2.VideoCapture(file_name)
        self.shape = (
            int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)),
            int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
        )
        fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
        self.frame_duration = 1.0/fps
        frame_count = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
        if time_range is None:
            start_frame = 0
            end_frame = frame_count
        else:
            start_frame, end_frame = map(lambda t : fps*t, time_range)
            frame_count = end_frame - start_frame

        print "Reading in " + file_name + "..."
        progress_bar = progressbar.ProgressBar(maxval=frame_count)
        progress_bar.start()

        for count in it.count():
            returned, frame = cap.read()
            if count < start_frame:
                continue
            if not returned or count > end_frame:
                break
            # b, g, r = cv2.split(frame)
            # self.frames.append(cv2.merge([r, g, b]))
            self.frames.append(frame)
            progress_bar.update(min(len(self.frames), frame_count))
        cap.release()
        progress_bar.finish()

        if freeze_last_frame and len(self.frames) > 0:
            self.original_background = self.background = self.frames[-1]

    def apply_gaussian_blur(self, ksize = (5, 5), sigmaX = 5):
        self.frames = [
            cv2.GaussianBlur(frame, ksize, sigmaX)
            for frame in self.frames
        ]

    def apply_edge_detection(self, threshold1 = 50, threshold2 = 100):
        edged_frames = [
            cv2.Canny(frame, threshold1, threshold2)
            for frame in self.frames
        ]
        for index in range(len(self.frames)):
            for i in range(3):
                self.frames[index][:,:,i] = edged_frames[index]
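A hedged usage sketch of the same OpenCV calls this new class relies on (OpenCV 2.x API, matching the cv2.cv.CV_* constants above; "example.mp4", the (3, 5) time range, and the blur/Canny parameters are placeholders): the time range is converted to a frame window through the video's fps, and each kept frame is blurred before edge detection so Canny responds to broad outlines rather than pixel noise.

import cv2

cap = cv2.VideoCapture("example.mp4")           # placeholder path
fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
start_frame, end_frame = [int(fps*t) for t in (3, 5)]

frames = []
count = 0
while True:
    returned, frame = cap.read()
    if not returned or count > end_frame:
        break
    if count >= start_frame:
        blurred = cv2.GaussianBlur(frame, (5, 5), 5)
        frames.append(cv2.Canny(blurred, 50, 100))
    count += 1
cap.release()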

View file

@@ -0,0 +1,258 @@
#!/usr/bin/env python

import numpy as np
import itertools as it
from copy import deepcopy
import sys

from animation import *
from mobject import *
from constants import *
from region import *
from scene import Scene, SceneFromVideo
from script_wrapper import command_line_create_scene

MOVIE_PREFIX = "counting_in_binary/"
BASE_HAND_FILE = os.path.join(MOVIE_DIR, MOVIE_PREFIX, "Base.mp4")
FORCED_FRAME_DURATION = 0.02
TIME_RANGE = (0, 42)
INITIAL_PADDING = 27
NUM_GOOD_FRAMES = 1223

ALGORITHM_TEXT = [
    """
    \\begin{flushleft}
    Turn up the rightmost finger that is down.
    """, """
    Turn down all fingers to its right.
    \\end{flushleft}
    """
]
FINGER_WORDS = [
    "Thumb",
    "Index Finger",
    "Middle Finger",
    "Ring Finger",
    "Pinky",
]

COUNT_TO_FRAME_NUM = {
    0 : 0,
    1 : 27,
    2 : 76,
    3 : 110,
    4 : 163,
    5 : 189,
    6 : 226,
    7 : 264,
    8 : 318,
    9 : 356,
    10 : 384,
    11 : 423,
    12 : 457,
    13 : 513,
    14 : 528,
    15 : 590,
    16 : 620,
    17 : 671,
    18 : 691,
    19 : 740,
    20 : 781,
    21 : 810,
    22 : 855,
    23 : 881,
    24 : 940,
    25 : 970,
    26 : 1014,
    27 : 1055,
    28 : 1092,
    29 : 1143,
    30 : 1184,
    31 : 1219,
}

class Hand(ImageMobject):
    def __init__(self, num, **kwargs):
        Mobject2D.__init__(self, **kwargs)
        path = os.path.join(
            MOVIE_DIR, MOVIE_PREFIX, "images", "Hand%d.png"%num
        )
        invert = False
        if self.read_in_cached_attrs(path, invert):
            return
        ImageMobject.__init__(self, path, invert = invert)
        center = self.get_center()
        self.center()
        self.rotate(np.pi, axis = RIGHT+UP)
        self.sort_points(lambda p : np.log(complex(*p[:2])).imag)
        self.rotate(np.pi, axis = RIGHT+UP)
        self.shift(center)
        self.cache_attrs(path, invert = False)

    # def highlight_thumb(self, color = "yellow"):
    #     self.highlight(
    #         color = color,
    #         condition = lambda p : p[0] > 4.5 and p[1] > -1.5
    #     )

def get_algorithm():
    return text_mobject(ALGORITHM_TEXT)

def get_finger_colors():
    return list(Color("yellow").range_to("red", 5))

def five_char_binary(num):
    result = bin(num)[2:]
    return (5-len(result))*"0" + result

def read_reversed_binary(string):
    return sum([
        2**count if char == '1' else 0
        for count, char in zip(it.count(), string)
    ])
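A worked example of the two helpers above, restated compactly (the numbers are computed here, not taken from the diff). read_reversed_binary treats the first character as the least significant bit, which appears to be what lets LeftHand below reuse a mirrored right-hand image while still showing the intended count.

def five_char_binary(num):
    result = bin(num)[2:]
    return (5 - len(result))*"0" + result

def read_reversed_binary(string):
    # Least-significant bit first: character i contributes 2**i when it is '1'
    return sum(2**i for i, char in enumerate(string) if char == '1')

assert five_char_binary(6) == "00110"
assert read_reversed_binary("00110") == 12      # 2**2 + 2**3
assert read_reversed_binary(five_char_binary(12)) == 6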
class LeftHand(Hand):
    def __init__(self, num, **kwargs):
        Hand.__init__(
            self,
            read_reversed_binary(five_char_binary(num)),
            **kwargs
        )
        self.rotate(np.pi, UP)
        self.to_edge(LEFT)

def get_hand_map(which_hand = "right"):
    if which_hand == "right":
        Class = Hand
    elif which_hand == "left":
        Class = LeftHand
    else:
        print "Bad arg, bro"
        return
    return dict([
        (num, Class(num))
        for num in range(32)
    ])

class OverHand(SceneFromVideo):
    def construct(self):
        SceneFromVideo.construct(self, BASE_HAND_FILE)
        self.frame_duration = FORCED_FRAME_DURATION
        self.frames = self.frames[:NUM_GOOD_FRAMES]

class SaveEachNumber(OverHand):
    def construct(self):
        OverHand.construct(self)
        for count in COUNT_TO_FRAME_NUM:
            path = os.path.join(
                MOVIE_DIR, MOVIE_PREFIX, "images",
                "Hand%d.png"%count
            )
            Image.fromarray(self.frames[COUNT_TO_FRAME_NUM[count]]).save(path)

    def write_to_movie(self, name = None):
        print "Why bother writing to movie..."

class ShowCounting(OverHand):
    def construct(self):
        OverHand.construct(self)
        self.frames = INITIAL_PADDING*[self.frames[0]] + self.frames
        num_frames = len(self.frames)
        self.frames = [
            disp.paint_mobject(
                self.get_counting_mob(32*count // num_frames),
                frame
            )
            for frame, count in zip(self.frames, it.count())
        ]

    def get_counting_mob(self, count):
        mob = tex_mobject(str(count))
        mob.scale(2)
        mob.shift(LEFT)
        mob.to_edge(UP, buff = 0.1)
        return mob

class ShowFrameNum(OverHand):
    def construct(self):
        OverHand.construct(self)
        for frame, count in zip(self.frames, it.count()):
            print count, "of", len(self.frames)
            mob = CompoundMobject(*[
                tex_mobject(char).shift(0.3*x*RIGHT)
                for char, x, in zip(str(count), it.count())
            ])
            self.frames[count] = disp.paint_mobject(
                mob.to_corner(UP+LEFT),
                frame
            )

class CountTo1023(Scene):
    def construct(self):
        rh_map = get_hand_map("right")
        lh_map = get_hand_map("left")
        for mob in rh_map.values()+lh_map.values():
            mob.scale(0.9)
            mob.to_edge(DOWN, buff = 0)
        for mob in rh_map.values():
            mob.to_edge(RIGHT)
        for mob in lh_map.values():
            mob.to_edge(LEFT)
        def get_num(count):
            return CompoundMobject(*[
                tex_mobject(char).shift(0.35*x*RIGHT)
                for char, x, in zip(str(count), it.count())
            ]).center().to_edge(UP)
        self.frames = [
            disp.paint_mobject(CompoundMobject(
                rh_map[count%32], lh_map[count//32], get_num(count)
            ))
            for count in range(2**10)
        ]
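A worked example of the indexing in CountTo1023 (values computed here, not from the diff): the right hand shows count % 32, the low five bits, and the left hand shows count // 32, the high five bits, so ten fingers reach 2**10 - 1 = 1023.

# For count = 700:
count = 700
assert (count // 32, count % 32) == (21, 28)   # left hand: 10101, right hand: 11100
assert 31*32 + 31 == 2**10 - 1                 # two hands top out at 1023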
class Introduction(Scene):
    def construct(self):
        words = text_mobject("""
            First, let's see how to count
            to 31 on just one hand...
        """)
        hand = Hand(0)
        for mob in words, hand:
            mob.sort_points(lambda p : p[0])

        self.add(words)
        self.dither()
        self.animate(DelayByOrder(Transform(words, hand)))
        self.dither()

class ShowReadingRule(Scene):
    def construct(self):
        pass

class CountWithReadingRule(ShowCounting):
    def get_counting_mob(self, count):
        pass

class ShowIncrementRule(Scene):
    def construct(self):
        pass

if __name__ == "__main__":
    command_line_create_scene(MOVIE_PREFIX)

View file

@@ -0,0 +1,228 @@
#!/usr/bin/env python

import numpy as np
import itertools as it
from copy import deepcopy
import sys

from animation import *
from mobject import *
from constants import *
from region import *
from scene import Scene, SceneFromVideo
from script_wrapper import command_line_create_scene

MOVIE_PREFIX = "counting_in_binary/"

COUNT_TO_FRAME_NUM = {
    0 : 0,
    1 : 53,
    2 : 84,
    3 : 128,
    4 : 169,
    5 : 208,
    6 : 238,
    7 : 281,
    8 : 331,
    9 : 365,
    10 : 395,
    11 : 435,
    12 : 475,
    13 : 518,
    14 : 556,
    15 : 595,
    16 : 636,
    17 : 676,
    18 : 709,
    19 : 753,
    20 : 790,
    21 : 835,
    22 : 869,
    23 : 903,
    24 : 950,
    25 : 988,
    26 : 1027,
    27 : 1065,
    28 : 1104,
    29 : 1145,
    30 : 1181,
    31 : 1224,
    32 : 1239,
}

class Hand(ImageMobject):
    def __init__(self, num, **kwargs):
        Mobject2D.__init__(self, **kwargs)
        path = os.path.join(
            MOVIE_DIR, MOVIE_PREFIX, "images", "Hand%d.png"%num
        )
        invert = False
        if self.read_in_cached_attrs(path, invert):
            return
        ImageMobject.__init__(self, path, invert)
        center = self.get_center()
        self.center()
        self.rotate(np.pi, axis = RIGHT+UP)
        self.sort_points(lambda p : np.log(complex(*p[:2])).imag)
        self.rotate(np.pi, axis = RIGHT+UP)
        self.shift(center)
        self.cache_attrs(path, invert = False)

class EdgeDetection(SceneFromVideo):
    args_list = [
        ("CountingInBinary.m4v", 35, 70),
        ("CountingInBinary.m4v", 0, 100),
        ("CountingInBinary.m4v", 10, 50),
    ]
    @staticmethod
    def args_to_string(filename, t1, t2):
        return "-".join([filename.split(".")[0], str(t1), str(t2)])

    def construct(self, filename, t1, t2):
        path = os.path.join(MOVIE_DIR, filename)
        SceneFromVideo.construct(self, path)
        self.apply_gaussian_blur()
        self.apply_edge_detection(t1, t2)

class BufferedCounting(SceneFromVideo):
    def construct(self):
        path = os.path.join(MOVIE_DIR, "CountingInBinary.m4v")
        time_range = (3, 42)
        SceneFromVideo.construct(self, path, time_range = time_range)
        self.buffer_pixels(spreads = (3, 2))
        # self.make_all_black_or_white()

    def buffer_pixels(self, spreads = (2, 2)):
        ksize = (5, 5)
        sigmaX = 10
        threshold1 = 35
        threshold2 = 70

        matrices = [
            thick_diagonal(dim, spread)
            for dim, spread in zip(self.shape, spreads)
        ]
        for frame, index in zip(self.frames, it.count()):
            print index, "of", len(self.frames)
            blurred = cv2.GaussianBlur(frame, ksize, sigmaX)
            edged = cv2.Canny(blurred, threshold1, threshold2)
            buffed = reduce(np.dot, [matrices[0], edged, matrices[1]])
            for i in range(3):
                self.frames[index][:,:,i] = buffed

    def make_all_black_or_white(self):
        self.frames = [
            255*(frame != 0).astype('uint8')
            for frame in self.frames
        ]
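A tiny NumPy-only check of the band-matrix "buffering" trick in buffer_pixels (the 5x5 frame and single hot pixel are made up; the band matrix below matches thick_diagonal from helpers.py): multiplying on the left spreads an edge pixel to nearby rows, multiplying on the right spreads it to nearby columns, and make_all_black_or_white then flattens everything nonzero to white.

import numpy as np

def band(dim, thickness = 2):
    # Same 0/1 band matrix as thick_diagonal in helpers.py
    diffs = np.subtract.outer(np.arange(dim), np.arange(dim))
    return (np.abs(diffs) < thickness).astype('uint8')

edged = np.zeros((5, 5), dtype = 'uint8')
edged[2, 2] = 255                                   # one hot edge pixel
buffed = band(5).dot(edged).dot(band(5))
print((buffed != 0).astype('uint8'))                # ones fill the 3x3 block around (2, 2)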
class ClearLeftSide(SceneFromVideo):
    args_list = [
        ("BufferedCounting",),
    ]
    @staticmethod
    def args_to_string(scenename):
        return scenename

    def construct(self, scenename):
        path = os.path.join(MOVIE_DIR, MOVIE_PREFIX, scenename + ".mp4")
        SceneFromVideo.construct(self, path)
        self.highlight_region_over_time_range(
            Region(lambda x, y : x < -1, shape = self.shape)
        )

class DraggedPixels(SceneFromVideo):
    args_list = [
        ("BufferedCounting",),
        ("CountingWithLeftClear",),
    ]
    @staticmethod
    def args_to_string(*args):
        return args[0]

    def construct(self, video):
        path = os.path.join(MOVIE_DIR, MOVIE_PREFIX, video+".mp4")
        SceneFromVideo.construct(self, path)
        self.drag_pixels()

    def drag_pixels(self, num_frames_to_drag_over = 5):
        for index in range(len(self.frames)-1, 0, -1):
            self.frames[index] = np.max([
                self.frames[k]
                for k in range(
                    max(index-num_frames_to_drag_over, 0),
                    index
                )
            ], axis = 0)
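A minimal illustration of drag_pixels (the toy frames are made up): each frame is replaced by the per-pixel maximum of the preceding num_frames_to_drag_over frames, and iterating backwards keeps those maxima based on the original frames, so a bright pixel leaves a short trail behind it.

import numpy as np

frames = [np.zeros((2, 2), dtype = 'uint8') for _ in range(6)]
frames[0][0, 0] = 255                     # one bright pixel in the first frame

num_frames_to_drag_over = 3
for index in range(len(frames) - 1, 0, -1):
    frames[index] = np.max([
        frames[k]
        for k in range(max(index - num_frames_to_drag_over, 0), index)
    ], axis = 0)

print([int(f[0, 0]) for f in frames])     # [255, 255, 255, 255, 0, 0]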
class SaveEachNumber(SceneFromVideo):
    def construct(self):
        path = os.path.join(MOVIE_DIR, MOVIE_PREFIX, "ClearLeftSideBufferedCounting.mp4")
        SceneFromVideo.construct(self, path)
        for count in COUNT_TO_FRAME_NUM:
            path = os.path.join(
                MOVIE_DIR, MOVIE_PREFIX, "images",
                "Hand%d.png"%count
            )
            Image.fromarray(self.frames[COUNT_TO_FRAME_NUM[count]]).save(path)

class ShowCounting(SceneFromVideo):
    args_list = [
        ("CountingWithLeftClear",),
        ("ClearLeftSideBufferedCounting",),
    ]
    @staticmethod
    def args_to_string(filename):
        return filename

    def construct(self, filename):
        path = os.path.join(MOVIE_DIR, MOVIE_PREFIX, filename + ".mp4")
        SceneFromVideo.construct(self, path)
        total_time = len(self.frames)*self.frame_duration
        for count in range(32):
            print count
            mob = tex_mobject(str(count)).scale(1.5)
            mob.shift(0.3*LEFT).to_edge(UP, buff = 0.1)
            index_range = range(
                max(COUNT_TO_FRAME_NUM[count]-10, 0),
                COUNT_TO_FRAME_NUM[count+1]-10
            )
            for index in index_range:
                self.frames[index] = disp.paint_mobject(
                    mob, self.frames[index]
                )

class ShowFrameNum(SceneFromVideo):
    args_list = [
        ("ClearLeftSideBufferedCounting",),
    ]
    @staticmethod
    def args_to_string(filename):
        return filename

    def construct(self, filename):
        path = os.path.join(MOVIE_DIR, MOVIE_PREFIX, filename+".mp4")
        SceneFromVideo.construct(self, path)
        for frame, count in zip(self.frames, it.count()):
            print count, "of", len(self.frames)
            mob = CompoundMobject(*[
                tex_mobject(char).shift(0.3*x*RIGHT)
                for char, x, in zip(str(count), it.count())
            ])
            self.frames[count] = disp.paint_mobject(
                mob.to_corner(UP+LEFT),
                frame
            )

if __name__ == "__main__":
    command_line_create_scene(MOVIE_PREFIX)