import numpy as np
import itertools as it
import os
from PIL import Image
from random import random

from tex_utils import *
from mobject import *


class ImageMobject(Mobject2D):
    """
    Automatically filters out pixels matching filter_color ("black" by default)
    """

    # SHOULD_BUFF_POINTS = False
    def __init__(self,
                 image_file,
                 filter_color = "black",
                 invert = True,
                 use_cache = True,
                 *args, **kwargs):
        Mobject2D.__init__(self, *args, **kwargs)
        self.filter_rgb = (255 * np.array(Color(filter_color).get_rgb())).astype('uint8')
        self.name = to_cammel_case(
            os.path.split(image_file)[-1].split(".")[0]
        )
        self.use_cache = use_cache
        possible_paths = [
            image_file,
            os.path.join(IMAGE_DIR, image_file),
            os.path.join(IMAGE_DIR, image_file + ".jpg"),
            os.path.join(IMAGE_DIR, image_file + ".png"),
        ]
        for path in possible_paths:
            if os.path.exists(path):
                self.generate_points_from_file(path, invert)
                return
        raise IOError("File not found: %s" % image_file)

    def generate_points_from_file(self, path, invert):
        if self.use_cache and self.read_in_cached_attrs(path, invert):
            return
        image = Image.open(path).convert('RGB')
        if invert:
            image = invert_image(image)
        self.generate_points_from_image_array(np.array(image))
        self.cache_attrs(path, invert)
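
    # Caching note: each (path, invert) pair is hashed into a file-name stem in
    # IMAGE_MOBJECT_DIR, with one flat binary file per cached attribute
    # ("points", "rgbs"), written via numpy tofile and read back via fromfile.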
    def get_cached_attr_files(self, path, invert, attrs):
        # Hash should be unique to the (path, invert) pair
        unique_hash = str(hash(path + str(invert)))
        return [
            os.path.join(IMAGE_MOBJECT_DIR, unique_hash) + "." + attr
            for attr in attrs
        ]

    def read_in_cached_attrs(self, path, invert,
                             attrs = ("points", "rgbs"),
                             dtype = "float64"):
        cached_attr_files = self.get_cached_attr_files(path, invert, attrs)
        if all(map(os.path.exists, cached_attr_files)):
            for attr, cache_file in zip(attrs, cached_attr_files):
                arr = np.fromfile(cache_file, dtype = dtype)
                arr = arr.reshape(arr.size // self.DIM, self.DIM)
                setattr(self, attr, arr)
            return True
        return False

    def cache_attrs(self, path, invert,
                    attrs = ("points", "rgbs"),
                    dtype = "float64"):
        cached_attr_files = self.get_cached_attr_files(path, invert, attrs)
        for attr, cache_file in zip(attrs, cached_attr_files):
            getattr(self, attr).astype(dtype).tofile(cache_file)

    def generate_points_from_image_array(self, image_array):
        height, width = image_array.shape[:2]
        # Flatten array, and find indices where rgb is not filter_rgb
        array = image_array.reshape((height * width, 3))
        bools = array == self.filter_rgb
        bools = bools[:,0]*bools[:,1]*bools[:,2]
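        # keep only the flat indices of pixels that do NOT match filter_rgb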
        indices = np.arange(height * width, dtype = 'int')[~bools]
        rgbs = array[indices, :].astype('float') / 255.0
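
        # Each surviving flat index encodes (row, col); map it to an (x, y, 0)
        # point centered on the image midpoint, with y increasing upward.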
        points = np.zeros((indices.size, 3), dtype = 'float64')
        points[:,0] = indices % width - width / 2
        points[:,1] = -indices // width + height / 2
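
        # Scale uniformly so the image fits the frame: if the image is
        # proportionally taller than the frame, fit its height to 2*SPACE_HEIGHT;
        # otherwise fit its width to 2*SPACE_WIDTH.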
        height, width = map(float, (height, width))
        if height / width > float(DEFAULT_HEIGHT) / DEFAULT_WIDTH:
            points *= 2 * SPACE_HEIGHT / height
        else:
            points *= 2 * SPACE_WIDTH / width
        self.add_points(points, rgbs = rgbs)

    def should_buffer_points(self):
        # potentially changed in subclasses
        return False


class Face(ImageMobject):
    def __init__(self, mode = "simple", *args, **kwargs):
        """
        Mode can be "simple", "talking", "straight"
        """
        ImageMobject.__init__(self, mode + "_face", *args, **kwargs)
        self.scale(0.5)
        self.center()


class VideoIcon(ImageMobject):
    def __init__(self, *args, **kwargs):
        ImageMobject.__init__(self, "video_icon", *args, **kwargs)
        self.scale(0.3)
        self.center()
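
# A minimal usage sketch (assumes the corresponding image files, e.g.
# "talking_face" and "video_icon", exist in IMAGE_DIR):
#   face = Face(mode = "talking")
#   icon = VideoIcon()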


def text_mobject(text, size = None):
    size = size or "\\Large" #TODO, auto-adjust?
    return tex_mobject(text, size, TEMPLATE_TEXT_FILE)


def tex_mobject(expression,
                size = None,
                template_tex_file = TEMPLATE_TEX_FILE):
    if size is None:
        if len("".join(expression)) < MAX_LEN_FOR_HUGE_TEX_FONT:
            size = "\\Huge"
        else:
            size = "\\large"
    #TODO, make this more sophisticated.
    image_files = tex_to_image(expression, size, template_tex_file)
    if isinstance(image_files, list):
        #TODO, is checking listiness really the best here?
        result = CompoundMobject(*map(ImageMobject, image_files))
    else:
        result = ImageMobject(image_files)
    return result.highlight("white").center()
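

# A minimal usage sketch (hypothetical strings; assumes the LaTeX template
# files referenced above are available):
#   title = text_mobject("Hello world")
#   formula = tex_mobject("e^{i\\pi} + 1 = 0")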