import numpy as np
import itertools as it
import operator as op
import os

from PIL import Image
from colour import Color
import aggdraw
import copy
import time

from constants import *

from mobject.mobject import Mobject, Group
from mobject.point_cloud_mobject import PMobject
from mobject.vectorized_mobject import VMobject
from mobject.image_mobject import ImageMobject

from utils.color import rgb_to_hex, color_to_int_rgba
from utils.config_ops import digest_config, digest_locals, DictAsObject
from utils.images import get_full_raster_image_path
from utils.iterables import remove_list_redundancies, list_difference_update
from utils.iterables import batch_by_property
from utils.simple_functions import fdiv


class Camera(object):
    CONFIG = {
        "background_image" : None,
        "pixel_shape" : (DEFAULT_PIXEL_HEIGHT, DEFAULT_PIXEL_WIDTH),
        # Note: frame_shape will be resized to match pixel_shape
        "frame_shape" : (FRAME_HEIGHT, FRAME_WIDTH),
        "space_center" : ORIGIN,
        "background_color" : BLACK,
        # Points in vectorized mobjects with norm greater
        # than this value will be rescaled.
        "max_allowable_norm" : FRAME_WIDTH,
        "image_mode" : "RGBA",
        "n_rgb_coords" : 4,
        "background_alpha" : 0, # Out of rgb_max_val
        "pixel_array_dtype" : 'uint8',
        "use_z_coordinate_for_display_order" : False,
        # z_buff_func is only used if the flag above is set to True.
        # Round the z coordinate to the nearest hundredth when comparing.
        "z_buff_func" : lambda m : np.round(m.get_center()[2], 2),
    }
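
    # A note on configuration (an assumption about digest_config, which lives
    # in utils.config_ops outside this file): any of the CONFIG keys above
    # should be overridable per instance through keyword arguments, e.g.
    # Camera(pixel_shape = (480, 854)) for a low-resolution render, with
    # unspecified keys falling back to these defaults.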

    def __init__(self, background = None, **kwargs):
        digest_config(self, kwargs, locals())
        self.rgb_max_val = np.iinfo(self.pixel_array_dtype).max
        self.init_background()
        self.resize_frame_shape()
        self.reset()

    def __deepcopy__(self, memo):
        # This is to address a strange bug where deepcopying
        # will result in a segfault, which is somehow related
        # to the aggdraw library
        self.canvas = None
        return copy.copy(self)

    def resize_frame_shape(self, fixed_dimension = 0):
        """
        Changes frame_shape to match the aspect ratio
        of pixel_shape, where fixed_dimension determines
        whether frame_shape[0] (height) or frame_shape[1] (width)
        remains fixed while the other changes accordingly.
        """
        aspect_ratio = float(self.pixel_shape[1])/self.pixel_shape[0]
        frame_height, frame_width = self.frame_shape
        if fixed_dimension == 0:
            frame_width = aspect_ratio*frame_height
        else:
            frame_height = frame_width/aspect_ratio
        self.frame_shape = (frame_height, frame_width)
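        # Worked example (illustrative numbers, not from the original file):
        # with pixel_shape = (1080, 1920), aspect_ratio is 1920/1080 = 16/9.
        # Keeping fixed_dimension = 0 holds the frame height fixed, so a
        # frame height of 8 space units yields a frame of roughly (8, 14.22);
        # fixed_dimension = 1 would instead keep the width fixed.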

    def init_background(self):
        if self.background_image is not None:
            path = get_full_raster_image_path(self.background_image)
            image = Image.open(path).convert(self.image_mode)
            height, width = self.pixel_shape
            # TODO, how to gracefully handle backgrounds
            # with different sizes?
            self.background = np.array(image)[:height, :width]
            self.background = self.background.astype(self.pixel_array_dtype)
        else:
            background_rgba = color_to_int_rgba(
                self.background_color, alpha = self.background_alpha
            )
            self.background = np.zeros(
                list(self.pixel_shape)+[self.n_rgb_coords],
                dtype = self.pixel_array_dtype
            )
            self.background[:,:] = background_rgba

    def get_image(self):
        return Image.fromarray(
            self.pixel_array,
            mode = self.image_mode
        )

    def get_pixel_array(self):
        return self.pixel_array

    def convert_pixel_array(self, pixel_array, convert_from_floats = False):
        retval = np.array(pixel_array)
        if convert_from_floats:
            retval = np.apply_along_axis(
                lambda f : (f * self.rgb_max_val).astype(self.pixel_array_dtype),
                2,
                retval
            )
        return retval

    def set_pixel_array(self, pixel_array, convert_from_floats = False):
        converted_array = self.convert_pixel_array(pixel_array, convert_from_floats)
        if not (hasattr(self, "pixel_array") and self.pixel_array.shape == converted_array.shape):
            self.pixel_array = converted_array
        else:
            # Set in place
            self.pixel_array[:,:,:] = converted_array[:,:,:]

    def set_background(self, pixel_array, convert_from_floats = False):
        self.background = self.convert_pixel_array(pixel_array, convert_from_floats)

    def make_background_from_func(self, coords_to_colors_func):
        """
        Makes a background pixel array by using coords_to_colors_func to
        determine each pixel's color. Each input to coords_to_colors_func is
        an (x, y) pair in space (in ordinary space coordinates; not pixel
        coordinates), and each output is expected to be an RGBA array of 4 floats.
        """

        print "Starting make_background_from_func; for reference, the current time is ", time.strftime("%H:%M:%S")
        coords = self.get_coords_of_all_pixels()
        new_background = np.apply_along_axis(
            coords_to_colors_func,
            2,
            coords
        )
        print "Ending make_background_from_func; for reference, the current time is ", time.strftime("%H:%M:%S")

        return self.convert_pixel_array(new_background, convert_from_floats = True)

    def set_background_from_func(self, coords_to_colors_func):
        self.set_background(self.make_background_from_func(coords_to_colors_func))

    def reset(self):
        self.set_pixel_array(self.background)

    ####

    def extract_mobject_family_members(self, mobjects, only_those_with_points = False):
        if only_those_with_points:
            method = Mobject.family_members_with_points
        else:
            method = Mobject.submobject_family
        return remove_list_redundancies(list(
            it.chain(*[
                method(m)
                for m in mobjects
                if not (isinstance(m, VMobject) and m.is_subpath)
            ])
        ))
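    # Note: a mobject's "family" (via Mobject.submobject_family, defined in
    # mobject.mobject) is the mobject itself together with all of its
    # submobjects, recursively. The method above returns the deduplicated
    # union of those families, skipping VMobject subpaths, which are drawn
    # by their parent path instead.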

    def get_mobjects_to_display(
        self, mobjects,
        include_submobjects = True,
        excluded_mobjects = None,
    ):
        if include_submobjects:
            mobjects = self.extract_mobject_family_members(
                mobjects, only_those_with_points = True
            )
            if excluded_mobjects:
                all_excluded = self.extract_mobject_family_members(
                    excluded_mobjects
                )
                mobjects = list_difference_update(mobjects, all_excluded)

        if self.use_z_coordinate_for_display_order:
            # Should perhaps think about what happens here when include_submobjects is False,
            # (for now, the onus is then on the caller to ensure this is handled correctly by
            # passing us an appropriately pre-flattened list of mobjects if need be)
            return sorted(
                mobjects,
                lambda a, b: cmp(self.z_buff_func(a), self.z_buff_func(b))
            )
        else:
            return mobjects

    def capture_mobject(self, mobject, **kwargs):
        return self.capture_mobjects([mobject], **kwargs)

    def capture_mobjects(self, mobjects, **kwargs):
        mobjects = self.get_mobjects_to_display(mobjects, **kwargs)

        # Organize this list into batches of the same type, and
        # apply corresponding function to those batches
        type_func_pairs = [
            (VMobject, self.display_multiple_vectorized_mobjects),
            (PMobject, self.display_multiple_point_cloud_mobjects),
            (ImageMobject, self.display_multiple_image_mobjects),
            (Mobject, lambda batch : batch), # Do nothing
        ]

        def get_mobject_type(mobject):
            for mobject_type, func in type_func_pairs:
                if isinstance(mobject, mobject_type):
                    return mobject_type
            raise Exception(
                "Trying to display something which is not of type Mobject"
            )
        batch_type_pairs = batch_by_property(mobjects, get_mobject_type)

        # Display in these batches
        for batch, batch_type in batch_type_pairs:
            # Check what the type is, and call the appropriate function
            for mobject_type, func in type_func_pairs:
                if batch_type == mobject_type:
                    func(batch)
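        # Batching matters because consecutive mobjects of the same type can
        # share one rendering pass; for example, a run of VMobjects is drawn
        # onto a single aggdraw canvas and flushed once in
        # display_multiple_non_background_colored_vmobjects, rather than
        # flushing once per mobject.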

    ## Methods associated with svg rendering

    def get_aggdraw_canvas(self):
        if not hasattr(self, "canvas") or not self.canvas:
            self.reset_aggdraw_canvas()
        return self.canvas

    def reset_aggdraw_canvas(self):
        image = Image.fromarray(self.pixel_array, mode = self.image_mode)
        self.canvas = aggdraw.Draw(image)

    def display_multiple_vectorized_mobjects(self, vmobjects):
        if len(vmobjects) == 0:
            return
        batch_file_pairs = batch_by_property(
            vmobjects,
            lambda vm : vm.get_background_image_file()
        )
        for batch, file_name in batch_file_pairs:
            if file_name:
                self.display_multiple_background_colored_vmobject(batch)
            else:
                self.display_multiple_non_background_colored_vmobjects(batch)

    def display_multiple_non_background_colored_vmobjects(self, vmobjects):
        self.reset_aggdraw_canvas()
        canvas = self.get_aggdraw_canvas()
        for vmobject in vmobjects:
            self.display_vectorized(vmobject, canvas)
        canvas.flush()

    def display_vectorized(self, vmobject, canvas = None):
        if vmobject.is_subpath:
            # Subpath vectorized mobjects are taken care
            # of by their parent
            return
        canvas = canvas or self.get_aggdraw_canvas()
        pen, fill = self.get_pen_and_fill(vmobject)
        pathstring = self.get_pathstring(vmobject)
        symbol = aggdraw.Symbol(pathstring)
        canvas.symbol((0, 0), symbol, pen, fill)

    def get_pen_and_fill(self, vmobject):
        stroke_width = max(vmobject.get_stroke_width(), 0)
        if stroke_width == 0:
            pen = None
        else:
            stroke_rgb = self.get_stroke_rgb(vmobject)
            stroke_hex = rgb_to_hex(stroke_rgb)
            pen = aggdraw.Pen(stroke_hex, stroke_width)

        fill_opacity = int(self.rgb_max_val*vmobject.get_fill_opacity())
        if fill_opacity == 0:
            fill = None
        else:
            fill_rgb = self.get_fill_rgb(vmobject)
            fill_hex = rgb_to_hex(fill_rgb)
            fill = aggdraw.Brush(fill_hex, fill_opacity)

        return (pen, fill)

    def color_to_hex_l(self, color):
        try:
            return color.get_hex_l()
        except:
            return Color(BLACK).get_hex_l()

    def get_stroke_rgb(self, vmobject):
        return vmobject.get_stroke_rgb()

    def get_fill_rgb(self, vmobject):
        return vmobject.get_fill_rgb()

    def get_pathstring(self, vmobject):
        result = ""
        for mob in [vmobject]+vmobject.get_subpath_mobjects():
            points = mob.points
            # points = self.adjust_out_of_range_points(points)
            if len(points) == 0:
                continue
            aligned_points = self.align_points_to_camera(points)
            coords = self.points_to_pixel_coords(aligned_points)
            coord_strings = coords.flatten().astype(str)
            # Start the new path string with M
            coord_strings[0] = "M" + coord_strings[0]
            # The C at the start of every 6th number communicates
            # that the following 6 define a cubic Bezier
            coord_strings[2::6] = map(lambda s : "C" + str(s), coord_strings[2::6])
            # Possibly finish with "Z"
            if vmobject.mark_paths_closed:
                coord_strings[-1] = coord_strings[-1] + " Z"
            result += " ".join(coord_strings)
        return result
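    # For reference, a typical generated path string (with made-up pixel
    # coordinates) looks like
    #   "M960 540 C970 550 980 560 990 570 Z"
    # i.e. an absolute moveto, cubic Bezier segments marked by a "C" every
    # six numbers, and an optional closing "Z"; aggdraw.Symbol accepts this
    # SVG-style path syntax.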

    def get_background_colored_vmobject_displayer(self):
        # Quite wordy to type out a bunch
        long_name = "background_colored_vmobject_displayer"
        if not hasattr(self, long_name):
            setattr(self, long_name, BackgroundColoredVMobjectDisplayer(self))
        return getattr(self, long_name)

    def display_multiple_background_colored_vmobject(self, cvmobjects):
        displayer = self.get_background_colored_vmobject_displayer()
        cvmobject_pixel_array = displayer.display(*cvmobjects)
        self.overlay_rgba_array(cvmobject_pixel_array)
        return self

    ## Methods for other rendering

    def display_multiple_point_cloud_mobjects(self, pmobjects):
        for pmobject in pmobjects:
            self.display_point_cloud(
                pmobject.points,
                pmobject.rgbas,
                self.adjusted_thickness(pmobject.stroke_width)
            )

    def display_point_cloud(self, points, rgbas, thickness):
        if len(points) == 0:
            return
        points = self.align_points_to_camera(points)
        pixel_coords = self.points_to_pixel_coords(points)
        pixel_coords = self.thickened_coordinates(
            pixel_coords, thickness
        )
        rgba_len = self.pixel_array.shape[2]

        rgbas = (self.rgb_max_val*rgbas).astype(self.pixel_array_dtype)
        target_len = len(pixel_coords)
        factor = target_len/len(rgbas)
        rgbas = np.array([rgbas]*factor).reshape((target_len, rgba_len))

        on_screen_indices = self.on_screen_pixels(pixel_coords)
        pixel_coords = pixel_coords[on_screen_indices]
        rgbas = rgbas[on_screen_indices]

        ph, pw = self.pixel_shape

        flattener = np.array([1, pw], dtype = 'int')
        flattener = flattener.reshape((2, 1))
        indices = np.dot(pixel_coords, flattener)[:,0]
        indices = indices.astype('int')

        new_pa = self.pixel_array.reshape((ph*pw, rgba_len))
        new_pa[indices] = rgbas
        self.pixel_array = new_pa.reshape((ph, pw, rgba_len))
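        # Note on the flattener above: dotting each (x, y) pixel coordinate
        # with [1, pw] produces the row-major index y*pw + x into the
        # (ph*pw, rgba_len) view of pixel_array, so a single fancy-index
        # assignment paints every thickened point at once.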

    def display_multiple_image_mobjects(self, image_mobjects):
        for image_mobject in image_mobjects:
            self.display_image_mobject(image_mobject)

    def display_image_mobject(self, image_mobject):
        corner_coords = self.points_to_pixel_coords(image_mobject.points)
        ul_coords, ur_coords, dl_coords = corner_coords
        right_vect = ur_coords - ul_coords
        down_vect = dl_coords - ul_coords

        impa = image_mobject.pixel_array

        oh, ow = self.pixel_array.shape[:2] # Outer height and width
        ih, iw = impa.shape[:2] # Inner height and width
        rgb_len = self.pixel_array.shape[2]

        image = np.zeros((oh, ow, rgb_len), dtype = self.pixel_array_dtype)

        if right_vect[1] == 0 and down_vect[0] == 0:
            rv0 = right_vect[0]
            dv1 = down_vect[1]
            x_indices = np.arange(rv0, dtype = 'int')*iw/rv0
            y_indices = np.arange(dv1, dtype = 'int')*ih/dv1
            stretched_impa = impa[y_indices][:,x_indices]

            x0, x1 = ul_coords[0], ur_coords[0]
            y0, y1 = ul_coords[1], dl_coords[1]
            if x0 >= ow or x1 < 0 or y0 >= oh or y1 < 0:
                return
            siy0 = max(-y0, 0) # stretched_impa y0
            siy1 = dv1 - max(y1-oh, 0)
            six0 = max(-x0, 0)
            six1 = rv0 - max(x1-ow, 0)
            x0 = max(x0, 0)
            y0 = max(y0, 0)
            image[y0:y1, x0:x1] = stretched_impa[siy0:siy1, six0:six1]
        else:
            # Alternate (slower) tactic if image is tilted
            # List of all coordinates of pixels, given as (x, y),
            # which matches the return type of points_to_pixel_coords,
            # even though np.array indexing naturally happens as (y, x)
            all_pixel_coords = np.zeros((oh*ow, 2), dtype = 'int')
            a = np.arange(oh*ow, dtype = 'int')
            all_pixel_coords[:,0] = a%ow
            all_pixel_coords[:,1] = a/ow

            recentered_coords = all_pixel_coords - ul_coords
            coord_norms = np.linalg.norm(recentered_coords, axis = 1)

            with np.errstate(divide = 'ignore'):
                ix_coords, iy_coords = [
                    np.divide(
                        dim*np.dot(recentered_coords, vect),
                        np.dot(vect, vect),
                    )
                    for vect, dim in (right_vect, iw), (down_vect, ih)
                ]
            to_change = reduce(op.and_, [
                ix_coords >= 0, ix_coords < iw,
                iy_coords >= 0, iy_coords < ih,
            ])
            n_to_change = np.sum(to_change)
            inner_flat_coords = iw*iy_coords[to_change] + ix_coords[to_change]
            flat_impa = impa.reshape((iw*ih, rgb_len))
            target_rgbas = flat_impa[inner_flat_coords, :]

            image = image.reshape((ow*oh, rgb_len))
            image[to_change] = target_rgbas
            image = image.reshape((oh, ow, rgb_len))
        self.overlay_rgba_array(image)

    def overlay_rgba_array(self, arr):
        fg = arr
        bg = self.pixel_array
        src_rgb, src_a, dst_rgb, dst_a = [
            a.astype(np.float32)/self.rgb_max_val
            for a in fg[...,:3], fg[...,3], bg[...,:3], bg[...,3]
        ]

        out_a = src_a + dst_a*(1.0-src_a)

        # When the output alpha is 0 for full transparency,
        # we have a choice over what RGB value to use in our
        # output representation. We choose 0 here.
        out_rgb = fdiv(
            src_rgb*src_a[..., None] + \
            dst_rgb*dst_a[..., None]*(1.0-src_a[..., None]),
            out_a[..., None],
            zero_over_zero_value = 0
        )

        self.pixel_array[..., :3] = out_rgb*self.rgb_max_val
        self.pixel_array[..., 3] = out_a*self.rgb_max_val
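        # This is the standard "source over destination" compositing rule for
        # straight (non-premultiplied) alpha:
        #   out_a   = a_src + a_dst*(1 - a_src)
        #   out_rgb = (rgb_src*a_src + rgb_dst*a_dst*(1 - a_src)) / out_a
        # with the 0/0 case (both layers fully transparent) mapped to 0.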

    def align_points_to_camera(self, points):
        ## This is where projection should live
        return points - self.space_center

    def adjust_out_of_range_points(self, points):
        if not np.any(points > self.max_allowable_norm):
            return points
        norms = np.apply_along_axis(np.linalg.norm, 1, points)
        violator_indices = norms > self.max_allowable_norm
        violators = points[violator_indices,:]
        violator_norms = norms[violator_indices]
        reshaped_norms = np.repeat(
            violator_norms.reshape((len(violator_norms), 1)),
            points.shape[1], 1
        )
        rescaled = self.max_allowable_norm * violators / reshaped_norms
        points[violator_indices] = rescaled
        return points

    def points_to_pixel_coords(self, points):
        result = np.zeros((len(points), 2))
        ph, pw = self.pixel_shape
        sh, sw = self.frame_shape
        width_mult = pw/sw
        width_add = pw/2
        height_mult = ph/sh
        height_add = ph/2
        # Flip on y-axis as you go
        height_mult *= -1

        result[:,0] = points[:,0]*width_mult + width_add
        result[:,1] = points[:,1]*height_mult + height_add
        return result.astype('int')
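        # Worked example (illustrative numbers): with pixel_shape (1080, 1920)
        # and frame_shape roughly (8, 14.22), the space origin maps to pixel
        # (960, 540), and moving one space unit to the right moves about
        # 1920/14.22, i.e. 135 pixels, to the right; the y direction is
        # flipped because pixel rows run top to bottom.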

    def on_screen_pixels(self, pixel_coords):
        return reduce(op.and_, [
            pixel_coords[:,0] >= 0,
            pixel_coords[:,0] < self.pixel_shape[1],
            pixel_coords[:,1] >= 0,
            pixel_coords[:,1] < self.pixel_shape[0],
        ])

    def adjusted_thickness(self, thickness):
        big_shape = PRODUCTION_QUALITY_CAMERA_CONFIG["pixel_shape"]
        factor = sum(big_shape)/sum(self.pixel_shape)
        return 1 + (thickness-1)/factor

    def get_thickening_nudges(self, thickness):
        _range = range(-thickness/2+1, thickness/2+1)
        return np.array(list(it.product(_range, _range)))
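    # For thickness 3, _range is [-1, 0, 1] (using Python 2 integer
    # division), so the nudges are the 9 offsets in {-1, 0, 1} x {-1, 0, 1}
    # and each point gets painted as a 3x3 block of pixels.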

    def thickened_coordinates(self, pixel_coords, thickness):
        nudges = self.get_thickening_nudges(thickness)
        pixel_coords = np.array([
            pixel_coords + nudge
            for nudge in nudges
        ])
        size = pixel_coords.size
        return pixel_coords.reshape((size/2, 2))

    def get_coords_of_all_pixels(self):
        # These are in x, y order, to help me keep things straight
        full_space_dims = np.array(self.frame_shape)[::-1]
        full_pixel_dims = np.array(self.pixel_shape)[::-1]

        # These are addressed in the same y, x order as in pixel_array, but the values in them
        # are listed in x, y order
        uncentered_pixel_coords = np.indices(self.pixel_shape)[::-1].transpose(1, 2, 0)
        uncentered_space_coords = fdiv(
            uncentered_pixel_coords * full_space_dims,
            full_pixel_dims)
        # Could structure above line's computation slightly differently, but figured (without much
        # thought) multiplying by frame_shape first, THEN dividing by pixel_shape, is probably
        # better than the other order, for avoiding underflow quantization in the division (whereas
        # overflow is unlikely to be a problem)

        centered_space_coords = (uncentered_space_coords - fdiv(full_space_dims, 2))

        # Have to also flip the y coordinates to account for pixel array being listed in
        # top-to-bottom order, opposite of screen coordinate convention
        centered_space_coords = centered_space_coords * (1, -1)

        return centered_space_coords


class BackgroundColoredVMobjectDisplayer(object):
    def __init__(self, camera):
        self.camera = camera
        self.file_name_to_pixel_array_map = {}
        self.init_canvas()

    def init_canvas(self):
        self.pixel_array = np.zeros(
            self.camera.pixel_array.shape,
            dtype = self.camera.pixel_array_dtype,
        )
        self.reset_canvas()

    def reset_canvas(self):
        image = Image.fromarray(self.pixel_array, mode = self.camera.image_mode)
        self.canvas = aggdraw.Draw(image)

    def resize_background_array(
        self, background_array,
        new_width, new_height,
        mode = "RGBA"
    ):
        image = Image.fromarray(background_array, mode = mode)
        resized_image = image.resize((new_width, new_height))
        return np.array(resized_image)

    def resize_background_array_to_match(self, background_array, pixel_array):
        height, width = pixel_array.shape[:2]
        mode = "RGBA" if pixel_array.shape[2] == 4 else "RGB"
        return self.resize_background_array(background_array, width, height, mode)

    def get_background_array(self, file_name):
        if file_name in self.file_name_to_pixel_array_map:
            return self.file_name_to_pixel_array_map[file_name]
        full_path = get_full_raster_image_path(file_name)
        image = Image.open(full_path)
        array = np.array(image)

        camera = self.camera
        if not np.all(camera.pixel_array.shape == array.shape):
            array = self.resize_background_array_to_match(array, camera.pixel_array)

        self.file_name_to_pixel_array_map[file_name] = array
        return array

    def display(self, *cvmobjects):
        batch_image_file_pairs = batch_by_property(
            cvmobjects, lambda cv : cv.get_background_image_file()
        )
        curr_array = None
        for batch, image_file in batch_image_file_pairs:
            background_array = self.get_background_array(image_file)
            for cvmobject in batch:
                self.camera.display_vectorized(cvmobject, self.canvas)
            self.canvas.flush()
            new_array = np.array(
                (background_array*self.pixel_array.astype('float')/255),
                dtype = self.camera.pixel_array_dtype
            )
            if curr_array is None:
                curr_array = new_array
            else:
                curr_array = np.maximum(curr_array, new_array)
            self.pixel_array[:,:] = 0
            self.reset_canvas()
        return curr_array
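        # Note: multiplying background_array by the rendered pixel_array/255
        # uses the drawn vmobjects as a mask over the background image; where
        # nothing was drawn the product is 0, and a region drawn in pure
        # white shows the background image at full strength.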


class MovingCamera(Camera):
    """
    Stays in line with the height, width and position
    of a given mobject
    """
    CONFIG = {
        "aligned_dimension" : "width" # or height
    }

    def __init__(self, mobject, **kwargs):
        digest_locals(self)
        Camera.__init__(self, **kwargs)

    def capture_mobjects(self, *args, **kwargs):
        self.space_center = self.mobject.get_center()
        self.realign_frame_shape()
        Camera.capture_mobjects(self, *args, **kwargs)

    def realign_frame_shape(self):
        height, width = self.frame_shape
        if self.aligned_dimension == "height":
            self.frame_shape = (self.mobject.get_height(), width)
        else:
            self.frame_shape = (height, self.mobject.get_width())
        self.resize_frame_shape(0 if self.aligned_dimension == "height" else 1)
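    # In other words, each capture re-centers the camera on the tracked
    # mobject, sets the aligned dimension of frame_shape to the mobject's
    # current size, and then lets resize_frame_shape fix the other dimension
    # so the pixel aspect ratio is preserved.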


# TODO: Add an attribute to mobjects under which they can specify that they should just
# map their centers but remain otherwise undistorted (useful for labels, etc.)
class MappingCamera(Camera):
    CONFIG = {
        "mapping_func" : lambda p : p,
        "min_anchor_points" : 50,
        "allow_object_intrusion" : False
    }

    def points_to_pixel_coords(self, points):
        return Camera.points_to_pixel_coords(self, np.apply_along_axis(self.mapping_func, 1, points))

    def capture_mobjects(self, mobjects, **kwargs):
        mobjects = self.get_mobjects_to_display(mobjects, **kwargs)
        if self.allow_object_intrusion:
            mobject_copies = mobjects
        else:
            mobject_copies = [mobject.copy() for mobject in mobjects]
        for mobject in mobject_copies:
            if isinstance(mobject, VMobject) and \
               0 < mobject.get_num_anchor_points() < self.min_anchor_points:
                mobject.insert_n_anchor_points(self.min_anchor_points)
        Camera.capture_mobjects(
            self, mobject_copies,
            include_submobjects = False,
            excluded_mobjects = None,
        )


# Note: This allows layering of multiple cameras onto the same portion of the pixel array,
# the later cameras overwriting the former
#
# TODO: Add optional separator borders between cameras (or perhaps peel this off into a
# CameraPlusOverlay class)
class MultiCamera(Camera):
    def __init__(self, *cameras_with_start_positions, **kwargs):
        self.shifted_cameras = [
            DictAsObject({
                "camera" : camera_with_start_positions[0],
                "start_x" : camera_with_start_positions[1][1],
                "start_y" : camera_with_start_positions[1][0],
                "end_x" : camera_with_start_positions[1][1] + camera_with_start_positions[0].pixel_shape[1],
                "end_y" : camera_with_start_positions[1][0] + camera_with_start_positions[0].pixel_shape[0],
            })
            for camera_with_start_positions in cameras_with_start_positions
        ]
        Camera.__init__(self, **kwargs)

    def capture_mobjects(self, mobjects, **kwargs):
        for shifted_camera in self.shifted_cameras:
            shifted_camera.camera.capture_mobjects(mobjects, **kwargs)

            self.pixel_array[
                shifted_camera.start_y:shifted_camera.end_y,
                shifted_camera.start_x:shifted_camera.end_x] \
                = shifted_camera.camera.pixel_array

    def set_background(self, pixel_array, **kwargs):
        for shifted_camera in self.shifted_cameras:
            shifted_camera.camera.set_background(
                pixel_array[
                    shifted_camera.start_y:shifted_camera.end_y,
                    shifted_camera.start_x:shifted_camera.end_x],
                **kwargs
            )

    def set_pixel_array(self, pixel_array, **kwargs):
        Camera.set_pixel_array(self, pixel_array, **kwargs)
        for shifted_camera in self.shifted_cameras:
            shifted_camera.camera.set_pixel_array(
                pixel_array[
                    shifted_camera.start_y:shifted_camera.end_y,
                    shifted_camera.start_x:shifted_camera.end_x],
                **kwargs
            )

    def init_background(self):
        Camera.init_background(self)
        for shifted_camera in self.shifted_cameras:
            shifted_camera.camera.init_background()


# A MultiCamera which, when called with two full-size cameras, initializes itself
# as a splitscreen, also taking care to resize each individual camera within it
class SplitScreenCamera(MultiCamera):
    def __init__(self, left_camera, right_camera, **kwargs):
        digest_config(self, kwargs)
        self.left_camera = left_camera
        self.right_camera = right_camera

        half_width = self.pixel_shape[1] / 2
        for camera in [self.left_camera, self.right_camera]:
            camera.pixel_shape = (self.pixel_shape[0], half_width) # TODO: Round up on one if width is odd
            camera.init_background()
            camera.resize_frame_shape()
            camera.reset()

        MultiCamera.__init__(self, (left_camera, (0, 0)), (right_camera, (0, half_width)))
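

# Minimal usage sketch (an illustrative assumption, not part of the original
# module): build a Camera, paint a background from a coords -> RGBA function,
# and save the result with PIL. Only names defined or imported above are used.
if __name__ == "__main__":
    def radial_gradient(coords):
        # coords is an (x, y) pair in space coordinates; return 4 floats in [0, 1]
        x, y = coords
        r = min(np.sqrt(x**2 + y**2) / FRAME_WIDTH, 1.0)
        return np.array([r, 0.2, 1.0 - r, 1.0])

    camera = Camera()
    camera.set_background_from_func(radial_gradient)
    camera.reset()  # copy the freshly computed background into pixel_array
    camera.get_image().save("camera_background_demo.png")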