from manimlib.imports import *

# Scale text relative to an 8-unit-tall reference frame.
original = 8
new = FRAME_HEIGHT
scale_factor = FRAME_HEIGHT / original
print(FRAME_HEIGHT, FRAME_WIDTH)


class Level(Polygon):
    """A filled quadrilateral defined by its four corners."""
    CONFIG = {
        "color": WHITE,
        "mark_paths_closed": True,
        "close_new_points": True,
    }

    def __init__(self, UL, UR, DR, DL, **kwargs):
        Polygon.__init__(self, UL, UR, DR, DL, **kwargs)


class BaseLevel(VGroup):
    """A building with `levels` storeys: doors on the ground floor, windows above."""

    def __init__(self, levels=18, height=3, windows=8, **kwargs):
        VGroup.__init__(self, **kwargs)
        for x in range(0, levels):
            # Outer frame of storey x.
            Frame = Level(np.array([-FRAME_X_RADIUS / 2, -FRAME_Y_RADIUS + (x * height) + height, 0]),
                          np.array([FRAME_X_RADIUS / 2, -FRAME_Y_RADIUS + (x * height) + height, 0]),
                          np.array([FRAME_X_RADIUS / 2, -FRAME_Y_RADIUS + (x * height), 0]),
                          np.array([-FRAME_X_RADIUS / 2, -FRAME_Y_RADIUS + (x * height), 0])).set_fill("#D5DFE2", .5)
            # self.add(TextMobject(f"Level {x}").move_to([-FRAME_X_RADIUS, -FRAME_Y_RADIUS + (x * height) + 1, 0]))
            self.add(Frame)
            if x == 0:
                # Ground floor: a pair of double doors with round windows and knobs.
                left_door = Level(np.array([-2, -FRAME_Y_RADIUS + (x * height) + height - .5, 0]),
                                  np.array([0, -FRAME_Y_RADIUS + (x * height) + height - .5, 0]),
                                  np.array([0, -FRAME_Y_RADIUS + (x * height), 0]),
                                  np.array([-2, -FRAME_Y_RADIUS + (x * height), 0])).set_fill(RED, 1)
                right_door = Level(np.array([2, -FRAME_Y_RADIUS + (x * height) + height - .5, 0]),
                                   np.array([0, -FRAME_Y_RADIUS + (x * height) + height - .5, 0]),
                                   np.array([0, -FRAME_Y_RADIUS + (x * height), 0]),
                                   np.array([2, -FRAME_Y_RADIUS + (x * height), 0])).set_fill(RED, 1)
                left_door_window = Circle(radius=.5).move_to(left_door).set_fill(BLUE, 1)
                right_door_window = Circle(radius=.5).move_to(right_door).set_fill(BLUE, 1)
                left_door_knob = Circle(radius=.1).move_to(left_door, aligned_edge=RIGHT).set_fill(WHITE, 1)
                right_door_knob = Circle(radius=.1).move_to(right_door, aligned_edge=LEFT).set_fill(WHITE, 1)
                self.add(left_door)
                self.add(right_door)
                self.add(left_door_window)
                self.add(right_door_window)
                self.add(left_door_knob)
                self.add(right_door_knob)
            # Windows on every storey above the ground floor.
            if x > 0:
                shift = FRAME_X_RADIUS / 2 / windows
                start = -FRAME_X_RADIUS / 2 + .5
                for xx in range(0, windows):
                    self.add(
                        Level(np.array([start + shift * xx + .5, -FRAME_Y_RADIUS + (x * height) + height - .5, 0]),
                              np.array([start + shift + shift * xx, -FRAME_Y_RADIUS + (x * height) + height - .5, 0]),
                              np.array([start + shift + shift * xx, -FRAME_Y_RADIUS + (x * height) + .5, 0]),
                              np.array([start + shift * xx + .5, -FRAME_Y_RADIUS + (x * height) + .5, 0])).set_fill(
                            BLUE, 1))
                    start = start + shift
class Zappos18StoryBuilding(MovingCameraScene):
    CONFIG = {
        "camera_config": {"background_color": "#4D4D4D"},
    }

    def setup(self):
        MovingCameraScene.setup(self)
        self.camera_frame.set_color("#4D4D4D")
        # self.set_camera_background('#4D4D4D')

    def construct(self):
        # Title card.
        title = TextMobject("Building an 18 story building").scale(scale_factor)
        python = TextMobject(
            "\\tiny using only Python"
        ).scale(scale_factor)
        python_icon = SVGMobject("Python-logo-notext.svg").set_height(height=FRAME_HEIGHT / 8).scale(.25)
        python_icon[0].set_style(fill_opacity=1, stroke_width=0, stroke_opacity=0, fill_color=BLUE)
        python_icon[1].set_style(fill_opacity=1, stroke_width=0, stroke_opacity=0, fill_color=YELLOW)
        # Arrange title and subtitle vertically with a throwaway spacer mobject between them.
        VGroup(title, TextMobject("space").scale(scale_factor * 2), python).arrange(DOWN)
        python_group = VGroup(python, python_icon).arrange(RIGHT)
        self.play(
            Write(title),
            FadeInFrom(python_group, UP),
        )
        self.wait()

        transform_title = TextMobject("Let's Start Building").scale(scale_factor)
        transform_title.to_corner(UP + LEFT)
        self.play(
            FadeOut(title),
            LaggedStart(*map(FadeOutAndShiftDown, python_group)),
            Write(transform_title)
        )
        self.wait()
        self.remove(transform_title)

        # Ground floor with the entrance doors.
        last_title = TextMobject("All Buildings Need Entrances").scale(scale_factor).to_corner(UP + LEFT)
        self.play(
            FadeOut(transform_title),
            Write(last_title),
            ShowCreation(BaseLevel(levels=1), run_time=3.0)
        )
        self.wait()

        # Grow the building in steps of six storeys.
        lt = TextMobject("Add some levels!").scale(scale_factor).to_corner(UP + LEFT)
        for x in range(6, 19, 6):
            bl = BaseLevel(levels=1 + x)
            bl2 = BaseLevel(levels=2 + x)
            if last_title.tex_string == "All Buildings Need Entrances":
                self.play(
                    FadeOut(last_title),
                    Write(lt),
                    ShowCreation(bl, run_time=3)
                )
            else:
                self.play(
                    Write(lt),
                    ShowCreation(bl, run_time=3)
                )
            last_title = lt
            self.wait()
            self.play(
                Transform(bl, bl2, run_time=3)
            )
            self.wait()
        self.play(
            Transform(lt, TextMobject("Voilà!").scale(scale_factor).to_corner(UP + LEFT)))
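

# --- Not part of the original gist: a minimal preview sketch. ---
# If this class is appended to the file above (so the manimlib import and
# BaseLevel are in scope), it gives a quick way to look at the building on
# its own. The class name BaseLevelPreview is ours, and the render command
# below assumes the pre-ManimCE 3b1b manimlib; the exact CLI varies by
# version, e.g.:  manim <this_file>.py BaseLevelPreview -pl
class BaseLevelPreview(Scene):
    def construct(self):
        # Draw a three-storey building and hold the final frame.
        self.play(ShowCreation(BaseLevel(levels=3), run_time=2))
        self.wait()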
# This script copies the video frame by frame
import sys

import cv2
import numpy as np
import subprocess as sp
from tqdm import tqdm

gopher = cv2.imread('gopher-rotated.png', cv2.IMREAD_UNCHANGED)
python = cv2.imread('python.png', cv2.IMREAD_UNCHANGED)

# HSV range used to key the overlay region: (lowHue, lowSat, lowVal, highHue, highSat, highVal)
icol = (0, 150, 161, 6, 255, 255)
lowHue = icol[0]
lowSat = icol[1]
lowVal = icol[2]
highHue = icol[3]
highSat = icol[4]
highVal = icol[5]


def add_overlay(frame1, overlay, alpha=1.0):
    # Find the keyed colour region in HSV space.
    frameBGR = cv2.GaussianBlur(frame1, (7, 7), 0)
    hsv = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2HSV)
    colorLow = np.array([lowHue, lowSat, lowVal])
    colorHigh = np.array([highHue, highSat, highVal])
    mask = cv2.inRange(hsv, colorLow, colorHigh)
    # Clean the mask up with a close followed by an open.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    img = cv2.bitwise_and(frame1, frame1, mask=mask)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (30, 30))
    thresh = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, rect_kernel)
    # Paste the overlay centred on every sufficiently large connected component.
    n_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh)
    size_thresh = 300
    for i in range(1, n_labels):
        if stats[i, cv2.CC_STAT_AREA] >= size_thresh:
            x = stats[i, cv2.CC_STAT_LEFT]
            y = stats[i, cv2.CC_STAT_TOP]
            w = stats[i, cv2.CC_STAT_WIDTH]
            h = stats[i, cv2.CC_STAT_HEIGHT]
            # Debug rectangle on the masked image (not written to the output frame).
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), thickness=1)
            x_center = int(centroids[i][0]) - overlay.shape[1] // 2
            y_center = int(centroids[i][1]) - overlay.shape[0] // 2
            # Copy non-transparent overlay pixels, scaled by alpha, onto the frame.
            for y in range(0, overlay.shape[0]):
                for x in range(0, overlay.shape[1]):
                    if overlay[y, x, 3] != 0:
                        frame1[y_center + y, x_center + x, 0:3] = overlay[y, x, 0:3] * alpha
    return frame1
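

# --- Not part of the original gist: a vectorized sketch of the per-pixel loop
# in add_overlay above. It assumes the overlay fits entirely inside the frame
# at the chosen top-left corner, and it keeps the same behaviour of scaling the
# overlay's colour by alpha rather than blending it with the background.
def paste_overlay(frame, overlay, x_left, y_top, alpha=1.0):
    h, w = overlay.shape[:2]
    roi = frame[y_top:y_top + h, x_left:x_left + w]   # view into the frame
    visible = overlay[:, :, 3] != 0                   # non-transparent overlay pixels
    roi[visible] = (overlay[visible, :3] * alpha).astype(frame.dtype)
    return frame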
input_file = '001.MOV'
output_file = '001-modified.mp4'

cap = cv2.VideoCapture(input_file)
ret, frame = cap.read()
height, width, ch = frame.shape
fps = cap.get(cv2.CAP_PROP_FPS)  # OpenCV 2 used "CV_CAP_PROP_FPS"
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
duration = frame_count / fps

ffmpeg = 'ffmpeg'  # name/path of the ffmpeg executable
dimension = '{}x{}'.format(width, height)
f_format = 'bgr24'  # remember OpenCV uses BGR ordering
fps = str(fps)  # ffmpeg takes the frame rate as a string argument

# Pipe raw BGR frames into ffmpeg on stdin and let it encode the output file.
command = [ffmpeg,
           '-y',
           '-f', 'rawvideo',
           '-vcodec', 'rawvideo',
           '-s', dimension,
           '-pix_fmt', f_format,
           '-r', fps,
           '-i', '-',
           '-an',
           '-vcodec', 'mpeg4',
           '-b:v', '45000K',
           output_file]
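
# (Not part of the original gist.) If the local ffmpeg build includes libx264,
# an H.264 encode at constant quality is usually smaller at similar quality.
# A hedged alternative command, reusing the variables defined above:
h264_command = [ffmpeg, '-y',
                '-f', 'rawvideo', '-vcodec', 'rawvideo',
                '-s', dimension, '-pix_fmt', f_format, '-r', fps,
                '-i', '-', '-an',
                '-vcodec', 'libx264', '-crf', '18',
                output_file]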

progress = tqdm(total=frame_count)
err_file = open('error.log', 'w')
proc = sp.Popen(command, stdin=sp.PIPE, stderr=err_file, stdout=sys.stdout)
proc.stdin.write(frame.tobytes())  # tobytes() replaces the deprecated tostring()
count = 1
progress.update()

while True:
    ret, frame = cap.read()
    if not ret:
        break
    if 389 < count < 600:
        # Fade the gopher overlay in over its first 15 frames.
        alpha = 1.0
        if count < (389 + 15):
            alpha = (count - 389) / 15
        proc.stdin.write(add_overlay(frame, gopher, alpha).tobytes())
    elif 650 < count < 830:
        alpha = 1.0
        if count < (650 + 15):
            alpha = (count - 650) / 15
        proc.stdin.write(add_overlay(frame, python, alpha).tobytes())
    elif 860 < count < 1070:
        alpha = 1.0
        if count < (860 + 15):
            alpha = (count - 860) / 15
        proc.stdin.write(add_overlay(frame, gopher, alpha).tobytes())
    else:
        proc.stdin.write(frame.tobytes())
    count += 1
    progress.update()

progress.close()
cap.release()
proc.stdin.close()
err_file.close()
proc.wait()
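

# --- Not part of the original gist: true alpha compositing. ---
# The overlay loop above scales the overlay's colour by `alpha` instead of
# blending it with the background. A sketch of proper compositing that weights
# each overlay pixel by its own alpha channel times a global fade factor, using
# the numpy import from this script. Typical use, assuming the overlay fits
# inside the frame:
#     frame[y0:y0 + h, x0:x0 + w] = blend_rgba(frame[y0:y0 + h, x0:x0 + w], overlay, fade)
def blend_rgba(frame_roi, overlay_bgra, fade=1.0):
    a = (overlay_bgra[:, :, 3:4].astype(np.float32) / 255.0) * fade  # per-pixel weight in [0, 1]
    fg = overlay_bgra[:, :, :3].astype(np.float32)
    bg = frame_roi.astype(np.float32)
    return (fg * a + bg * (1.0 - a)).astype(frame_roi.dtype)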
# This script copies the video frame by frame
import sys
from multiprocessing import Pool

import cv2
import numpy as np
import subprocess as sp
from tqdm import tqdm
import concurrent.futures

gopher = cv2.imread('gopher-front.png', cv2.IMREAD_UNCHANGED)
arena = cv2.imread('SSBU-Pokemon_Stadium_2.png', cv2.IMREAD_UNCHANGED)

# Default HSV range: (lowHue, lowSat, lowVal, highHue, highSat, highVal)
_icol = (54, 62, 95, 179, 151, 140)


def add_overlay(frame1, overlay, alpha=1.0, size_thresh=300, icol=_icol, min_x=0, min_y=0):
    lowHue = icol[0]
    lowSat = icol[1]
    lowVal = icol[2]
    highHue = icol[3]
    highSat = icol[4]
    highVal = icol[5]
    # Find the keyed colour region in HSV space and clean the mask up.
    frameBGR = cv2.GaussianBlur(frame1, (7, 7), 0)
    hsv = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2HSV)
    colorLow = np.array([lowHue, lowSat, lowVal])
    colorHigh = np.array([highHue, highSat, highVal])
    mask = cv2.inRange(hsv, colorLow, colorHigh)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (30, 30))
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    img = cv2.bitwise_and(frame1, frame1, mask=mask)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    n_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(gray)
    # Keep only the largest connected component above the size threshold.
    ii = None
    for i in range(1, n_labels):
        if stats[i, cv2.CC_STAT_AREA] >= size_thresh:
            if ii is None:
                ii = (i, stats[i, cv2.CC_STAT_AREA])
            elif ii[1] < stats[i, cv2.CC_STAT_AREA]:
                ii = (i, stats[i, cv2.CC_STAT_AREA])
    if ii:
        i = ii[0]
        if stats[i, cv2.CC_STAT_AREA] >= size_thresh:
            # x = stats[i, cv2.CC_STAT_LEFT]
            # y = stats[i, cv2.CC_STAT_TOP]
            w = stats[i, cv2.CC_STAT_WIDTH]
            h = stats[i, cv2.CC_STAT_HEIGHT]
            # cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), thickness=1)
            # Resize the overlay to the component's bounding box and centre it there.
            dim = (w, h)
            overlay = cv2.resize(overlay, dim, interpolation=cv2.INTER_AREA)
            x_center = int(centroids[i][0]) - overlay.shape[1] // 2
            y_center = int(centroids[i][1]) - overlay.shape[0] // 2
            for y in range(0, overlay.shape[0]):
                for x in range(0, overlay.shape[1]):
                    if overlay[y, x, 3] != 0 and (y_center + y < frame1.shape[0]) and (x_center + x < frame1.shape[1]):
                        frame1[y_center + y, x_center + x, 0:3] = overlay[y, x, 0:3] * alpha
    return frame1


def max_add_overlay(original_1, frame1, overlay, alpha=1.0, size_thresh=2000000, icol=_icol):
    lowHue = icol[0]
    lowSat = icol[1]
    lowVal = icol[2]
    highHue = icol[3]
    highSat = icol[4]
    highVal = icol[5]
    # Build the mask from the untouched original frame so overlays already
    # pasted onto frame1 do not disturb the colour key.
    frameBGR = cv2.GaussianBlur(original_1, (7, 7), 0)
    hsv = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2HSV)
    colorLow = np.array([lowHue, lowSat, lowVal])
    colorHigh = np.array([highHue, highSat, highVal])
    mask = cv2.inRange(hsv, colorLow, colorHigh)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (75, 75))
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    img = cv2.bitwise_and(original_1, original_1, mask=mask)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    n_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(gray)
    # Keep only the largest connected component above the size threshold.
    ii = None
    for i in range(1, n_labels):
        if stats[i, cv2.CC_STAT_AREA] >= size_thresh:
            if ii is None:
                ii = (i, stats[i, cv2.CC_STAT_AREA])
            elif ii[1] < stats[i, cv2.CC_STAT_AREA]:
                ii = (i, stats[i, cv2.CC_STAT_AREA])
    if ii:
        i = ii[0]
        w = stats[i, cv2.CC_STAT_WIDTH]
        h = stats[i, cv2.CC_STAT_HEIGHT]
        dim = (w, h)
        overlay = cv2.resize(overlay, dim, interpolation=cv2.INTER_AREA)
        x_center = stats[i, cv2.CC_STAT_LEFT]
        y_center = stats[i, cv2.CC_STAT_TOP]
        # Replace every masked pixel inside the bounding box with the resized overlay
        # (bounds checks first so the mask lookup never goes out of range).
        for y in range(0, overlay.shape[0]):
            for x in range(0, overlay.shape[1]):
                if (y_center + y < frame1.shape[0]) and (x_center + x < frame1.shape[1]) and (
                        mask[y_center + y, x_center + x] > 0):
                    frame1[y_center + y, x_center + x, 0:3] = overlay[y, x, 0:3]
    return frame1
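

# --- Not part of the original gist: a possible shared helper. ---
# add_overlay and max_add_overlay above repeat the same "largest connected
# component over a size threshold" search; a sketch of factoring it out, using
# the cv2 import from this script:
def largest_component(binary, size_thresh):
    """Return (stats_row, centroid) of the largest component with area >= size_thresh, else None."""
    n_labels, _, stats, centroids = cv2.connectedComponentsWithStats(binary)
    best = None
    for i in range(1, n_labels):
        area = stats[i, cv2.CC_STAT_AREA]
        if area >= size_thresh and (best is None or area > stats[best, cv2.CC_STAT_AREA]):
            best = i
    return None if best is None else (stats[best], centroids[best])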


def worker(args):
    frame = args[0]
    count = args[1]
    original = frame.copy()
    # Gopher overlay from frame 160 onwards, fading in over its first 15 frames.
    if 160 < count < 9999999999:
        alpha = 1.0
        if count < (160 + 15):
            alpha = (count - 160) / 15
        frame = add_overlay(frame, gopher, alpha, size_thresh=20000)
    # Arena background replacement from frame 350 onwards.
    if 350 < count < 9999999999:
        frame = max_add_overlay(original, frame, arena, icol=(7, 0, 78, 17, 255, 123), size_thresh=200000)
    return frame


if __name__ == "__main__":
    input_file = '003.MOV'
    output_file = '003-modified.mp4'

    cap = cv2.VideoCapture(input_file)
    ret, frame = cap.read()
    height, width, ch = frame.shape
    fps = cap.get(cv2.CAP_PROP_FPS)  # OpenCV 2 used "CV_CAP_PROP_FPS"
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    duration = frame_count / fps

    ffmpeg = 'ffmpeg'  # name/path of the ffmpeg executable
    dimension = '{}x{}'.format(width, height)
    f_format = 'bgr24'  # remember OpenCV uses BGR ordering
    fps = str(fps)  # ffmpeg takes the frame rate as a string argument

    # Pipe raw BGR frames into ffmpeg on stdin and let it encode the output file.
    command = [ffmpeg,
               '-y',
               '-f', 'rawvideo',
               '-vcodec', 'rawvideo',
               '-s', dimension,
               '-pix_fmt', f_format,
               '-r', fps,
               '-i', '-',
               '-an',
               '-vcodec', 'mpeg4',
               '-b:v', '45000K',
               output_file]

    progress = tqdm(total=frame_count)
    err_file = open('error.log', 'w')
    proc = sp.Popen(command, stdin=sp.PIPE, stderr=err_file, stdout=sys.stdout)
    proc.stdin.write(frame.tobytes())  # tobytes() replaces the deprecated tostring()
    count = 1
    progress.update()

    # Read frames into a buffer and hand them to the pool 100 at a time;
    # Pool.map returns the processed frames in order.
    buffer = []
    p = Pool(5)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        buffer.append([frame, count])
        count += 1
        if len(buffer) == 100:
            for x in p.map(worker, buffer, chunksize=1):
                proc.stdin.write(x.tobytes())
            progress.update(n=100)
            buffer = []
    # Flush whatever is left in the buffer.
    if len(buffer) > 0:
        for x in p.map(worker, buffer, chunksize=1):
            proc.stdin.write(x.tobytes())
        progress.update(n=len(buffer))
        buffer = []

    p.close()
    p.join()
    progress.close()
    cap.release()
    proc.stdin.close()
    err_file.close()
    proc.wait()
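

# --- Not part of the original gist: streaming instead of 100-frame batches. ---
# The main loop above buffers 100 frames before handing them to Pool.map.
# Pool.imap yields results lazily and in frame order, so reading, processing
# and writing can overlap without an explicit buffer. A sketch reusing the
# worker function above; cap, pool, proc and progress are assumed to be the
# objects created in the script:
def stream_frames(cap, pool, proc, progress, chunksize=10):
    def numbered_frames():
        n = 1
        while True:
            ok, f = cap.read()
            if not ok:
                return
            yield [f, n]
            n += 1

    for out in pool.imap(worker, numbered_frames(), chunksize=chunksize):
        proc.stdin.write(out.tobytes())
        progress.update()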