Created
September 4, 2020 04:26
-
-
Save smeschke/97a1ce4ae770fa91f102965e77916db9 to your computer and use it in GitHub Desktop.
Temporal video effect for Python/OpenCV
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import cv2 | |
import numpy as np | |
# Number of time-shifted copies of the video blended together.
multiverse_parameter = 3

# Frame offset between consecutive copies.
temporal_seperation = 24

# Path to the source video file.
source_path = '/home/stephen/Desktop/vids/mid.mp4'

# HSV threshold bounds (lo, lo, lo, hi, hi, hi) for pixels shown at full alpha.
color_values = (0, 39, 255, 255, 255, 255)

# https://docs.opencv.org/4.1.2/df/d9d/tutorial_py_colorspaces.html
# Segments an image by color
def only_color(frame, color_values):
    """Return a binary mask of the pixels of *frame* inside an HSV range.

    Parameters
    ----------
    frame : BGR image (uint8 ndarray).
    color_values : six numbers -- the lower then upper HSV bounds
        (lo_h, lo_s, lo_v, hi_h, hi_s, hi_v) passed to ``cv2.inRange``.

    Returns
    -------
    uint8 mask, 255 where the pixel falls inside the range, 0 elsewhere.
    """
    # BUG FIX: the parameter was misspelled ``color_valeus`` while the body
    # read ``color_values``, so the argument was silently ignored and the
    # module-level global was used instead. The dead ``np.zeros`` mask
    # pre-allocation (immediately overwritten by inRange) is also removed.
    lo_h, lo_s, lo_v, hi_h, hi_s, hi_v = color_values
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower = np.array([lo_h, lo_s, lo_v])
    upper = np.array([hi_h, hi_s, hi_v])
    # Threshold the HSV image to keep only the requested color range.
    return cv2.inRange(hsv, lower, upper)
# Show the color-keyed region of an image at full alpha.
def mask_function(img, bg, color_values):
    """Composite *img* over *bg*: color-matched pixels come from *img*,
    every other pixel comes from *bg*."""
    keyed = only_color(img, color_values)
    foreground = cv2.bitwise_and(img, img, mask=keyed)
    background = cv2.bitwise_and(bg, bg, mask=255 - keyed)
    return foreground + background
# Open one capture per universe, all reading the same source file.
captures = [cv2.VideoCapture(source_path) for _ in range(multiverse_parameter)]

# Stagger the later captures in time: the k-th extra capture is advanced
# k * temporal_seperation frames past the first one by discarding frames.
frames_to_skip = temporal_seperation
for cap in captures[1:]:
    for _ in range(frames_to_skip):
        cap.read()
    frames_to_skip += temporal_seperation
# Play all three videos at the same time | |
# Play all of the time-shifted videos at the same time.
while True:
    # One frame per capture: uint8 originals and float copies for blending.
    source_images, alpha_images = [], []
    for cap in captures:
        _, img = cap.read()
        if img is None:
            # BUG FIX: at end-of-video cv2 returns (False, None); the original
            # code passed None into np.float32 and crashed. Stop playback
            # cleanly instead.
            break
        source_images.append(img)
        alpha_images.append(np.float32(img))
    if len(source_images) != len(captures):
        break
    # Average the frames: each contributes 1/multiverse_parameter of the blend.
    bg = np.zeros_like(alpha_images[0])
    for img in alpha_images:
        bg += img / multiverse_parameter
    # Convert the float accumulator back to a displayable uint8 image.
    bg = np.array(bg, np.uint8)
    # Punch the color-keyed regions of each source frame through at full alpha.
    for img in source_images:
        bg = mask_function(img, bg, color_values)
    # Show the composite image; Esc (27) quits.
    cv2.imshow('bg', bg)
    k = cv2.waitKey(1)
    if k == 27:
        break
# Clean up
cv2.destroyAllWindows()
for cap in captures:
    cap.release()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Output Video:

Source video:
https://drive.google.com/file/d/1fhFmzv0yPdbQ-N-C-gRr1PuPBp-vfQNB/view?usp=sharing