Quick and Dirty Selective Blur with OpenCV + MTCNN
import cv2
import numpy as np
from mtcnn import MTCNN

def create_blurred_video(input_video, output_video):
    cap = cv2.VideoCapture(input_video)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_video, fourcc, fps, (width, height))

    detector = MTCNN()

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # MTCNN expects RGB input; OpenCV decodes frames as BGR.
        faces = detector.detect_faces(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        for face in faces:
            x, y, w, h = face['box']
            # MTCNN can return slightly negative coordinates; clamp before slicing.
            x, y = max(0, x), max(0, y)
            roi = frame[y:y+h, x:x+w]
            roi = cv2.GaussianBlur(roi, (23, 23), 30)
            frame[y:y+h, x:x+w] = roi

        out.write(frame)

    cap.release()
    out.release()
def apply_mask_and_combine(original_video, blurred_video, output_video, face_to_keep):
    cap_original = cv2.VideoCapture(original_video)
    cap_blurred = cv2.VideoCapture(blurred_video)
    width = int(cap_original.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap_original.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap_original.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_video, fourcc, fps, (width, height))

    detector = MTCNN()

    while cap_original.isOpened() and cap_blurred.isOpened():
        ret_original, frame_original = cap_original.read()
        ret_blurred, frame_blurred = cap_blurred.read()
        if not ret_original or not ret_blurred:
            break

        # MTCNN expects RGB input; OpenCV decodes frames as BGR.
        faces = detector.detect_faces(cv2.cvtColor(frame_original, cv2.COLOR_BGR2RGB))

        # Build a mask that is white over the face we want to keep unblurred
        mask = np.zeros(frame_original.shape[:2], dtype=np.uint8)
        if len(faces) > face_to_keep:
            x, y, w, h = faces[face_to_keep]['box']
            cv2.rectangle(mask, (x, y), (x+w, y+h), 255, -1)

        # Invert the mask: white everywhere except the kept face
        mask = cv2.bitwise_not(mask)

        # Combine the videos using the mask: blurred frame outside the kept face,
        # original frame inside it
        result = cv2.bitwise_and(frame_blurred, frame_blurred, mask=mask)
        result += cv2.bitwise_and(frame_original, frame_original, mask=cv2.bitwise_not(mask))

        out.write(result)

    cap_original.release()
    cap_blurred.release()
    out.release()
# Main process
input_video = 'input_video.mp4'
blurred_video = 'blurred_video.mp4'
final_output = 'final_output.mp4'
# Index of the face to keep unblurred (0 for the first detected face).
# Note: MTCNN's detection order is not guaranteed to stay stable across frames.
face_to_keep = 0

# Step 1: Create a blurred version of the video
create_blurred_video(input_video, blurred_video)

# Step 2 & 3: Apply mask to keep one face visible and combine videos
apply_mask_and_combine(input_video, blurred_video, final_output, face_to_keep)

print("Video processing complete. Output saved as", final_output)