@xenogenesi · Created October 25, 2020 22:50
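A Blender operator that runs dlib's 68-point facial landmark detector over every frame of a movie clip and creates one tracking marker per landmark (named face<i>-m<j>) in the Clip Editor.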
# tested with blender 2.83.5
# required dependencies: dlib, imutils, opencv-python, numpy
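# One possible way to install them so the sys.path hack below can find them
# (a sketch only; the Python version is an assumption to match your system):
#   python3.8 -m pip install --user dlib imutils opencv-python numpy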
import bpy
import os
# hack to make user-installed packages visible to Blender's bundled Python;
# adjust the Python version to match the interpreter used for pip install
import sys; sys.path.insert(0, os.getenv('HOME') + "/.local/lib/python3.8/site-packages")
from imutils import face_utils
import numpy as np
import imutils
import dlib
import cv2


class CLIP_OT_dlib_landmarks(bpy.types.Operator):
    """Add markers from dlib landmarks"""
    bl_idname = "clip.dlib_landmarks"
    bl_label = "Dlib Landmarks"
    def execute(self, context):
        print("execute called")
        detector = dlib.get_frontal_face_detector()
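        # the 68-point model is not bundled with dlib; it can be downloaded
        # from http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
        # (the path below is where a distro package might install it; adjust
        # it to wherever the .dat file actually lives)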
        shape_predictor = "/usr/share/dlib/shape_predictor_68_face_landmarks.dat"
        predictor = dlib.shape_predictor(shape_predictor)
        clip = context.edit_movieclip  # bpy.data.movieclips[1]
        vidfile = os.path.abspath(bpy.path.abspath(clip.filepath))
        print(f"analyzing file: {vidfile}")
        if clip.source != 'MOVIE':
            print(f"only MOVIE clips are supported so far, got '{clip.source}'")
            return {'CANCELLED'}
        cap = cv2.VideoCapture(vidfile)
        vidFrames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        if not cap.isOpened():
            print("Error opening video stream or file")
            return {'CANCELLED'}
        frame_start = context.scene.frame_start
        frame_end = context.scene.frame_end
        frame_step = context.scene.frame_step
        if frame_step != 1:
            print("Error: frame_step != 1 is not supported")
            return {'CANCELLED'}
        if 0 <= frame_start <= vidFrames:
            # note: OpenCV frame indices are 0-based while Blender scene
            # frames usually start at 1, so this seek may be off by one
            # depending on the clip's start frame
            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_start)
        else:
            print(f"Error: requested start frame {frame_start} out of range (max: {vidFrames})")
            return {'CANCELLED'}
        width = clip.size[0]
        height = clip.size[1]
        rf = frame_start
        while cap.isOpened():
            # capture frame-by-frame
            ret, frame = cap.read()
            context.scene.frame_current = rf
            if ret:
                # image = imutils.resize(frame, width=500)
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                rects = detector(gray, 1)
                # loop over the face detections
                for (i, rect) in enumerate(rects):
                    # determine the facial landmarks for the face region,
                    # then convert the (x, y)-coordinates to a NumPy array
                    shape = predictor(gray, rect)
                    shape = face_utils.shape_to_np(shape)
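                    # for reference, dlib's 68-point layout groups the
                    # indices roughly as: 0-16 jaw, 17-26 eyebrows,
                    # 27-35 nose, 36-47 eyes, 48-67 mouth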
                    print(i)
                    for j, (x, y) in enumerate(shape):
                        mname = f"face{i}-m{j}"
                        # flip the y coordinate: OpenCV's image origin is
                        # top-left, Blender's normalized clip coordinates
                        # are bottom-left
                        y = height - y
                        if mname in clip.tracking.objects[0].tracks:
                            clip.tracking.objects[0].tracks[mname].markers.insert_frame(rf, co=(x / width, y / height))
                        else:
                            bpy.ops.clip.add_marker(location=(x / width, y / height))
                            clip.tracking.tracks.active.name = mname
                rf += frame_step
            else:
                break
            if rf > frame_end:
                break
        cap.release()
        print("execute done")
        return {'FINISHED'}

    @classmethod
    def poll(cls, context):
        return context.area.type == 'CLIP_EDITOR'


def register():
    bpy.utils.register_class(CLIP_OT_dlib_landmarks)


def unregister():
    bpy.utils.unregister_class(CLIP_OT_dlib_landmarks)


if __name__ == '__main__':
    register()
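To try the operator, run the script from Blender's Text Editor with a movie clip loaded in the Clip Editor, then invoke "Dlib Landmarks" from the operator search (F3) inside that editor. Alternatively, here is a sketch of calling it from the Python console using the 2.8x dict-style context override; the area/region lookup is an assumption about the open workspace:

# run from Blender's Python console after registering the operator
import bpy

for window in bpy.context.window_manager.windows:
    for area in window.screen.areas:
        if area.type == 'CLIP_EDITOR':
            region = next(r for r in area.regions if r.type == 'WINDOW')
            override = {'window': window, 'screen': window.screen,
                        'area': area, 'region': region}
            bpy.ops.clip.dlib_landmarks(override)
            break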