Skip to content

Instantly share code, notes, and snippets.

@yinguobing
Created July 21, 2020 07:33
Show Gist options
  • Save yinguobing/cb17d0f454d47c1d76112d178dc370ab to your computer and use it in GitHub Desktop.
Save the face mesh points animation to a video file.
import tkinter
import cv2
import matplotlib
from matplotlib import animation, pyplot
from mpl_toolkits.mplot3d import Axes3D
# The module mesh_detector could be found here:
# https://github.com/yinguobing/head-pose-estimation/tree/google-face-mesh
from mesh_detector import MeshDetector
matplotlib.use("TkAgg")
if __name__ == "__main__":
    # Construct a face mesh detector from the TFLite landmark model.
    md = MeshDetector(
        "/home/robin/Desktop/head-pose-estimation/assets/face_landmark.tflite")

    # Read in the image file. cv2.imread returns None (no exception) on a
    # missing/unreadable file, so fail loudly here instead of crashing later.
    image_file = '/home/robin/Desktop/lfpw-trainset-image_0871.jpg'
    image = cv2.imread(image_file)
    if image is None:
        raise FileNotFoundError(f"Could not read image: {image_file}")

    # The mesh detector is sensitive to the face box size. The sample image I
    # used is smaller than expected, so pad it with a black border.
    epn_width = 20
    image = cv2.copyMakeBorder(image,
                               epn_width, epn_width, epn_width, epn_width,
                               cv2.BORDER_CONSTANT, value=[0, 0, 0])

    # Then, get the mesh (an iterable of 3D points; second return value unused).
    mesh, _ = md.get_mesh(image)

    # Draw the face mesh points in 3D, and save the animation.
    fig = pyplot.figure(1, figsize=[12.8, 9.6], dpi=80)
    # NOTE: `Axes3D(fig)` is deprecated and, on Matplotlib >= 3.4, no longer
    # auto-adds the axes to the figure (yielding a blank window).
    # add_subplot(projection="3d") behaves correctly on all versions.
    ax = fig.add_subplot(projection="3d")

    # Accumulated point coordinates, grown by one point per animation frame.
    x, y, z = [], [], []
    face = ax.scatter3D([], [], [], c='#ae7181')

    def init():
        """Set the fixed camera view and axis limits before animating."""
        ax.view_init(260, 270)
        ax.set_xlim3d(0, 400)
        ax.set_ylim3d(0, 400)
        ax.set_zlim3d(-200, 200)
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        # FuncAnimation expects an iterable of artists.
        return face,

    def update(point):
        """Append one mesh point and refresh the scatter for this frame."""
        x.append(point[0])
        y.append(point[1])
        z.append(point[2])
        # _offsets3d is the (private) backing store of a 3D scatter's data;
        # there is no public setter for incremental 3D scatter updates.
        face._offsets3d = (x, y, z)
        return face,

    ani = animation.FuncAnimation(
        fig, update, frames=mesh, init_func=init)

    # Save the animation to a video file (requires ffmpeg on PATH).
    # ani.save('animation.mp4')

    pyplot.show()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment