import numpy as np
import cv2 as cv
# The MPI body model (16 parts):
# http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/mpi/pose_iter_160000.caffemodel
# https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/openpose_pose_mpi_faster_4_stages.prototxt
# (To keep this sample simple, the body models are restricted to a single person.)
# The hand pose model used below:
# http://posefs1.perception.cs.cmu.edu/OpenPose/models/hand/pose_iter_102000.caffemodel
# https://raw.githubusercontent.com/CMU-Perceptual-Computing-Lab/openpose/master/models/hand/pose_deploy.prototxt
threshold = 0.1
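# One entry per output channel of the hand model (22 in total); the mapping is
# checked against the number of heatmaps the network returns (out.shape[1]) below.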
HAND_PARTS = {
    "BOTTOM": 0, "BOTTOM1": 1,
    "THUMBBOTTOM": 2, "THUMBMID": 3, "THUMBTOP": 4,
    "INDEXBOTTOM": 5, "INDEXMID1": 6, "INDEXMID2": 7, "INDEXTOP": 8,
    "MIDBOTTOM": 9, "MIDMID1": 10, "MIDMID2": 11, "MIDTOP": 12,
    "RINGBOTTOM": 13, "RINGMID1": 14, "RINGMID2": 15, "RINGTOP": 16,
    "PINKYBOTTOM": 17, "PINKYMID1": 18, "PINKYMID2": 19, "PINKYTOP": 20, "HEAVEN": 21
}
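# Keypoint pairs to connect when drawing the hand skeleton; each chain runs from
# the wrist (BOTTOM) out along one finger, joint by joint.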
POSE_PAIRS = [
    ["BOTTOM", "BOTTOM1"], ["BOTTOM1", "THUMBBOTTOM"], ["THUMBBOTTOM", "THUMBMID"], ["THUMBMID", "THUMBTOP"],
    ["BOTTOM", "INDEXBOTTOM"], ["INDEXBOTTOM", "INDEXMID1"], ["INDEXMID1", "INDEXMID2"], ["INDEXMID2", "INDEXTOP"],
    ["BOTTOM", "MIDBOTTOM"], ["MIDBOTTOM", "MIDMID1"], ["MIDMID1", "MIDMID2"], ["MIDMID2", "MIDTOP"],
    ["BOTTOM", "RINGBOTTOM"], ["RINGBOTTOM", "RINGMID1"], ["RINGMID1", "RINGMID2"], ["RINGMID2", "RINGTOP"],
    ["BOTTOM", "PINKYBOTTOM"], ["PINKYBOTTOM", "PINKYMID1"], ["PINKYMID1", "PINKYMID2"], ["PINKYMID2", "PINKYTOP"]
]
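# Local copies of the hand pose prototxt and Caffe weights linked above.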
protofile = "./proto/hand_pose.prototxt"
modelfile = "./models/pose_iter_102000.caffemodel"
inWidth = 368
inHeight = 368
net = cv.dnn.readNetFromCaffe(protofile, modelfile)
cap = cv.VideoCapture(0)
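# Grab webcam frames and run the hand pose network on each one until Esc is pressed.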
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame = cv.flip(frame, 1)
    frameWidth = frame.shape[1]
    frameHeight = frame.shape[0]
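    # Preprocess the frame: scale pixel values to [0, 1], resize to 368x368,
    # no mean subtraction, and keep the original BGR channel order (swapRB=False).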
    inp = cv.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight),
                               (0, 0, 0), swapRB=False, crop=False)
    net.setInput(inp)
    out = net.forward()
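    # out has shape (1, channels, heatmapHeight, heatmapWidth): one confidence
    # heatmap per hand part.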
    # Skip the frame if the network output does not have one heatmap per hand part.
    if len(HAND_PARTS) != out.shape[1]:
        continue
    points = []
    for i in range(len(HAND_PARTS)):
        # Slice the heatmap of the corresponding hand part.
        heatMap = out[0, i, :, :]
        # The reference sample looks for all local maxima; to keep things simple
        # we take only the global maximum, so only a single hand pose can be
        # detected at a time.
        _, conf, _, point = cv.minMaxLoc(heatMap)
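        # Map the heatmap location back to pixel coordinates in the original frame.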
        x = (frameWidth * point[0]) / out.shape[3]
        y = (frameHeight * point[1]) / out.shape[2]
        # Add the point only if its confidence is higher than the threshold.
        points.append((int(x), int(y)) if conf > threshold else None)
    for pair in POSE_PAIRS:
        partFrom = pair[0]
        partTo = pair[1]
        assert(partFrom in HAND_PARTS)
        assert(partTo in HAND_PARTS)
        idFrom = HAND_PARTS[partFrom]
        idTo = HAND_PARTS[partTo]
        if points[idFrom] and points[idTo]:
            cv.line(frame, points[idFrom], points[idTo], (0, 255, 0), 3)
            cv.ellipse(frame, points[idFrom], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)
            cv.ellipse(frame, points[idTo], (3, 3), 0, 0, 360, (0, 0, 255), cv.FILLED)
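    # Overlay the network's inference time (in milliseconds) on the frame.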
    t, _ = net.getPerfProfile()
    freq = cv.getTickFrequency() / 1000
    cv.putText(frame, '%.2fms' % (t / freq), (10, 20), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
    cv.imshow('OpenPose using OpenCV', frame)
    k = cv.waitKey(30) & 0xff
    if k == 27:
        break
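# Release the webcam and close the preview window once the loop exits.
cap.release()
cv.destroyAllWindows()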