A simple pedestrian detector based on PyImageSearch's blog post
import numpy as np
import cv2
from imutils.object_detection import non_max_suppression

"""
The parameters passed to the HOG detector need to be tuned to balance
speed against accuracy. This will always be a tradeoff.
"""


def detect_people(hog, image):
    orig = image.copy()

    # detect people in the image
    (rects, weights) = hog.detectMultiScale(image, winStride=(4, 4),
                                            padding=(8, 8), scale=1.05)

    # draw the original (unsuppressed) bounding boxes on a copy of the frame
    for (x, y, w, h) in rects:
        cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)

    # apply non-maxima suppression to the bounding boxes using a
    # fairly large overlap threshold to try to maintain overlapping
    # boxes that are still people
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)

    # draw the final bounding boxes
    for (xA, yA, xB, yB) in pick:
        cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)

    # # show some information on the number of bounding boxes
    # filename = imagePath[imagePath.rfind("/") + 1:]
    # print("[INFO] {}: {} original boxes, {} after suppression".format(
    #     filename, len(rects), len(pick)))


def open_cam_and_detect(path):
    cap = cv2.VideoCapture(path)

    # initialize the HOG descriptor/person detector
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    if not cap.isOpened():
        raise AssertionError('Capture could not be opened')

    while True:
        # capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            raise AssertionError('Failed to read a frame from the capture')

        # our operations on the frame come here
        # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        detect_people(hog, frame)

        # display the resulting frame
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # when everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    open_cam_and_detect(0)  # uses the webcam
    open_cam_and_detect('/home/rish/Desktop/temp.mp4')  # uses a stored video file
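
As the docstring above notes, `detectMultiScale` is where the speed/accuracy tradeoff lives. Below is a minimal, hypothetical sketch (the helper name `detect_people_fast` and the `target_width` value are my own, not part of the gist) showing two common levers: shrinking the frame before detection and using a coarser `winStride`/`scale`. Detected boxes are scaled back to the original frame so drawing still works.

```python
import cv2
import numpy as np
from imutils.object_detection import non_max_suppression


def detect_people_fast(hog, image, target_width=400):
    """Hypothetical faster variant: detect on a downscaled copy, then rescale boxes.

    Smaller frames and a coarser winStride/scale run faster but can miss
    small or distant people -- tune these values for your own footage.
    """
    h, w = image.shape[:2]
    ratio = w / float(target_width)

    # run the detector on a smaller copy of the frame
    small = cv2.resize(image, (target_width, int(h / ratio)))
    (rects, weights) = hog.detectMultiScale(
        small,
        winStride=(8, 8),  # coarser stride: fewer windows, faster, less precise
        padding=(8, 8),
        scale=1.1)         # bigger pyramid step: fewer levels, faster

    # map boxes back to full-resolution coordinates and suppress overlaps
    rects = np.array([[x, y, x + bw, y + bh] for (x, y, bw, bh) in rects]) * ratio
    pick = non_max_suppression(rects.astype(int), probs=None, overlapThresh=0.65)

    for (xA, yA, xB, yB) in pick:
        cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
```

Detection cost scales roughly with the number of sliding windows evaluated, so shrinking the frame or widening `winStride` cuts the work substantially; the price is missed detections of smaller figures, which may or may not matter for your camera setup.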