Real time face recognition
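The script below matches faces seen by a webcam against a small set of reference photos and labels them on screen in real time. It assumes the face_recognition library (which depends on dlib), OpenCV (opencv-python), and NumPy are installed, and that reference images named obama.jpg and biden.jpg sit next to the script.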
import cv2
import face_recognition
import numpy as np

# Load a sample picture of each known person and compute its face encoding
obama_image = face_recognition.load_image_file("obama.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]

biden_image = face_recognition.load_image_file("biden.jpg")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]

# Create arrays of known face encodings and their names
known_face_encodings = [
    obama_face_encoding,
    biden_face_encoding
]
known_face_names = [
    "Barack Obama",
    "Joe Biden"
]

# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

# Open a handle to the default webcam once, before the loop
video_capture = cv2.VideoCapture(0)

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
    if not ret:
        break

    # Only process every other frame of the video to save time
    if process_this_frame:
        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color
        # (which face_recognition expects)
        rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)

        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"

            # # If a match was found in known_face_encodings, just use the first one
            # if True in matches:
            #     first_match_index = matches.index(True)
            #     name = known_face_names[first_match_index]

            # Or instead, use the known face with the smallest distance to the new face
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]

            face_names.append(name)

    process_this_frame = not process_this_frame

    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4

        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

    # Display the resulting image
    cv2.imshow('Video', frame)

    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
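If matching is too permissive or too strict, the threshold can be made explicit: compare_faces takes a tolerance argument (0.6 by default in the face_recognition library; lower is stricter). Below is a minimal sketch of the lookup step as a standalone helper; the function name identify_face and the 0.5 threshold are illustrative choices, not part of the gist.

import face_recognition
import numpy as np

def identify_face(face_encoding, known_face_encodings, known_face_names, tolerance=0.5):
    # Return the best-matching known name, or "Unknown" if no known encoding
    # is within `tolerance` of the query encoding (hypothetical helper).
    if len(known_face_encodings) == 0:
        return "Unknown"
    face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
    best_match_index = int(np.argmin(face_distances))
    if face_distances[best_match_index] <= tolerance:
        return known_face_names[best_match_index]
    return "Unknown"

Inside the main loop, this helper could replace the compare_faces / face_distance block with a single call: name = identify_face(face_encoding, known_face_encodings, known_face_names).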