Forked from mhawksey/gist:0c5ad7b79e1162b239156ce946cbe2be
Created January 16, 2020 21:40
Snippet of code used at DevFest London 2017 to count faces in the audience, send the count to Google Analytics, and update an image in Google Slides via a published Apps Script web app (see https://mashe.hawksey.info/?p=17787). A sketch of a local test receiver for the image POST follows the snippet.
import io
import picamera
import cv2
import numpy
import requests
import base64

def hitGA(faces):
    # Record the face count as a Google Analytics event (Measurement Protocol)
    print("Sending to GA")
    requests.get("http://www.google-analytics.com/collect?v=1"
                 + "&tid=YOUR_UA_TRACKING_ID_HERE"
                 + "&cid=1111"
                 + "&t=event"
                 + "&ec=FaceDetection"
                 + "&ea=faces"
                 + "&el=DevFest17"
                 + "&ev=" + faces).close()

maxFaces = -1

# Setup posting the result image to Slides via the Apps Script web app
url = 'PUBLISHED_WEB_APP_URL_FROM_GOOGLE_APPS_SCRIPT'

# Prepare headers for the HTTP request
content_type = 'image/jpeg'
headers = {'content-type': content_type}

while True:
    # Create a memory stream so the photo doesn't need to be saved to a file
    stream = io.BytesIO()
    # Here you can also specify other parameters (e.g. rotate the image)
    with picamera.PiCamera() as camera:
        camera.resolution = (2592, 1944)
        camera.iso = 800
        camera.capture(stream, format='jpeg')
    # Convert the picture into a numpy array
    buff = numpy.frombuffer(stream.getvalue(), dtype=numpy.uint8)
    # Now create an OpenCV image
    image = cv2.imdecode(buff, 1)
    # Load a cascade file for detecting faces
    face_cascade = cv2.CascadeClassifier('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml')
    # Convert to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Look for faces in the image using the loaded cascade file
    faces = face_cascade.detectMultiScale(gray, 1.1, 5)
    facesInt = len(faces)
    print("Found " + str(facesInt) + " face(s)")
    # Send the face count to GA
    hitGA(str(facesInt))
    # Draw a rectangle around every found face
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 255, 0), 2)
    # Post the base64-encoded result image if this is a new maximum
    if facesInt > maxFaces:
        retval, buffer = cv2.imencode('.jpg', image)
        img_encoded = base64.b64encode(buffer)
        response = requests.post(url, data=img_encoded, headers=headers)
        maxFaces = facesInt
        print(response.text)
    # Show the result image
    imS = cv2.resize(image, (640, 360))
    cv2.imshow('frame', imS)
    k = cv2.waitKey(1000)
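
The snippet posts the annotated frame as a base64-encoded JPEG in the raw POST body, which the published Apps Script web app then drops into the slide. As a rough way to check what the Pi is sending before wiring up the real endpoint, a minimal local receiver along these lines should work; this is a sketch using only the Python standard library, and the port 8000 and received.jpg filename are arbitrary choices, not part of the original gist.

import base64
from http.server import BaseHTTPRequestHandler, HTTPServer

class ImageReceiver(BaseHTTPRequestHandler):
    def do_POST(self):
        # Read the base64-encoded JPEG exactly as the snippet above sends it
        length = int(self.headers.get('content-length', 0))
        body = self.rfile.read(length)
        # Decode and save so the captured frame can be inspected locally
        with open('received.jpg', 'wb') as f:
            f.write(base64.b64decode(body))
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b'OK')

if __name__ == '__main__':
    # Point the snippet's `url` at http://<this-machine>:8000/ while testing
    HTTPServer(('', 8000), ImageReceiver).serve_forever()

Once the posted image looks right, switch `url` back to the published web app URL from Google Apps Script.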