Last active
May 14, 2018 17:23
-
-
Save TonsOfFun/dc8eb9189adc070d19357821a85fd865 to your computer and use it in GitHub Desktop.
Crypto Smart Camera: Motion Capture
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import io
import json
import random
import re
import time  # added: time.time() is used by upload_capture()

import picamera
import requests
from PIL import Image
# NOTE(review): the original had a truncated import here ("from ..." with no
# target, a syntax error) — restore whatever module it was meant to import.
# Module-level state shared by upload_capture() and detect_motion().
# NOTE: a bare "global" statement at module scope is a no-op — the names
# must actually be bound here so the functions below (which declare them
# global) can read them before the first assignment.
previous_frame = None     # last frame seen by detect_motion()
current_frame = None      # most recent frame, uploaded by upload_capture()
last_capture_at = None    # epoch seconds of the last upload, or None
avg = None                # running average frame — presumably used by motion(); TODO confirm
def upload_capture():
    """Upload the most recent captured frame as a Note.

    Reads the module-level ``current_frame``; does nothing when no frame
    has been captured yet. Side effect: updates ``last_capture_at`` with
    the upload time (epoch seconds).
    """
    # fix: without the global declaration the assignment below created a
    # local and the module-level timestamp was never updated.
    global last_capture_at
    if current_frame is not None:
        last_capture_at = time.time()
        # Note and profile are defined elsewhere in the project — TODO confirm
        Note(host='http://greenthumb-proto.herokuapp.com',
             title='This is an automated upload',
             frame=current_frame,
             profile=profile,
             user_email='myemail@gmail.com',
             user_token='sometokenhere').create_note()
def detect_motion(camera):
    """Capture one frame and report whether motion occurred since the last.

    Returns False on the very first call (no previous frame to compare
    against); afterwards delegates the comparison to motion() (defined
    elsewhere) and rotates the frame pair.
    """
    # fix: without this declaration the assignments below make these names
    # locals, and the "previous_frame is None" read raises UnboundLocalError.
    global previous_frame, current_frame
    stream = io.BytesIO()
    camera.capture(stream, format='jpeg', use_video_port=True)
    stream.seek(0)
    if previous_frame is None:
        previous_frame = Image.open(stream)
        return False
    current_frame = Image.open(stream)
    result = motion(current_frame, previous_frame)
    # You could call upload_capture() here periodically in an event loop
    # for timelapse uploads.
    previous_frame = current_frame
    return result
# Main loop: record continuously into a 10-second in-memory ring buffer;
# when motion is detected, upload the buffered pre-motion footage plus the
# live footage until motion stops.
with picamera.PiCamera() as camera:
    camera.resolution = (1920, 1080)
    stream = picamera.PiCameraCircularIO(camera, seconds=10)
    # fix: wait_recording()/split_recording() require an active recording;
    # start recording into the circular buffer before entering the loop.
    camera.start_recording(stream, format='h264')
    try:
        while True:
            camera.wait_recording(1)
            # NOTE(review): periodic_capture() is not defined in this file —
            # presumably a timelapse hook defined elsewhere; confirm.
            periodic_capture()
            if detect_motion(camera):
                print('Motion detected!')
                note = Note(host='http://greenthumb-proto.herokuapp.com',
                            title='This is an automated upload',
                            profile=profile,
                            user_email='myemail@gmail.com',
                            user_token='sometokenhere')
                # Redirect the live recording into the note's frame buffer.
                camera.split_recording(note)
                # Copy the last 10 seconds recorded before the split.
                stream.copy_to(note, seconds=10)
                # Clear the in-memory ring buffer for the next event.
                stream.clear()
                # Keep recording into the note while motion continues.
                while detect_motion(camera):
                    camera.wait_recording(1)
                print('Motion stopped!')
                # Switch the recording back to the in-memory ring buffer.
                camera.split_recording(stream)
                note.create_note()
    finally:
        camera.stop_recording()
Author
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Based on the picamera examples of splitting a capture out of a circular stream and of custom output objects.