Created
November 11, 2023 07:53
-
-
Save oianmol/bdab683c0f4b726393f372748ed24ddd to your computer and use it in GitHub Desktop.
motion_detection.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Python program to implement | |
# Webcam Motion Detector | |
import os | |
# importing OpenCV, time and Pandas library | |
import cv2, time, pandas | |
import numpy as np | |
import ffmpegcv | |
# importing datetime class from datetime library | |
from datetime import datetime, timedelta | |
import argparse | |
from multiprocessing import Process | |
import threading | |
def execute():
    """Scan a video file for motion and drop start/end marker files.

    Reads the video path from the ``-v/--video`` CLI argument, runs a
    background-subtraction motion detector frame by frame, and creates
    empty marker files named ``<timestamp>_start`` / ``<timestamp>_end``
    in a local ``motion/`` directory whenever motion begins or ends.
    Timestamps are the video file's modification time plus the playback
    position, so they approximate the wall-clock time of each event.

    Keyboard controls (effective when the preview windows are enabled):
    q quit, m MOG2, k KNN, a absolute-difference method.
    """
    frame1 = None       # reference frame for the ABS method
    minimum = 1000      # minimum contour area that counts as motion
    method = 'KNN'      # active background-subtraction method

    # Constructing a parser
    ap = argparse.ArgumentParser()
    # Adding arguments
    ap.add_argument("-v", "--video", help="Video Stream")
    ap.add_argument("-a", "--area", help="Contour area")
    args = vars(ap.parse_args())

    # Output directory for the marker files.  exist_ok replaces the old
    # bare try/except, which also swallowed unrelated OSErrors.
    os.makedirs("motion", exist_ok=True)

    mog = cv2.createBackgroundSubtractorMOG2(detectShadows=False)
    knn = cv2.createBackgroundSubtractorKNN(detectShadows=False)

    # Capturing video
    video = cv2.VideoCapture(args["video"])
    m_time = os.path.getmtime(args["video"])
    # convert timestamp into DateTime object
    dt_m = datetime.fromtimestamp(m_time)
    print('Modified on:', dt_m)
    fps = video.get(cv2.CAP_PROP_FPS)
    print("FPS-" + str(fps))

    frame_no = 0
    motion = 0                 # 0: no motion segment open, 1: segment open
    original_date_time = dt_m
    date_time = dt_m           # defined up-front so the EOF path is safe
                               # even if the very first read fails

    def _touch(path):
        # Create an empty marker file; return False if it already exists
        # (several frames can map onto the same one-second timestamp).
        try:
            open(path, "x").close()
            return True
        except FileExistsError:
            return False

    def _close_motion(stamp):
        # Record the end of the current motion segment.
        if not _touch("motion/" + stamp + "_end"):
            print("motion end")
        print("motion end time : " + stamp)

    # Infinite while loop to treat stack of image as video
    while True:
        # Reading frame(image) from video
        exists, frame = video.read()
        if exists:
            # Playback position of this frame, shifted to wall time.
            date_time = original_date_time + timedelta(
                milliseconds=int(video.get(cv2.CAP_PROP_POS_MSEC)))
        else:
            # End of stream: close any motion segment still open.
            if motion == 1:
                _close_motion(date_time.strftime("%m-%d-%Y_%H:%M:%S"))
                motion = 0
            break

        frame_no += 1
        vid = cv2.flip(frame, 1)

        try:
            # Converting color image to gray_scale image
            if method == 'MOG2':
                vid = cv2.cvtColor(vid, cv2.COLOR_BGR2GRAY)
                bgs = mog.apply(vid)
            elif method == 'KNN':
                vid = cv2.cvtColor(vid, cv2.COLOR_BGR2GRAY)
                bgs = knn.apply(vid)
            elif method == 'ABS':
                frame = cv2.GaussianBlur(vid, (7, 7), 0)
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                # In first iteration we assign the value
                # of static_back to our first frame
                if frame1 is None:
                    frame1 = frame
                    continue
                # Difference between static background
                # and current frame(which is GaussianBlur)
                framedelta = cv2.absdiff(frame1, frame)
                # If change in between static background and
                # current frame is greater than 30 it will show white color(255)
                retval, bgs = cv2.threshold(
                    framedelta.copy(), 30, 255, cv2.THRESH_BINARY)
        except cv2.error:
            # An unprocessable frame ends the run (as before), but other
            # exception types now propagate instead of being hidden.
            break

        mask = np.zeros_like(frame)
        # Finding contour of moving object; largest first so the break
        # below stops after the dominant region.
        contours, _ = cv2.findContours(
            bgs.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = sorted(contours, key=cv2.contourArea, reverse=True)
        for cnt in contours:
            if cv2.contourArea(cnt) < minimum:
                # Largest remaining contour is below threshold:
                # close any open motion segment.
                if motion == 1:
                    _close_motion(date_time.strftime("%m-%d-%Y_%H:%M:%S"))
                    motion = 0
                continue
            if motion == 0:
                motion = 1
                start_time = date_time.strftime("%m-%d-%Y_%H:%M:%S")
                # Same double-underscore fallback as before when a marker
                # for this second already exists, but without the bare
                # except that could also hide real I/O errors.
                if not _touch("motion/" + start_time + "_start"):
                    _touch("motion/" + start_time + "__start")
                print("motion start time : " + start_time)
            (x, y, w, h) = cv2.boundingRect(cnt)
            cv2.rectangle(vid, (x, y), (x + w, y + h), (0, 255, 10), 1)
            # BUG FIX: thickness 2 was previously inside the colour tuple
            # (0, 255, 0, 2), so cv2 drew with the default thickness 1.
            cv2.putText(vid, f'{method}', (20, 20),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 0), 2)
            cv2.putText(vid, 'Motion Detected', (20, 40),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 0), 2)
            cv2.putText(vid, 'date_time ' + date_time.strftime("%m-%d-%Y_%H:%M:%S"),
                        (20, 60), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 0), 2)
            cv2.drawContours(mask, cnt, -1, 255, 3)
            break

        # cv2.imshow('Original Frame', vid)
        # cv2.imshow(method, bgs)
        key = cv2.waitKey(1)
        if key == ord('q') or key == ord('Q'):
            # if something is movingthen it append the end time of movement
            break
        elif key == ord('M') or key == ord('m'):
            method = 'MOG2'
        elif key == ord('K') or key == ord('k'):
            method = 'KNN'
        elif key == ord('A') or key == ord('a'):
            method = 'ABS'

    video.release()
    # Destroying all the windows
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Run the detector on a single worker thread and block until it is done.
    # (A multiprocessing fan-out variant was prototyped here previously;
    # use multiprocessing.Process with a list of workers to revive it.)
    worker = threading.Thread(target=execute)
    worker.start()
    worker.join()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment