For 41
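# Tracks a mouse in a fixed-camera video: every `interval`-th frame is converted
# to grayscale, thresholded to the intensity band expected for the mouse, and the
# remaining pixels are clustered with DBSCAN. The mean/median of the mouse
# cluster gives the position, and the frame-to-frame displacement of that
# position ("activity") is written to a CSV via pandas.
# Third-party dependencies: opencv-python (cv2), scikit-learn, pandas, numpy,
# matplotlib, tqdm.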
import time

import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tqdm
from sklearn.cluster import DBSCAN
def measure_position(arr, mouse_area_max, mouse_area_min,
                     eps, min_sample):
    """Estimate the mouse position in a single BGR frame.

    Pixels whose grayscale intensity lies between mouse_area_min and
    mouse_area_max are assumed to belong to the mouse and are clustered
    with DBSCAN; cluster 0 is taken as the mouse.
    """
    v1 = mouse_area_max
    v2 = mouse_area_min
    img = np.mean(arr, axis=2)       # average over the colour channels
    tmp = (img >= v2) * (img <= v1)  # intensity band expected for the mouse
    # Note: x indexes rows (vertical), y indexes columns (horizontal).
    x, y = np.where(tmp)
    X = np.concatenate([x[:, np.newaxis], y[:, np.newaxis]], axis=1)
    db = DBSCAN(eps=eps, min_samples=min_sample).fit(X)
    labels = db.labels_
    mouse_ids = np.where(labels == 0)  # cluster 0 is assumed to be the mouse
    mouse_xs = x[mouse_ids]
    mouse_ys = y[mouse_ids]
    l = len(set(labels))  # number of distinct labels; 2 means {-1 (noise), 0 (mouse)}
    return (np.mean(mouse_xs), np.mean(mouse_ys),
            np.median(mouse_xs), np.median(mouse_ys), labels, X, l)
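# A minimal usage sketch (not part of the original gist): call measure_position()
# on the first frame of a video, with the same default thresholds that
# video2activity() uses below. The helper name and the first-frame choice are
# illustrative assumptions.
def _example_single_frame(video_name='200922_4_10min.mp4'):
    cap = cv2.VideoCapture(video_name)
    ret, frame = cap.read()
    cap.release()
    if not ret:
        return
    mean_x, mean_y, med_x, med_y, labels, X, n_labels = measure_position(
        frame, mouse_area_max=70, mouse_area_min=30, eps=20, min_sample=700)
    # n_labels == 2 means exactly one mouse cluster (label 0) plus noise (-1).
    print(mean_x, mean_y, n_labels)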
def colorlize_mouse(labels, X, i, records):
    """Save a debug plot of the clustered pixels and the trajectory so far."""
    plt.figure(figsize=(12, 8))
    # Plot every thresholded pixel, coloured by its DBSCAN cluster label.
    plt.scatter(X[:, 0], X[:, 1], c=labels, cmap='jet', s=0.5)
    # Mark the centre of the mouse cluster (mean in light green, median in orange).
    mouse_ids = np.where(labels == 0)
    mouse_xs = X[:, 0][mouse_ids]
    mouse_ys = X[:, 1][mouse_ids]
    plt.scatter([np.mean(mouse_xs)], [np.mean(mouse_ys)], color='lightgreen', marker='x')
    plt.scatter([np.median(mouse_xs)], [np.median(mouse_ys)], color='orange', marker='x')
    # Draw the trajectory recorded so far.
    plt.plot(records['mean_x'], records['mean_y'], color='lightgreen')
    plt.plot(records['med_x'], records['med_y'], color='orange')
    plt.savefig(f'mouse_img_{i}', dpi=200)
    plt.close()
def video2activity(video_name, csv_name, interval=300, mouse_area_max=70, mouse_area_min=30,
                   eps=20, min_sample=700, visualize=False):
    """Track the mouse every `interval`-th frame of a video and write the result to CSV."""
    st = time.time()
    cap = cv2.VideoCapture(video_name)
    assert cap.isOpened(), 'could not read the video'
    frame_len = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    records = {
        'frame': [],
        'mean_activity': [0, ],  # displacement of the mean position between sampled frames
        'med_activity': [0, ],   # displacement of the median position between sampled frames
        'mean_x': [],
        'mean_y': [],
        'med_x': [],
        'med_y': [],
    }
    i = 0
    flag = False
    # Upper bound for the progress bar; the loop exits as soon as the video runs out of frames.
    for _ in tqdm.tqdm(range(int(frame_len * 1000 / 999 + 1))):
        ret, frame = cap.read()
        if not ret:
            break
        if i % interval == 0:
            mean_x, mean_y, med_x, med_y, labels, X, l = measure_position(
                frame, mouse_area_max=mouse_area_max,
                mouse_area_min=mouse_area_min, eps=eps, min_sample=min_sample)
            records['frame'].append(i)
            records['mean_x'].append(mean_x)
            records['mean_y'].append(mean_y)
            records['med_x'].append(med_x)
            records['med_y'].append(med_y)
            if len(records['mean_x']) > 1:
                records['mean_activity'].append(np.sqrt(
                    (records['mean_x'][-2] - mean_x) ** 2 + (records['mean_y'][-2] - mean_y) ** 2
                ))
                records['med_activity'].append(np.sqrt(
                    (records['med_x'][-2] - med_x) ** 2 + (records['med_y'][-2] - med_y) ** 2
                ))
            # Save a debug image when requested, or when the clustering did not
            # find exactly one mouse cluster plus noise.
            if visualize or l != 2:
                colorlize_mouse(labels, X, i, records)
        if i % 1000 == 500 and not flag:
            # Convert the 29.97 fps video to a 30 fps frame count:
            # once every 1000 frames, read a frame without advancing the counter.
            flag = True
        else:
            i += 1
            flag = False
    cap.release()
    print(f'it took {time.time() - st:.1f} [sec]')
    df = pd.DataFrame(records)
    df.to_csv(csv_name)


if __name__ == '__main__':
    video2activity('200922_4_10min.mp4', 'test.csv', interval=30)
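    # The written CSV can then be inspected, e.g. (sketch, not in the original gist):
    #   df = pd.read_csv('test.csv', index_col=0)
    #   df.plot(x='frame', y=['mean_activity', 'med_activity'])
    #   plt.show()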