@anoken
Created October 22, 2019 16:00
m5stickv_imu_test
# Train a small CNN on the 8x8 IMU images and convert it to a K210 kmodel.
from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout, Conv2D, MaxPooling2D, Flatten, ZeroPadding2D
from keras.utils.np_utils import to_categorical
from keras.optimizers import Adam
import numpy as np
from PIL import Image
import os
import tensorflow as tf
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import seaborn as sns
# Build the training data: one sub-directory of ./train per class.
image_list = []
label_list = []
LABELS = []
label = 0
filenames = os.listdir("train")
for dir in sorted(filenames):
    dir1 = "./train/" + dir
    for file in os.listdir(dir1):
        if file != ".DS_Store":
            label_list.append(label)
            filepath = dir1 + "/" + file
            print(filepath)
            image = Image.open(filepath)
            data = np.asarray(image)
            image_list.append(data)
    label = label + 1
    LABELS.append(dir)
image_list = np.array(image_list)
image_list = image_list.astype('float32')
image_list = image_list / 255.0
Y = to_categorical(label_list)
print(Y)
print(image_list)
X_train, X_test, y_train, y_test = train_test_split(image_list, Y, test_size=0.20)
# Build and train the CNN.
model = Sequential()
input_shape = (8, 8, 3)
model.add(ZeroPadding2D(padding=((1, 1), (1, 1)), input_shape=input_shape))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), padding="same"))
#model.add(ZeroPadding2D(padding=((1, 1), (1, 1))))
#model.add(Conv2D(64, (3, 3)))
#model.add(Activation('relu'))
#model.add(MaxPooling2D(pool_size=(2, 2), padding="same"))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(label, activation='softmax'))  # `label` equals the number of classes
opt = Adam(lr=0.001)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=50)
#print(image_list)
def plot_graph(history):
    epochs = range(len(history.history['acc']))
    plt.plot(epochs, history.history['acc'], marker='.', label='acc')
    # plt.plot(epochs, history.history['val_acc'], marker='.', label='val_acc')
    plt.autoscale()
    plt.title('model accuracy & loss')
    plt.grid()
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.legend(loc='best')
    # plt.savefig('./acc_graph.png')
    # plt.show()
    # plt.clf()
    plt.plot(epochs, history.history['loss'], marker='.', label='loss')
    # plt.plot(epochs, history.history['val_loss'], marker='.', label='val_loss')
    plt.autoscale()
    # plt.title('model loss')
    plt.grid()
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend(loc='best')
    plt.savefig('./loss_graph.png')
    # plt.show()

plot_graph(history)
# Evaluate on the held-out test split.
score = model.evaluate(X_test, y_test, verbose=1)
print('loss=', score[0])
print('accuracy=', score[1])
# Confusion matrix
pred_y = model.predict(X_test.astype(np.float32))
pred_y_classes = np.argmax(pred_y, axis=1)
true_y = np.argmax(y_test, axis=1)
confusion_mtx = confusion_matrix(true_y, pred_y_classes)
plt.figure(figsize=(8, 6))
sns.heatmap(confusion_mtx, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt="d")
plt.title("Confusion matrix")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig('./confusion_matrix.png')
#plt.show()
# Save the Keras model, convert it to TFLite, then to a K210 kmodel with nncase (ncc).
model.save('my_mbnet.h5')
converter = tf.lite.TFLiteConverter.from_keras_model_file('my_mbnet.h5')
tflite_model = converter.convert()
open('my_mbnet.tflite', "wb").write(tflite_model)
import subprocess
subprocess.run(['./ncc/ncc', 'my_mbnet.tflite', 'my_mbnet.kmodel', '-i', 'tflite', '-o',
                'k210model', '--dataset', 'images'])
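# A minimal device-side inference sketch in MaixPy (run as a separate script on
# the M5StickV), assuming the converted model has been copied to the SD card as
# /sd/my_mbnet.kmodel and that an 8x8 image is filled from the IMU exactly as in
# the data-collection script below; the file path and the argmax handling here
# are assumptions, not taken from the original code.
import KPU as kpu
import image
task = kpu.load("/sd/my_mbnet.kmodel")   # load the kmodel produced by ncc
img = image.Image().resize(8, 8)         # 8x8 buffer, to be filled via set_pixel() from read_imu()
img.pix_to_ai()                          # copy the frame buffer into the KPU input buffer
fmap = kpu.forward(task, img)            # run the network on the KPU
plist = fmap[:]                          # softmax scores, one per class
print("predicted class:", plist.index(max(plist)))
kpu.deinit(task)                         # free KPU memory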
# MaixPy script for the M5StickV: pack MPU6886 accelerometer samples into an
# 8x8 image and save labelled JPEGs as training data for the script above.
import sensor, image, time, lcd, machine
from machine import I2C
from fpioa_manager import fm    # for fm.register() below
from board import board_info    # on some MaixPy firmware: from fpioa_manager import board_info
from Maix import GPIO           # for the button GPIOs below
# I2C Check
i2c = I2C(I2C.I2C0, freq=100000, scl=28, sda=29)
devices = i2c.scan()
print(devices)
lcd.init()
# LCD Backlight
AXP192_ADDR=0x34
Backlight_ADDR=0x91
level=50
val = (level+7) << 4
i2c.writeto_mem(AXP192_ADDR, Backlight_ADDR,int(val))
# MPU6886 register definitions
MPU6886_ADDRESS=0x68
MPU6886_WHOAMI=0x75
MPU6886_ACCEL_INTEL_CTRL= 0x69
MPU6886_SMPLRT_DIV=0x19
MPU6886_INT_PIN_CFG= 0x37
MPU6886_INT_ENABLE=0x38
MPU6886_ACCEL_XOUT_H= 0x3B
MPU6886_TEMP_OUT_H=0x41
MPU6886_GYRO_XOUT_H= 0x43
MPU6886_USER_CTRL= 0x6A
MPU6886_PWR_MGMT_1=0x6B
MPU6886_PWR_MGMT_2=0x6C
MPU6886_CONFIG=0x1A
MPU6886_GYRO_CONFIG= 0x1B
MPU6886_ACCEL_CONFIG= 0x1C
MPU6886_ACCEL_CONFIG2= 0x1D
MPU6886_FIFO_EN= 0x23
# MPU6886 initialization
def write_i2c(address, value):
    i2c.writeto_mem(MPU6886_ADDRESS, address, bytearray([value]))
    time.sleep_ms(10)

write_i2c(MPU6886_PWR_MGMT_1, 0x00)
write_i2c(MPU6886_PWR_MGMT_1, 0x01<<7)
write_i2c(MPU6886_PWR_MGMT_1,0x01<<0)
write_i2c(MPU6886_ACCEL_CONFIG,0x10)
write_i2c(MPU6886_GYRO_CONFIG,0x18)
write_i2c(MPU6886_CONFIG,0x01)
write_i2c(MPU6886_SMPLRT_DIV,0x05)
write_i2c(MPU6886_INT_ENABLE,0x00)
write_i2c(MPU6886_ACCEL_CONFIG2,0x00)
write_i2c(MPU6886_USER_CTRL,0x00)
write_i2c(MPU6886_FIFO_EN,0x00)
write_i2c(MPU6886_INT_PIN_CFG,0x22)
write_i2c(MPU6886_INT_ENABLE,0x01)
# Button_A
fm.register(board_info.BUTTON_A, fm.fpioa.GPIO1)
but_a=GPIO(GPIO.GPIO1, GPIO.IN, GPIO.PULL_UP)
# Button_B
fm.register(board_info.BUTTON_B, fm.fpioa.GPIO2)
but_b = GPIO(GPIO.GPIO2, GPIO.IN, GPIO.PULL_UP)
but_a_pressed = 0
but_b_pressed = 0
# Read the MPU6886 accelerometer and scale each axis into 0..255
def read_imu():
    aRes=255/4096/2
    offset=128
    accel = i2c.readfrom_mem(MPU6886_ADDRESS, MPU6886_ACCEL_XOUT_H, 6)
    accel_x = (accel[0]<<8|accel[1])
    accel_y = (accel[2]<<8|accel[3])
    accel_z = (accel[4]<<8|accel[5])
    if accel_x>32768:
        accel_x=accel_x-65536
    if accel_y>32768:
        accel_y=accel_y-65536
    if accel_z>32768:
        accel_z=accel_z-65536
    ax=int(accel_x*aRes+offset)
    if ax<0: ax=0
    if ax>255: ax=255
    ay=int(accel_y*aRes+offset)
    if ay<0: ay=0
    if ay>255: ay=255
    az=int(accel_z*aRes+offset)
    if az<0: az=0
    if az>255: az=255
    accel_array = [ay,az,ax]
    return accel_array
cnt=0
mode=0
save_flg=0
pic_no=0
accel_array_zero=(255,255,255)
#IMU_Image
w_size=8
view_size=120
imu_Image = image.Image()
imu_Image = imu_Image.resize(w_size, w_size)
image_data_array = []
while(True):
    view_Image = image.Image()
    # Write the latest IMU sample into the 8x8 image
    accel_array = read_imu()
    w=cnt%w_size
    h=int(cnt/w_size)
    imu_Image.set_pixel(w, h, accel_array)
    width=imu_Image.width()
    # Once the image is full, optionally save it to the SD card
    if cnt>imu_Image.width()*imu_Image.height():
        cnt=0
        pic_no+=1
        if save_flg==1:
            cnt_str="{0:04d}".format(pic_no)
            mode_str="{0:04d}".format(mode)
            fname=mode_str+"_"+cnt_str+".jpg"  # prefix with "/sd/" if saving to the SD card root
            print(fname)
            imu_Image.save(fname, quality=99)
    # Draw the IMU image and the current class on the LCD
    w=(cnt+1)%w_size
    h=int((cnt+1)/w_size)
    imu_Image.set_pixel(w, h, accel_array_zero)
    img_buff=imu_Image.resize(view_size,view_size)
    view_Image.draw_image(img_buff,100,8)
    #imu_Image.pix_to_ai()
    if save_flg==1:
        view_Image.draw_string(0, 40, "REC", (255,0,0),scale=3)
    class_str=str(mode)
    view_Image.draw_string(0, 70,class_str, (255,0,0),scale=5)
    if cnt%width<width/2:
        view_Image.draw_circle(30, 15, 15,(255,0,0),fill=1)
    lcd.display(view_Image)
    cnt=cnt+1
    # Button A toggles recording
    if but_a.value() == 0 and but_a_pressed == 0:
        but_a_pressed=1
        if save_flg==0:
            save_flg=1
            print("save_start")
        elif save_flg==1:
            save_flg=0
    if but_a.value() == 1 and but_a_pressed == 1:
        but_a_pressed=0
    # Button B cycles the class label (0-10)
    if but_b.value() == 0 and but_b_pressed == 0:
        but_b_pressed=1
        mode+=1
        if mode>10:
            mode=0
    if but_b.value() == 1 and but_b_pressed == 1:
        but_b_pressed=0
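# A hypothetical host-side helper (plain Python, not MaixPy) sketching how the
# JPEGs saved above ("<mode>_<count>.jpg", e.g. 0002_0013.jpg) could be copied
# from the SD card into the train/<class>/ layout that the training script
# expects; the source directory name "sdcard" is an assumption.
import os
import shutil

def sort_imu_images(src="sdcard", dst="train"):
    for name in os.listdir(src):
        if not name.endswith(".jpg"):
            continue
        class_id = name.split("_")[0]          # "0002_0013.jpg" -> class "0002"
        class_dir = os.path.join(dst, class_id)
        os.makedirs(class_dir, exist_ok=True)  # one sub-directory per class
        shutil.copy(os.path.join(src, name), os.path.join(class_dir, name))

# sort_imu_images()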