Skip to content

Instantly share code, notes, and snippets.

import Leap, sys
from Leap import KeyTapGesture
class SampleListener(Leap.Listener):
def on_connect(self, controller):
    """Called by the Leap framework once the controller connects.

    Enables key-tap gesture recognition so that on_frame can observe
    TYPE_KEY_TAP gestures in subsequent frames.

    Args:
        controller: the Leap.Controller that just connected.
    """
    # Dropped the trailing semicolon — it is a no-op and un-Pythonic.
    controller.enable_gesture(Leap.Gesture.TYPE_KEY_TAP)
def on_frame(self, controller):
    """Called by the Leap framework on every new tracking frame.

    NOTE(review): this preview appears truncated — the original likely
    goes on to inspect `frame` (e.g. for key-tap gestures); confirm
    against the full gist.
    """
    # Get the most recent frame and report some basic information
    frame = controller.frame()
@tsu-nera
tsu-nera / twiddle.ipynb
Created July 23, 2017 13:19
Afrel 楕円コース PID制御 パラメータチューニング(twiddle)
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
@tsu-nera
tsu-nera / pid_control.py
Last active July 23, 2017 09:00
P, PD, PID制御
import random
import numpy as np
import matplotlib.pyplot as plt
# ------------------------------------------------
#
# this is the Robot class
#
class Robot(object):
@tsu-nera
tsu-nera / show.ipynb
Last active July 23, 2017 03:32
Afrel楕円コースのPID制御シミュレーション http://afrel-shop.com/shopdetail/007001000018/
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
@tsu-nera
tsu-nera / USDJPY_dqn.ipynb
Created July 12, 2017 13:43
FX: USDJPYのヒストリカルデータを利用してDQNでバックテスト
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
@tsu-nera
tsu-nera / sine_wave_dqn.ipynb
Last active July 12, 2017 12:44
sine_wave_dqn.ipynb
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
@tsu-nera
tsu-nera / dqn_cartpole_numpy.py
Created July 9, 2017 08:42
DQN cartpole with numpy only
import gym
import numpy as np
from collections import deque
from gym import wrappers
# Create the Cart-Pole game environment
env = gym.make('CartPole-v0')
# Wrap the env with a Monitor that records results under /tmp.
# NOTE(review): gym.wrappers.Monitor writes to this directory on each run;
# confirm the path is writable and that overwriting old results is intended.
env = wrappers.Monitor(env, '/tmp/cartpole-experiment-1')
def relu(x):
import gym
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from collections import deque
# Create the Cart-Pole game environment (used by the Keras DQN below)
env = gym.make('CartPole-v0')
import gym
import numpy as np
import gym_tic_tac_toe
import random
from math import floor
import matplotlib.pyplot as plt
# NOTE(review): 'tic_tac_toe-v0' is registered as a side effect of the
# `import gym_tic_tac_toe` above — confirm that package is installed.
env = gym.make('tic_tac_toe-v0')
# Size of the tabular state space: each of the 9 board cells can hold
# one of 3 marks (empty / X / O), giving 3^9 = 19683 distinct boards.
n_states = pow(3, 9)
@tsu-nera
tsu-nera / crawler.py
Created June 30, 2017 20:12
強化学習(Q-Learning)で LEGO Mindstormsの crawlerを動かす
import ev3dev.ev3 as ev3
import random, os, time
import pickle
class QLearningAgent():
"""
Q-Learning Agent
Instance variables