First, install Python's Raspberry Pi GPIO module. It should be installed by default, but let's make sure:

$ sudo apt-get install python-rpi.gpio

Then import the module into the Python script.
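A minimal sketch of that import in use. The BCM numbering scheme, pin 18, and the one-second blink are my own illustrative choices, not from the original:

import time
import RPi.GPIO as GPIO

# Use Broadcom (BCM) pin numbering and configure pin 18 as an output
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)

# Drive the pin high for a second, then low, and release it
GPIO.output(18, GPIO.HIGH)
time.sleep(1)
GPIO.output(18, GPIO.LOW)
GPIO.cleanup()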
To solve this challenge I combined convolutional, long short-term memory (LSTM), and fully connected networks to form a CLDNN, as described in Google's research paper. The neural network was implemented using TFLearn.
import gym
import random
import numpy as np
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
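With the imports in place, a CLDNN stacks a convolutional front end, an LSTM, and fully connected output layers. The sketch below is one way to wire that up in TFLearn; the layer sizes, treating the 4-dimensional CartPole observation as a length-4 sequence, and the two-action softmax output are my assumptions for illustration, not the original architecture:

from tflearn.layers.conv import conv_1d, max_pool_1d
from tflearn.layers.recurrent import lstm

# Convolutional -> LSTM -> fully connected: a CLDNN
net = input_data(shape=[None, 4, 1])                  # assumed: 4 observations, 1 channel
net = conv_1d(net, 32, 2, activation='relu')          # convolutional front end
net = max_pool_1d(net, 2)
net = lstm(net, 128, dropout=0.8)                     # recurrent middle
net = fully_connected(net, 64, activation='relu')     # fully connected top
net = dropout(net, 0.8)
net = fully_connected(net, 2, activation='softmax')   # assumed: one unit per action
net = regression(net, optimizer='adam', learning_rate=0.001,
                 loss='categorical_crossentropy')
model = tflearn.DNN(net)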
import numpy as np
import os

# Features and labels
training_set_inputs = np.array([[0, 0, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]])
training_set_outputs = np.transpose(np.array([[0, 1, 1, 1]]))

# Predict
predict = np.array([[0, 0, 1]])

# Check if model has been saved; train and save if not, otherwise reuse it
if not os.path.exists('model.npy'):
    # Train a one-layer sigmoid network (the same scheme as the
    # NeuralNetwork class below) and save the learned weights
    np.random.seed(1)
    weights = 2 * np.random.random((3, 1)) - 1
    for _ in range(10000):
        output = 1 / (1 + np.exp(-np.dot(training_set_inputs, weights)))
        error = training_set_outputs - output
        weights += np.dot(training_set_inputs.T, error * output * (1 - output))
    np.save('model.npy', weights)
else:
    weights = np.load('model.npy')

print(1 / (1 + np.exp(-np.dot(predict, weights))))
import numpy as np

class NeuralNetwork:
    def __init__(self):
        # Our training data
        self.X = np.array([[0, 0, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]])
        self.y = np.transpose(np.array([[0, 1, 1, 1]]))
        # Seed the random number generator to produce the same
        # numbers every run, then initialise the weights in [-1, 1)
        np.random.seed(1)
        self.weights = 2 * np.random.random((3, 1)) - 1
The same network again, this time with two additional training examples:

import numpy as np

class NeuralNetwork:
    def __init__(self):
        # Our training data, extended with two more examples
        self.X = np.array([[0, 0, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1], [0, 0, 0], [0, 1, 0]])
        self.y = np.transpose(np.array([[0, 1, 1, 1, 1, 0]]))
        # Seed the random number generator to produce the same
        # numbers every run, then initialise the weights in [-1, 1)
        np.random.seed(1)
        self.weights = 2 * np.random.random((3, 1)) - 1
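The class stops at initialisation, so here is a sketch of the training and prediction methods it implies, mirroring the one-layer sigmoid update used earlier. The names _sigmoid, predict, and train and the 10,000-iteration default are my assumptions:

    def _sigmoid(self, x):
        # Squash values into (0, 1)
        return 1 / (1 + np.exp(-x))

    def predict(self, inputs):
        # Forward pass: weighted sum followed by the sigmoid
        return self._sigmoid(np.dot(inputs, self.weights))

    def train(self, iterations=10000):
        # Nudge the weights in the direction that reduces the error
        for _ in range(iterations):
            output = self.predict(self.X)
            error = self.y - output
            self.weights += np.dot(self.X.T, error * output * (1 - output))

Used like so:

network = NeuralNetwork()
network.train()
print(network.predict(np.array([[1, 0, 0]])))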
from numpy import genfromtxt, array
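run() below hands off to a step_gradient helper that never appears in this section. A sketch, assuming the standard mean-squared-error gradients for a line y = mx + c:

def step_gradient(c_current, m_current, points, learning_rate):
    # Accumulate the partial derivatives of the mean squared error
    c_gradient = 0
    m_gradient = 0
    n = float(len(points))
    for x, y in points:
        error = y - (m_current * x + c_current)
        c_gradient += -(2 / n) * error
        m_gradient += -(2 / n) * x * error
    # Step both parameters against their gradients
    new_c = c_current - learning_rate * c_gradient
    new_m = m_current - learning_rate * m_gradient
    return [new_c, new_m]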
def run():
    points = genfromtxt('data.csv', delimiter=',')
    # Hyperparameter
    learning_rate = 0.0001
    # Initial values: y = mx + c
    initial_c = 0
    initial_m = 0
    # Iterations
    num_iterations = 1000
    # Optimal values for m and c
    [c, m] = gradient_descent_runner(points, initial_c, initial_m, learning_rate, num_iterations)
    print('After {0} iterations: c = {1}, m = {2}'.format(num_iterations, c, m))
def gradient_descent_runner(points, starting_c, starting_m, learning_rate, num_iterations):
    c = starting_c
    m = starting_m
    # Iterate, taking one gradient step per iteration
    for i in range(num_iterations):
        c, m = step_gradient(c, m, array(points), learning_rate)
    return [c, m]

if __name__ == '__main__':
    run()