@github-shakti
Last active September 17, 2017 19:45
Perceptron Model
#! python3
# A softmax-regression ("perceptron") classifier for MNIST, written to learn
# the basics of TensorFlow (1.x API).
# Dataset splits: train 55k / validation 5k / test 10k
# Data layout: x = mnist.train.images, y = mnist.train.labels
#   mnist.train.images shape: [55000, 784]
#   mnist.train.labels shape: [55000, 10]  (one-hot)
# Model: y = softmax(Wx + b)
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

x = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 input images
W = tf.Variable(tf.zeros([784, 10]))          # weights
b = tf.Variable(tf.zeros([10]))               # biases
y = tf.nn.softmax(tf.matmul(x, W) + b)        # predicted class probabilities
y_ = tf.placeholder(tf.float32, [None, 10])   # one-hot ground-truth labels
# note: taking log(softmax) directly like this is numerically unstable;
# tf.nn.softmax_cross_entropy_with_logits is the stable alternative
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
# train with 1000 mini-batches of 100 examples each
for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# fraction of test images whose predicted digit matches the label
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
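tf.placeholder, tf.InteractiveSession, and the tensorflow.examples.tutorials module are TensorFlow 1.x APIs that no longer exist in TensorFlow 2. A minimal sketch of the same y = softmax(Wx + b) model under TF 2.x, assuming tf.keras and its built-in MNIST loader (this is not part of the original gist):

import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0   # flatten and scale to [0, 1]
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0

# a single dense layer with softmax is the same y = softmax(Wx + b) model
model = tf.keras.Sequential([
    tf.keras.Input(shape=(784,)),
    tf.keras.layers.Dense(10, activation="softmax"),
])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.5),
              loss="sparse_categorical_crossentropy",   # integer labels, not one-hot
              metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=100, epochs=2, verbose=0)
print(model.evaluate(x_test, y_test, verbose=0))        # [loss, accuracy]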
#! python3
# A single-layer perceptron learning the OR function, with live plots of the
# per-step error, the weights, and the sampled inputs.
import matplotlib.pyplot as mp
import numpy as np
import random

mp.ion()                 # interactive mode, so the figure updates inside the loop
fig = mp.figure()
mp.axis([0, 100, -1, 2])

# (input, target) pairs for OR; the constant third component is the bias input
data = [
    (np.array([0, 0, 1]), 0),
    (np.array([1, 0, 1]), 1),
    (np.array([0, 1, 1]), 1),
    (np.array([1, 1, 1]), 1),
]

w = np.random.rand(3)    # random initial weights
unit_step = lambda x: 0 if x < 0 else 1
errors = []
lr = 0.1                 # learning rate
epoch = 100              # number of single-sample update steps
mp.subplots_adjust(left=0.125, bottom=0.1, right=0.9, top=0.9, wspace=0.2, hspace=0.3)
for i in range(epoch):
    # perceptron learning rule on one randomly chosen example
    x, expected = random.choice(data)
    result = np.dot(w, x)
    error = expected - unit_step(result)
    errors.append(error)
    w += lr * error * x
    ssa = w              # current weights, plotted below

    mp.subplot(3, 1, 1)
    mp.ylabel('Error Values')
    mp.plot(i, error, 'm.')

    mp.subplot(3, 1, 2)
    mp.plot(i, ssa[0], 'r.')
    mp.plot(i, ssa[1], 'b.')
    mp.plot(i, ssa[2], 'g.')
    mp.ylabel('Change in Weights')

    mp.subplot(3, 1, 3)
    mp.plot(i, x[0], 'r.')
    mp.plot(i, x[1], 'b_')
    #mp.plot(i, x[2], 'g.')
    mp.ylabel('X-Values')

    mp.show()
    mp.pause(0.0001)
# evaluate the learned weights on all four inputs
for x, _ in data:
    result = np.dot(x, w)
    print("{}: {} -> {}".format(x[:2], result, unit_step(result)))

# keep the interactive figure open
while True:
    mp.pause(0.0001)
#! python3
# The same single-layer perceptron, but the targets below encode XOR, which is
# not linearly separable: the weights never converge and the error keeps
# oscillating.
import matplotlib.pyplot as mp
import numpy as np
import random

mp.ion()
fig = mp.figure()
mp.axis([0, 100, -1, 2])

# (input, target) pairs for XOR; the constant third component is the bias input
data = [
    (np.array([0, 0, 1]), 0),
    (np.array([1, 0, 1]), 1),
    (np.array([0, 1, 1]), 1),
    (np.array([1, 1, 1]), 0),
]

w = np.random.rand(3)
unit_step = lambda x: 0 if x < 0 else 1
errors = []
lr = 0.1
epoch = 100
mp.subplots_adjust(left=0.125, bottom=0.1, right=0.9, top=0.9, wspace=0.2, hspace=0.3)
for i in range(epoch):
    # same perceptron learning rule as before
    x, expected = random.choice(data)
    result = np.dot(w, x)
    error = expected - unit_step(result)
    errors.append(error)
    w += lr * error * x
    ssa = w

    mp.subplot(3, 1, 1)
    mp.ylabel('Error Values')
    mp.plot(i, error, 'm.')

    mp.subplot(3, 1, 2)
    mp.plot(i, ssa[0], 'r.')
    mp.plot(i, ssa[1], 'b.')
    mp.plot(i, ssa[2], 'g.')
    mp.ylabel('Change in Weights')

    mp.subplot(3, 1, 3)
    mp.plot(i, x[0], 'r.')
    mp.plot(i, x[1], 'b_')
    #mp.plot(i, x[2], 'g.')
    mp.ylabel('X-Values')

    mp.show()
    mp.pause(0.0001)
# evaluate the learned weights; at least one XOR input stays misclassified
for x, _ in data:
    result = np.dot(x, w)
    print("{}: {} -> {}".format(x[:2], result, unit_step(result)))

# keep the interactive figure open
while True:
    mp.pause(0.0001)
#! python3
# Minimal version of the OR perceptron, plotting everything on a single axis.
# Credits: https://blog.dbrgn.ch/2013/3/26/perceptrons-in-python/
import matplotlib.pyplot as mp
import numpy as np
import random

mp.ion()
fig = mp.figure()
mp.axis([0, 100, -1, 2])

# (input, target) pairs for OR; the constant third component is the bias input
data = [
    (np.array([0, 0, 1]), 0),
    (np.array([0, 1, 1]), 1),
    (np.array([1, 0, 1]), 1),
    (np.array([1, 1, 1]), 1),
]

w = np.random.rand(3)
unit_step = lambda x: 0 if x < 0 else 1
errors = []
lr = 0.1
epoch = 100
for i in range(epoch):
    x, expected = random.choice(data)
    result = np.dot(w, x)
    error = expected - unit_step(result)
    errors.append(error)
    w += lr * error * x
    ssa = w
    mp.plot(i, error, 'm.')
    mp.plot(i, ssa[0], 'g.')
    mp.plot(i, ssa[1], 'r.')
    mp.plot(i, ssa[2], 'b.')
    mp.plot(i, x[0], 'r_')
    mp.plot(i, x[1], 'b_')
    #mp.plot(i, x[2], 'r.')
    mp.show()
    mp.pause(0.0001)
for x, _ in data:
    result = np.dot(x, w)
    print("{}: {} -> {}".format(x[:2], result, unit_step(result)))