@don-smith
Created April 1, 2015 09:59
Perceptron and Sigmoid neuron behaviour
library(ggplot2)
# calculates the output of a sigmoid neuron
# given the raw (un-thresholded) weighted sum of a perceptron
sigmoidize <- function(z) {
  1 / (1 + exp(-z))
}
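# quick sanity check (illustrative values, not part of the original gist):
# sigmoidize(c(-5, 0, 5))  # ~0.0067 0.5 ~0.9933 - a smooth S-curve centred on 0.5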
# applies the perceptron step function: 0 if the weighted sum is <= 0, otherwise 1
normalize <- function(input) {
  ifelse(input <= 0, 0, 1)
}
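# for comparison (illustrative, not part of the original gist):
# normalize(c(-0.2, 0, 0.3))  # 0 0 1 - a hard threshold where the sigmoid is smooth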
# calculates a neuron's output
# x and w are vectors (inputs and weights), b is the bias;
# a non-zero numeric 'constant' scales the weights and bias before summing
process.neuron <- function(x, w, b, constant = F) {
  if (constant) {
    w <- w * constant
    b <- b * constant
  }
  r <- sum(x * w) + b
  if (normalizing) r <- normalize(r)   # perceptron: hard threshold at 0
  if (sigmoidizing) r <- sigmoidize(r) # sigmoid neuron: smooth squashing
  r
}
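# worked example (a sketch; assumes sigmoidizing <- T and normalizing <- F as configured below):
# process.neuron(c(.4, .9), c(.7, .3), -.56)
#   z = .4*.7 + .9*.3 - .56 = -0.01, so the output is sigmoidize(-0.01), roughly 0.4975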
# processes the neurons in a layer; a constant of exactly 0 zeroes every weight
# and bias, so the output is known up front (0.5 for sigmoid neurons, 0 otherwise)
process.layer <- function(i, x, w, b, constant) {
  correction <- ifelse(sigmoidizing, 0.5, 0)
  # is.numeric() guards the no-constant case (constant = F), since F == 0 is TRUE in R
  if (is.numeric(constant) && constant == 0) return(correction)
  process.neuron(x[, i], w[, i], b[i], constant)
}
# defines and processes the input layer
input.layer <- function(constant = F) {
  # 3 neurons, 2 inputs and 1 bias each, 1 weight per input
  x <- matrix(c(.4,.9, .5,.7, .5,.7), ncol = 3)
  w <- matrix(c(.7,.3, .4,.6, .5,.5), ncol = 3)
  b <- matrix(c(-.56, -.61, -.62), ncol = 3)
  i <- seq(1, length(b)) # neuron iterator
  sapply(i, process.layer, x, w, b, constant) # results
}
# defines and processes the hidden layer
hidden.layer <- function(il, constant = F) {
  # 2 neurons, 3 inputs and 1 bias each, 1 weight per input
  hx <- matrix(c(il, il), ncol = 2) # apply the input-layer outputs to each hidden neuron
  hw <- matrix(c(.2,.6,.2, .2,.3,.1), ncol = 2) # hidden weights (3 per neuron)
  hb <- matrix(c(-.5, -.3), ncol = 2) # hidden biases (1 per neuron)
  i <- seq(1, length(hb)) # neuron iterator
  sapply(i, process.layer, hx, hw, hb, constant) # results
}
# defines and processes the output layer
output.layer <- function(hl, constant = F) {
  # 1 neuron, 2 inputs and 1 bias, 1 weight per input
  ox <- matrix(hl, ncol = 1) # apply the hidden-layer outputs to the output neuron
  ow <- matrix(c(.1,.2), ncol = 1) # output weights
  ob <- matrix(c(-.3), ncol = 1) # output bias
  sapply(1, process.layer, ox, ow, ob, constant) # results
}
apply.constant <- function(constant = F) {
  # comment out the later layers you're not interested in;
  # the last uncommented layer's output is what gets returned
  il <- input.layer(constant)
  # hl <- hidden.layer(il, constant)
  # ol <- output.layer(hl, constant)
}
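# illustrative sketch (assumes the configuration below: sigmoidizing on, normalizing off):
# apply.constant(F)   # three input-layer outputs, all close to 0.5 since each z is near 0
# apply.constant(400) # a large constant saturates the sigmoids towards 0 or 1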
# configuration: normalizing gives perceptron (step) outputs, sigmoidizing gives sigmoid outputs
normalizing <- F
sigmoidizing <- T
# Scenarios for different constants
# constants <- F
# constants <- c(0.3)
# constants <- c(0.3, 0.4, 0.5)
constants <- seq(-400, 400, by = 10)
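# note (not part of the original script): the sweep scales every weight and bias by c;
# as |c| grows, a sigmoid neuron's output approaches the perceptron's hard 0/1 step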
# process the network
network.output <- sapply(constants, apply.constant)
# plot the network
# qplot(constants, network.output, main = "Perceptron neurons", ylab = "network output", xlab = "constant (c)")
# qplot(constants, network.output, main = "Sigmoid neurons", ylab = "network output", xlab = "constant (c)")
# process individual layers - which layer is plotted depends on what is
# uncommented in apply.constant() above
layer <- sapply(constants, apply.constant)
# plot the layer's neurons
df <- data.frame(constants, t(layer))
ggplot(df, aes(constants, X1)) +
  labs(title="Input Layer", x="Constant", y="Neuron Output") +
  geom_point(data=df, aes(constants, X1), color="red") +
  geom_point(data=df, aes(constants, X2), color="blue") +
  geom_point(data=df, aes(constants, X3), color="green")
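# to plot a 2-neuron layer instead (e.g. the hidden layer), uncomment hidden.layer()
# in apply.constant() above and drop the X3 series - a sketch, not part of the original:
# ggplot(df, aes(constants, X1)) +
#   labs(title="Hidden Layer", x="Constant", y="Neuron Output") +
#   geom_point(color="red") +
#   geom_point(aes(constants, X2), color="blue")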