Visualizes very simple error surfaces of small neural networks (with tanh, logistic sigmoid, or ReLU activation functions) under the sum-of-squared-errors loss, plotted as a function of two free weights.
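For orientation, each splot expression in the script below evaluates the sum of squared errors of a small network over a handful of fixed training pairs, with the plot variables x and y standing in for the two free weights w1 and w2. Schematically, in the script's own notation (where (x_i, t_i) denotes a training input and its target):

E(w1, w2) = (y(x_1, w1, w2) - t_1)**2 + (y(x_2, w1, w2) - t_2)**2 + ...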
reset

# Dense, smooth colored surface with all axes, tics, key, and colorbox hidden.
set pm3d
set hidden3d
set isosample 100, 100
unset surface
unset xtics
unset ytics
unset ztics
unset border
unset key
unset colorbox

# The plot axes x and y correspond to the two free weights w1 and w2.
r = 20
set xrange [-r:r]
set yrange [-r:r]

# Activation functions.
max(x, y) = x > y ? x : y
relu(a) = max(a, 0)
logit(a) = 1/(1+exp(-a))

# Alternative network outputs y(...) with matching views and splots (uncomment to use):
#set view 43, 54
#y(x, w1, w2) = w1+w2*x
#set view 46, 36
#y(x, w1, w2) = w1+tanh(w2*x)
#set view 62, 166
#y(x, w1, w2) = tanh(w1+tanh(w2*x))
#splot (y(0,x,y)-1)**2+(y(1,x,y)-2)**2+(y(2.5,x,y)-1.5)**2 t ""
#y(x1, x2, w1, w2) = tanh(w1+tanh(w2*x1+2*x2)+3*tanh(-2*x1+2*x2)-2*tanh(x1-2*x2))
#y(x1, x2, w1, w2) = max(0, w1+max(0, w2*x1+2*x2)+3*max(0, -2*x1+2*x2)-2*max(0, x1-2*x2))
#splot (y(0,1,x,y)-1)**2+(y(1,0,x,y)-2)**2+(y(0.5,0.5,x,y)-1.5)**2 t ""
#y(x1, x2, w1, w2) = logit(0.2*logit(w1*x1+w2*x2+0.2) + -0.2*logit(-0.1*x1+0.25*x2-0.1) + 0.1)
#y(x1, x2, w1, w2) = logit(0.2*logit(0.2*x1-0.1*x2+0.2) + w1*logit(-0.1*x1+w2*x2-0.1) + 0.1)
#y(x1, x2, w1, w2) = logit(w1*logit(0.2*x1-0.1*x2+0.2) + w2*logit(-0.1*x1+0.25*x2-0.1) + 0.1)

# Active model: small ReLU network with two free weights w1 and w2.
y(x1, x2, w1, w2) = relu(w1*relu(0.2*x1-0.1*x2+0.2) + w2*relu(-0.1*x1+0.25*x2-0.1) + 0.1)

# Sum of squared errors over the four XOR training pairs, with the plot
# variables x and y standing in for the weights w1 and w2.
splot (y(0,1,x,y)-1)**2+(y(1,0,x,y)-1)**2+(y(0,0,x,y)-0)**2+(y(1,1,x,y)-0)**2
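To reproduce the figure, the script can be loaded from an interactive gnuplot session, for example (the filename error_surface.gp is only a placeholder):

gnuplot> load "error_surface.gp"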