Created
November 30, 2020 02:07
-
-
Save N8python/ed456c25a7feadceafc3165f66263e09 to your computer and use it in GitHub Desktop.
This model stacks two quadratic (degree-2 polynomial) layers — one applied to each input, then one applied to their weighted sum — with a sigmoid on the output; it is trained here by gradient descent to learn XNOR.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Training set for the XNOR function: each entry is [[input0, input1], target].
const data = [
    [[0, 0], 1],
    [[0, 1], 0],
    [[1, 0], 0],
    [[1, 1], 1],
];

// Logistic function and its first derivative.
const sigmoid = (x) => 1 / (1 + Math.exp(-x));
const sigmoid_ = (x) => {
    const s = sigmoid(x);
    return s * (1 - s);
};

// Uniform random initial weight in [-1, 1).
const randWeight = () => Math.random() * 2 - 1;

// Quadratic transform of input 0: a0*x0^2 + b0*x0 + c0.
let a0 = randWeight();
let b0 = randWeight();
let c0 = randWeight();
// Quadratic transform of input 1: a1*x1^2 + b1*x1 + c1.
let a1 = randWeight();
let b1 = randWeight();
let c1 = randWeight();
// Weighted sum combining the two transformed inputs, plus bias bm.
let m0 = randWeight();
let m1 = randWeight();
let bm = randWeight();
// Output quadratic a*W^2 + b*W + c, fed into the final sigmoid.
let a = randWeight();
let b = randWeight();
let c = randWeight();

// Gradient-descent step size.
const learningRate = 0.01;
// Arithmetic mean of an array of numbers.
// NOTE(review): the original called `R.mean` (Ramda), which is never
// imported in this file; a local stdlib helper keeps the script standalone.
const mean = (xs) => xs.reduce((sum, x) => sum + x, 0) / xs.length;

// Batch gradient descent: each iteration accumulates, per training sample,
// the partial derivative of the cost C = (Z - y)^2 / 2 with respect to
// every parameter, then steps each parameter against the mean gradient.
for (let iteration = 0; iteration < 100000; iteration++) {
    const m0nudges = [];
    const m1nudges = [];
    const bmnudges = [];
    const anudges = [];
    const bnudges = [];
    const cnudges = [];
    const a0nudges = [];
    const a1nudges = [];
    const b0nudges = [];
    const b1nudges = [];
    const c0nudges = [];
    const c1nudges = [];
    data.forEach(([[x0, x1], y]) => {
        // Forward pass.
        const x0f = a0 * x0 ** 2 + b0 * x0 + c0; // per-input quadratic
        const x1f = a1 * x1 ** 2 + b1 * x1 + c1;
        const W = m0 * x0f + m1 * x1f + bm; // weighted sum + bias
        const X = a * W ** 2 + b * W + c;   // output quadratic
        const Z = sigmoid(X);               // prediction in (0, 1)

        // Backward pass (chain rule): dC/dX = (Z - y) * sigmoid'(X),
        // and dX/dW = 2*a*W + b carries the signal through to layer 1.
        const dX = (Z - y) * sigmoid_(X);
        const dW = dX * (2 * a * W + b);
        m0nudges.push(dW * x0f);
        m1nudges.push(dW * x1f);
        bmnudges.push(dW);
        anudges.push(dX * W ** 2);
        bnudges.push(dX * W);
        cnudges.push(dX);
        a0nudges.push(dW * m0 * (x0 ** 2));
        a1nudges.push(dW * m1 * (x1 ** 2));
        b0nudges.push(dW * m0 * x0);
        b1nudges.push(dW * m1 * x1);
        c0nudges.push(dW * m0);
        c1nudges.push(dW * m1);
    });
    m0 += -mean(m0nudges) * learningRate;
    m1 += -mean(m1nudges) * learningRate;
    bm += -mean(bmnudges) * learningRate;
    a += -mean(anudges) * learningRate;
    b += -mean(bnudges) * learningRate;
    c += -mean(cnudges) * learningRate;
    a0 += -mean(a0nudges) * learningRate;
    // BUG FIX: the original stepped b0 using `bmnudges` (the bias
    // gradient) instead of `b0nudges`, which was accumulated but unused.
    b0 += -mean(b0nudges) * learningRate;
    c0 += -mean(c0nudges) * learningRate;
    a1 += -mean(a1nudges) * learningRate;
    b1 += -mean(b1nudges) * learningRate;
    c1 += -mean(c1nudges) * learningRate;
}
/**
 * Run one forward pass of the model using the module-level weights.
 *
 * @param {number} x0 - First input (0 or 1 in the training data).
 * @param {number} x1 - Second input.
 * @returns {number} Sigmoid output in (0, 1).
 */
function runModel(x0, x1) {
    // Quadratic transform of each input.
    const feat0 = a0 * x0 ** 2 + b0 * x0 + c0;
    const feat1 = a1 * x1 ** 2 + b1 * x1 + c1;
    // Weighted sum, output quadratic, then squash through the sigmoid.
    const combined = m0 * feat0 + m1 * feat1 + bm;
    const preActivation = a * combined ** 2 + b * combined + c;
    return sigmoid(preActivation);
}
// Report the learned parameters, then the prediction for each input pair
// in the original order: (0,0), (0,1), (1,0), (1,1).
console.log(m0, m1, bm, a, b, c, a0, a1, b0, b1, c0, c1);
for (const [x0, x1] of [[0, 0], [0, 1], [1, 0], [1, 1]]) {
    console.log(runModel(x0, x1));
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment