// Assumes TensorFlow.js is available as the global `tf`
// (e.g. loaded via a <script> tag or `import * as tf from '@tensorflow/tfjs'`).

// Hyperparameters.
const hiddenNumNeurons = 20
const hidden2NumNeurons = 5
const learningRate = 0.01
const num_iterations = 100
const batch_size = 20

// Weights and biases for the two hidden layers and the output layer.
const weights = tf.variable(tf.randomNormal([2, hiddenNumNeurons]))
const biases = tf.variable(tf.zeros([hiddenNumNeurons]))
const weights2 = tf.variable(tf.randomNormal([hiddenNumNeurons, hidden2NumNeurons]))
const biases2 = tf.variable(tf.zeros([hidden2NumNeurons]))
const outWeights = tf.variable(tf.randomNormal([hidden2NumNeurons, 1]))
const outBias = tf.variable(tf.zeros([1]))

const optimizer = tf.train.adam(learningRate)
const epsilon = tf.scalar(1e-7)
const one = tf.scalar(1)
// Returns the model's prediction (probability of label 1) for the given input.
function predict(input) {
  return tf.tidy(() => {
    const hidden = input.matMul(weights).add(biases).relu()
    const hidden2 = hidden.matMul(weights2).add(biases2).relu()
    const out = hidden2.matMul(outWeights).add(outBias).sigmoid().as1D()
    return out
  })
}
// Loss function: measures the error between the model's prediction and the true label.
function loss(prediction, actual) {
  // Using the right error measure matters; epsilon keeps log() away from log(0).
  return tf.tidy(() => {
    return tf.add(
        actual.mul(prediction.add(epsilon).log()),
        one.sub(actual).mul(one.sub(prediction).add(epsilon).log()))
      .mean()
      .neg().asScalar()
  })
}
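For reference, what this function computes is the standard binary cross-entropy averaged over the batch, with epsilon added only for numerical stability:

L = -\frac{1}{N}\sum_{i=1}^{N}\left[\, y_i \log(p_i + \epsilon) + (1 - y_i)\,\log(1 - p_i + \epsilon)\,\right]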
// Trains the model asynchronously, yielding to the browser between iterations.
async function train(numIterations, done) {
  for (let iter = 0; iter < numIterations; iter++) {
    const [xs, ys] = getNRandomSamples(batch_size)
    // minimize() with returnCost = true returns the batch loss as a scalar tensor.
    const cost = tf.tidy(() => {
      return optimizer.minimize(() => {
        const pred = predict(tf.tensor2d(xs))
        return loss(pred, tf.tensor1d(ys))
      }, true)
    })
    if (iter % 10 === 0) {
      const data = await cost.data()
      console.log(`Iteration: ${iter} Loss: ${data}`)
    }
    await tf.nextFrame()
  }
  done()
}
// Computes the model's accuracy on a set of samples.
function test(xs, ys) {
  tf.tidy(() => {
    const predictedYs = xs.map((x) => Math.round(predict(tf.tensor2d(x, [1, 2])).dataSync()[0]))
    let predicted = 0
    for (let i = 0; i < xs.length; i++) {
      if (ys[i] === predictedYs[i]) {
        predicted++
      }
    }
    console.log(`Num correctly predicted: ${predicted} out of ${xs.length}`)
    console.log(`Accuracy: ${predicted / xs.length}`)
  })
}
// Returns one random sample and its label. The label is 0 when both coordinates
// have the same sign and 1 otherwise, i.e. an XOR-like problem on the signs.
function getRandomSample() {
  const x = [Math.random() * 2 - 1, Math.random() * 2 - 1]
  let y
  if ((x[0] > 0 && x[1] > 0) || (x[0] < 0 && x[1] < 0)) {
    y = 0
  } else {
    y = 1
  }
  return [x, y]
}
// Returns n random samples and their labels.
function getNRandomSamples(n) {
  const xs = []
  const ys = []
  for (let iter = 0; iter < n; iter++) {
    const [x, y] = getRandomSample()
    xs.push(x)
    ys.push(y)
  }
  return [xs, ys]
}
const [testX, testY] = getNRandomSamples(100)

// Test the network before training.
console.log('Before training:')
test(testX, testY)
console.log('=============')
console.log(`Training ${num_iterations} epochs...`)

// Train, then test the network again after training.
train(num_iterations, () => {
  console.log('=============')
  console.log('After training:')
  test(testX, testY)
})
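Once training has finished, the trained network can also be queried for an individual point. This is a minimal sketch reusing the predict helper above; the sample point [0.5, -0.5] is arbitrary, and the call belongs inside the done callback passed to train so it runs after training completes.

// Query the trained model for one arbitrary point; outputs close to 1 mean the
// coordinates are predicted to have opposite signs, close to 0 the same sign.
const p = predict(tf.tensor2d([0.5, -0.5], [1, 2]))
p.data().then((v) => console.log(`P(y = 1 | [0.5, -0.5]) = ${v[0]}`))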
Console output:

Before training:
Num correctly predicted: 65 out of 100
Accuracy: 0.65
=============
Training 100 epochs...
Iteration: 0 Loss: 0.5020701885223389
Iteration: 10 Loss: 0.21876171231269836
Iteration: 20 Loss: 0.10506289452314377
Iteration: 30 Loss: 0.1812034547328949
Iteration: 40 Loss: 0.11704158782958984
Iteration: 50 Loss: 0.13397355377674103
Iteration: 60 Loss: 0.07540464401245117
Iteration: 70 Loss: 0.21024510264396667
Iteration: 80 Loss: 0.14096447825431824
Iteration: 90 Loss: 0.13317514955997467
=============
After training:
Num correctly predicted: 95 out of 100
Accuracy: 0.95