A pre-training example for deep learning in Dyon
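
// Logistic sigmoid activation: squashes any real number into the range (0, 1).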
fn sigmoid(x) -> {
    return 1 / (1 + exp(-x))
}
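
// Creates a zero-initialized weight matrix with `outputs` rows and `inputs` columns.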
fn layer(inputs, outputs) -> {
    return [[0; inputs]; outputs]
}
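
// Builds a list of weight matrices, one per layer, from a list of layer sizes.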
fn tensor(sizes) -> {
    res := []
    for i len(sizes)-1 {
        push(mut res, layer(sizes[i], sizes[i + 1]))
    }
    return clone(res)
}
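
// Recovers the layer sizes from a tensor of weight matrices.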
fn sizes_tensor(tensor) -> {
    res := []
    push(mut res, len(tensor[0][0]))
    for i len(tensor) {
        push(mut res, len(tensor[i]))
    }
    return clone(res)
}
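
// Fills a single weight matrix with random values.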
fn randomize_layer(mut layer) {
    for i len(layer) {
        for j len(layer[i]) {
            layer[i][j] = random()
        }
    }
}
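
// Fills every weight in the tensor with random values.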
fn randomize_tensor(mut tensor) {
    for i len(tensor) {
        for j len(tensor[i]) {
            for k len(tensor[i][j]) {
                tensor[i][j][k] = random()
            }
        }
    }
}
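
// Forward pass: feeds the input vector through each layer,
// applying the sigmoid activation to every weighted sum.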
fn run_tensor_input(tensor, input) -> {
    input := input
    output := []
    for i len(tensor) {
        m := len(tensor[i])
        output = [0; m]
        for j m {
            sum := 0
            for k len(tensor[i][j]) {
                sum += tensor[i][j][k] * input[k]
            }
            output[j] = clone(sigmoid(sum))
        }
        input = clone(output)
    }
    return clone(output)
}
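
// Runs the forward pass for every sample in the data set.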
fn run_tensor_data(tensor, data) -> {
    res := []
    for i len(data) {
        push(mut res, run(tensor: tensor, input: data[i]))
    }
    return clone(res)
}
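
// Euclidean distance between two vectors, used as the reconstruction error.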
fn error_len(a, b) -> {
    sum := 0
    for i len(a) {
        sum += (a[i] - b[i])^2
    }
    return sqrt(sum)
}
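
// Picks the index [layer, row, column] of a uniformly random weight in the tensor.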
fn pick_weight(tensor) -> {
    n := len(tensor)
    i := floor(random() * n)
    m := len(tensor[i])
    j := floor(random() * m)
    o := len(tensor[i][j])
    k := floor(random() * o)
    return [clone(i), clone(j), clone(k)]
}
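
// Reads the weight at the index returned by `pick_weight`.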
fn get_tensor_weight(tensor, weight) -> {
    return clone(tensor[weight[0]][weight[1]][weight[2]])
}
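
// Writes a new value to the weight at the given index.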
fn set_tensor_weight_value(mut tensor, weight, val) {
    tensor[weight[0]][weight[1]][weight[2]] = clone(val)
}
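
// One autoencoder training step: estimates the gradient of the reconstruction
// error with respect to a randomly picked weight by finite differences (step `eps`),
// then moves that weight against the gradient scaled by the learning rate.
// Tries up to 10 weights if the picked weight has no measurable effect on the error.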
fn train_tensor_input_learning_rate(mut tensor, input, learning_rate) -> {
    eps := 0.0001
    for i 10 {
        w := pick_weight(tensor)
        val := get(tensor: tensor, weight: w)
        output := run(tensor: tensor, input: input)
        error := error_len(output, input)
        set(tensor: mut tensor, weight: w, value: val + eps)
        output2 := run(tensor: tensor, input: input)
        error2 := error_len(output2, input)
        diff_error := error2 - error
        abs_error := sqrt(diff_error^2)
        if abs_error > eps * eps {
            // Normalize error.
            diff_error /= eps
            set(tensor: mut tensor, weight: w, value: val - diff_error * learning_rate)
            return clone(error)
        } else {
            // Reset change.
            set(tensor: mut tensor, weight: w, value: val)
        }
    }
    return 0
}
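
// Pre-training step: pops the output layer, inserts a new randomized hidden layer
// of the given size, and pushes a new randomized output layer on top,
// keeping the already trained layers below it.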
fn expand_tensor_size(mut tensor, size) {
    n := len(tensor)
    outputs := len(tensor[n-1])
    outputs_inputs := len(tensor[n-1][0])
    pop(mut tensor)
    hidden_layer := layer(outputs_inputs, size)
    randomize(layer: mut hidden_layer)
    push(mut tensor, clone(hidden_layer))
    output_layer := layer(size, outputs)
    randomize(layer: mut output_layer)
    push(mut tensor, clone(output_layer))
}
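
// Trains the tensor on randomly drawn samples from the data,
// printing progress every 100 iterations.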
fn train_data_tensor_iterations(data, mut tensor, iterations) {
    for i := 0; i < iterations; i += 1 {
        random_input := data[floor(random() * len(data))]
        error := train(tensor: mut tensor, input: random_input, learning_rate: 10)
        if (i % 100) == 0 {
            output_data := run(tensor: tensor, data: data)
            print(data: data, output_data: output_data)
            println(error)
            println(sizes(tensor: tensor))
            println("==---== " + to_string(i))
            sleep(0)
        }
    }
}
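
// Prints each 3x3 input pattern next to its rounded reconstruction,
// marking rows where every rounded value matches.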
fn print_data_output_data(data, output_data) {
    for d len(data) {
        for i 3 {
            for j 3 {
                print(round(data[d][(j + 3 * i)]))
                print(",")
            }
            print(" ")
            for j 3 {
                print(round(output_data[d][(j + 3 * i)]))
                print(",")
            }
            equal := true
            for j 3 {
                if round(data[d][(j + 3 * i)]) != round(output_data[d][(j + 3 * i)]) {
                    equal = false
                }
            }
            if equal {
                print(" ")
                print(equal)
            }
            println("")
        }
        println("")
    }
}
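
// 3x3 binary training patterns, flattened row by row.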
fn plus() -> {
    return [0, 1, 0,
            1, 1, 1,
            0, 1, 0]
}
fn anti_plus() -> {
    return [1, 0, 1,
            0, 0, 0,
            1, 0, 1]
}
fn center() -> {
    return [0, 0, 0,
            0, 1, 0,
            0, 0, 0]
}
fn cross() -> {
    return [1, 0, 1,
            0, 1, 0,
            1, 0, 1]
}
fn diag1() -> {
    return [1, 0, 0,
            0, 1, 0,
            0, 0, 1]
}
fn diag2() -> {
    return [0, 0, 1,
            0, 1, 0,
            1, 0, 0]
}
fn arrow1() -> {
    return [1, 0, 1,
            0, 1, 0,
            0, 0, 0]
}
fn arrow2() -> {
    return [1, 0, 0,
            0, 1, 0,
            1, 0, 0]
}
fn arrow3() -> {
    return [0, 0, 0,
            0, 1, 0,
            1, 0, 1]
}
fn arrow4() -> {
    return [0, 0, 1,
            0, 1, 0,
            0, 0, 1]
}
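
// Builds a 9-7-9 autoencoder, trains it to reconstruct the patterns, then
// repeatedly replaces the top layers with new hidden layers of varying sizes
// and continues training (the pre-training schedule).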
fn main() {
    tensor := tensor([9, 7, 9])
    println(tensor)
    randomize(tensor: mut tensor)
    data := [plus(), anti_plus(), center(), cross(), diag1(), diag2(),
             arrow1(), arrow2(), arrow3(), arrow4()]
    train(data: data, tensor: mut tensor, iterations: 20000)
    expand(tensor: mut tensor, size: 5)
    train(data: data, tensor: mut tensor, iterations: 40000)
    expand(tensor: mut tensor, size: 3)
    train(data: data, tensor: mut tensor, iterations: 80000)
    expand(tensor: mut tensor, size: 2)
    train(data: data, tensor: mut tensor, iterations: 80000)
    expand(tensor: mut tensor, size: 3)
    train(data: data, tensor: mut tensor, iterations: 80000)
    expand(tensor: mut tensor, size: 5)
    train(data: data, tensor: mut tensor, iterations: 80000)
    expand(tensor: mut tensor, size: 7)
    train(data: data, tensor: mut tensor, iterations: 80000)
}