@nahkd123 · Created October 25, 2025 18:41
An inefficient machine learning algo
/**
 * The scope passed to a model on every forward pass.
 */
interface ModelScope {
    /** The input vector for this pass. */
    readonly input: readonly number[];
    /** Declare (on the first pass) and fetch a parameter vector of length `n`. */
    param(n: number): readonly number[];
    /** Embed a sub-model that shares this scope's parameter store. */
    use(model: Model): (input: readonly number[]) => readonly number[];
}

/** A model maps its scope (input plus parameters) to an output vector. */
type Model = (scope: ModelScope) => readonly number[];
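
// Illustrative example (an assumption, not part of the original gist): the
// simplest possible model scales each input element by its own learned factor.
const scaleModel: Model = ({ input, param }) => {
    const scale = param(input.length);
    return input.map((x, i) => x * scale[i]);
};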
/**
 * Context for the forward pass and for calculating the gradient.
 */
class ModelContext {
    #params: number[][] = [];
    #trained = false;

    constructor(public readonly model: Model) {}

    get params(): readonly (readonly number[])[] {
        return this.#params;
    }

    set params(params: readonly (readonly number[])[]) {
        this.#params = structuredClone(params) as number[][];
        this.#trained = true;
    }

    /** Run a forward pass, creating and initializing parameters on the first call. */
    apply(input: readonly number[]): readonly number[] {
        let paramCounter = 0;
        const scope: Partial<ModelScope> = { input };
        scope.param = (n) => {
            const paramId = paramCounter++;
            // On the first pass, initialize each parameter uniformly in [-1, 1).
            if (!this.#trained) {
                const values = new Array<number>(n).fill(0).map(() => Math.random() * 2 - 1);
                this.#params.push(values);
            }
            return this.#params[paramId];
        };
        scope.use = (model) => {
            return (input) => model({ ...(scope as ModelScope), input });
        };
        const output = this.model(scope as ModelScope);
        this.#trained = true;
        return output;
    }

    /** Forward pass with an explicit parameter set (used for finite differences). */
    #applyWithParams(input: readonly number[], params: readonly (readonly number[])[]): readonly number[] {
        let paramCounter = 0;
        const scope: Partial<ModelScope> = { input };
        scope.param = () => params[paramCounter++];
        scope.use = (model) => (input) => model({ ...(scope as ModelScope), input });
        return this.model(scope as ModelScope);
    }

    /**
     * Numerically estimate the gradient of the model's first output (assumed
     * to be a scalar loss) with respect to every parameter, using central
     * differences: df/dp ≈ (f(p + h) - f(p - h)) / (2h). This costs two full
     * forward passes per scalar parameter, which is what makes this algorithm
     * inefficient compared to backpropagation.
     */
    gradient(input: readonly number[]): readonly (readonly number[])[] {
        if (!this.#trained) this.apply(input);
        const gradients: number[][] = [];
        const h = 1e-6;
        for (let i = 0; i < this.#params.length; i++) {
            const param = this.#params[i];
            const gradient: number[] = [];
            gradients.push(gradient);
            for (let j = 0; j < param.length; j++) {
                // Perturb exactly one scalar parameter in each direction.
                const left = this.#params.map((param, ii) => param.map((p, jj) => i == ii && j == jj ? p + h : p));
                const right = this.#params.map((param, ii) => param.map((p, jj) => i == ii && j == jj ? p - h : p));
                const d = (this.#applyWithParams(input, left)[0] - this.#applyWithParams(input, right)[0]) / (2 * h);
                gradient.push(d);
            }
        }
        return gradients;
    }

    /**
     * Gradient descent/ascent.
     *
     * @param gradient The gradient obtained from `gradient()`.
     * @param rate The "learning rate": positive for gradient ascent, negative for gradient descent.
     */
    adjustParams(gradient: readonly (readonly number[])[], rate: number): void {
        for (let i = 0; i < this.#params.length; i++) {
            const param = this.#params[i];
            for (let j = 0; j < param.length; j++) {
                param[j] += gradient[i][j] * rate;
            }
        }
    }
}
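
// Sanity check for the numeric gradient (illustrative, not in the original
// gist): for f(x) = a * x[0] with a single parameter `a`, df/da = x[0], so
// the gradient at input [2] should come out as approximately [[2]].
const gradDemo = new ModelContext(({ input, param }) => [param(1)[0] * input[0]]);
console.log(gradDemo.gradient([2])); // ≈ [[2]]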
/**
 * Model a perceptron layer.
 *
 * @param nOutputs Number of output neurons.
 * @param activation Activation function to apply to all outputs.
 * @returns A perceptron model.
 */
function perceptron(nOutputs: number, activation: (x: number) => number = (x) => x): Model {
    return ({ input, param }) => {
        const weights: (readonly number[])[] = [];
        const biases = param(nOutputs);
        for (let i = 0; i < nOutputs; i++) weights.push(param(input.length));
        return weights.map((weights, outputIndex) => (
            // Weighted sum of inputs plus bias. The initial value 0 matters:
            // without it, reduce would treat weights[0] as the starting sum
            // and never multiply it by input[0].
            activation(weights.reduce((v, w, inputIndex) => v + input[inputIndex] * w, 0) + biases[outputIndex])
        ));
    };
}
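
// Quick check of the perceptron math (illustrative, not in the original gist):
// with bias 0.5 and weights [1, 2], input [1, 2] should give 1*1 + 2*2 + 0.5 = 5.5.
const neuronDemo = new ModelContext(perceptron(1));
neuronDemo.params = [[0.5], [1, 2]]; // parameter order: biases first, then weights
console.log(neuronDemo.apply([1, 2])); // [5.5]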
/**
 * Model a mean squared error function for training purposes.
 *
 * @param model The model.
 * @param target The desired output.
 */
function mseLossOf(model: Model, target: readonly number[]): Model {
    return ({ input, use }) => {
        const result = use(model)(input);
        // Strictly a sum of squared errors; the missing 1/n factor only
        // scales the gradient, which the learning rate absorbs.
        return [result.reduce((a, b, i) => a + (b - target[i]) ** 2, 0)];
    };
}
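
// Loss sanity check (illustrative, not in the original gist): wrapping a
// parameter-free identity model, the loss is the squared distance to target.
const identity: Model = ({ input }) => input;
const lossDemo = new ModelContext(mseLossOf(identity, [0, 0]));
console.log(lossDemo.apply([3, 4])); // [25] = 3^2 + 4^2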
const model: Model = ({ input, use }) => {
    const activation = (x: number) => 1 / (1 + Math.exp(-x)); // sigmoid
    const l1 = use(perceptron(5, activation))(input);
    const l2 = use(perceptron(8, activation))(l1);
    const l3 = use(perceptron(8, activation))(l2);
    const l4 = use(perceptron(8, activation))(l3);
    const l5 = use(perceptron(3, activation))(l4);
    return l5;
};

const input = [1, 0, 1];
const target = [0, 1, 0];
const evalContext = new ModelContext(model);
const trainContext = new ModelContext(mseLossOf(model, target));

for (let i = 0; i < 10000; i++) {
    const gradient = trainContext.gradient(input);
    // Negative rate: descend the loss surface.
    trainContext.adjustParams(gradient, -0.0001);
    if ((i % 100) == 0) {
        evalContext.params = trainContext.params;
        const [loss] = trainContext.apply(input);
        const result = evalContext.apply(input);
        console.log("Iteration", i, "loss", loss, "result", result);
    }
}