Simple neural network in pure C
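/*
 * A tiny neural network in pure C: a single sigmoid neuron with two inputs
 * (temperature and humidity), trained by gradient descent to decide whether
 * it is a good day to go outside.
 *
 * Build and run (assuming the file is saved as nn.c):
 *   gcc -o nn nn.c -lm
 *   ./nn
 */
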
#include <stdio.h>
#include <math.h>
#include <stdlib.h>

// Sigmoid activation function
float sigmoid(float x) {
    return 1.0 / (1.0 + exp(-x));
}

// Prediction function with sigmoid activation
float predict(float temp, float humi, float w1, float w2, float bias) {
    float raw_output = w1 * temp + w2 * humi + bias;
    return sigmoid(raw_output);
}
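
// The model is a single neuron (logistic regression over two inputs):
//   score = sigmoid(w1 * temp + w2 * humi + bias), a value in (0, 1).
// Throughout, a score above 0.5 is read as "good day to go outside".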

// Training function using gradient descent
void train(float *w1, float *w2, float *bias) {
    float lr = 0.01;    // Learning rate
    int epochs = 1000;  // Number of full passes over the training data

    // Training data: [temperature, humidity, label]
    // 0 = not a good day to go outside, 1 = good day to go outside
    float training_data[4][3] = {
        {73, 46, 0}, // High temp, low humidity -> not good
        {91, 85, 1}, // High temp, high humidity -> good
        {87, 41, 0}, // High temp, low humidity -> not good
        {71, 66, 1}  // Moderate temp, moderate humidity -> good
    };
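
    // Note: the features are used unscaled. With raw values in the 40-90
    // range the initial weighted sum is large, so the sigmoid starts out
    // saturated and its gradient is tiny; rescaling the inputs (for example,
    // dividing by 100) would speed up learning. The loop below uses the raw
    // values as-is.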
printf("Starting training...\n");
for (int epoch = 0; epoch < epochs; epoch++) {
float total_loss = 0.0;
// Process each training sample
for (int j = 0; j < 4; j++) {
float temp = training_data[j][0];
float humi = training_data[j][1];
float expected = training_data[j][2];
// Forward pass: make prediction
float prediction = predict(temp, humi, *w1, *w2, *bias);
// Calculate error (loss)
float error = expected - prediction;
total_loss += error * error; // Squared error for monitoring
// Backward pass: calculate gradients
// For sigmoid: derivative = output * (1 - output)
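            // With squared error L = (expected - prediction)^2 and
            // prediction = sigmoid(z), the chain rule gives
            //   dL/dw1 = -2 * error * prediction * (1 - prediction) * temp
            // (and similarly for w2 and the bias). The updates below move
            // each parameter against that gradient, with the constant factor
            // of 2 absorbed into the learning rate.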
            float sigmoid_derivative = prediction * (1 - prediction);

            // Update weights using gradient descent
            *w1 += lr * error * temp * sigmoid_derivative;
            *w2 += lr * error * humi * sigmoid_derivative;
            *bias += lr * error * sigmoid_derivative;
        }

        // Print progress every 100 epochs
        if ((epoch + 1) % 100 == 0) {
            printf("Epoch %d, Average Loss: %.6f\n", epoch + 1, total_loss / 4.0);
        }
    }
}

// Function to test the model with various inputs
void test_model(float w1, float w2, float bias) {
    printf("\n--- Model Testing ---\n");

    // Test cases: [temperature, humidity]
    float test_cases[][2] = {
        {25.0, 60.0}, // Mild temp, moderate humidity
        {35.0, 80.0}, // High temp, high humidity
        {15.0, 40.0}, // Low temp, low humidity
        {30.0, 30.0}, // Moderate temp, low humidity
        {20.0, 90.0}, // Low temp, high humidity
        {40.0, 50.0}  // High temp, moderate humidity
    };
    int num_tests = sizeof(test_cases) / sizeof(test_cases[0]);

    for (int i = 0; i < num_tests; i++) {
        float temp = test_cases[i][0];
        float humi = test_cases[i][1];
        float prediction = predict(temp, humi, w1, w2, bias);

        printf("Temp: %.1f°C, Humidity: %.1f%% -> Prediction: %.3f ",
               temp, humi, prediction);
        if (prediction > 0.5) {
            printf("(Good day to go outside!)\n");
        } else {
            printf("(Not a good day to go outside)\n");
        }
    }
}

int main() {
    // Initialize weights and bias
    float w1 = 0.1, w2 = 0.1, bias = 0.0;

    printf("=== Neural Network Weather Predictor ===\n");
    printf("Initial weights and bias: w1=%.4f, w2=%.4f, bias=%.4f\n\n",
           w1, w2, bias);

    // Train the model
    train(&w1, &w2, &bias);

    printf("\nTraining completed!\n");
    printf("Final weights and bias: w1=%.6f, w2=%.6f, bias=%.6f\n",
           w1, w2, bias);

    // Test the trained model
    test_model(w1, w2, bias);

    // Interactive testing
    printf("\n--- Interactive Testing ---\n");
    printf("Enter temperature and humidity (or -1 -1 to exit):\n");

    float temp, humi;
    while (1) {
        printf("Temperature (°C): ");
        if (scanf("%f", &temp) != 1) break; // Stop on non-numeric input
        printf("Humidity (%%): ");
        if (scanf("%f", &humi) != 1) break; // Stop on non-numeric input

        if (temp == -1 && humi == -1) {
            break;
        }

        float prediction = predict(temp, humi, w1, w2, bias);
        printf("Prediction: %.3f -> %s\n\n",
               prediction,
               prediction > 0.5 ? "Good day to go outside!" : "Not a good day to go outside");
    }

    printf("Goodbye!\n");
    return 0;
}