Skip to content

Instantly share code, notes, and snippets.

View robintux's full-sized avatar

Abraham Zamudio Chauca robintux

  • GMMNS
  • Lima - Peru
View GitHub Profile
<!DOCTYPE html>
<html lang="es">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>El Perceptrón: El Big Bang de la IA</title>
<script src="https://cdn.tailwindcss.com"></script>
<!-- Lucide Icons -->
<script src="https://unpkg.com/lucide@latest"></script>
<!-- KaTeX for Math -->
"""
Demostración empírica de la descomposición sesgo-varianza
Simulación de Monte Carlo para estimar cada componente
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
# ============================================================================
# DEMOSTRACIÓN: Aproximación Universal con Keras
# ============================================================================
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.callbacks import EarlyStopping
import matplotlib.pyplot as plt
function poisson_2d_analytical(x, y, f0; n_terms=50)
    phi = zeros(length(x), length(y))
    for (i, xi) in enumerate(x), (j, yj) in enumerate(y)
        sum_val = 0.0
        for m in 1:2:n_terms  # Solo términos impares
            for n in 1:2:n_terms  # Solo términos impares
                coef = (16 * f0) /^4 * m * n * (m^2 + n^2))
 sum_val += coef * sin(m * π * xi) * sin(n * π * yj)
def generar_respuesta(prompt, max_length=200, num_beams=4):
"""Genera respuesta del modelo"""
inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512).to(device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_length=max_length,
min_length=20,
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
# Style configuration for academic publications
plt.style.use('seaborn-v0_8-whitegrid')
@robintux
robintux / utils.py
Last active February 21, 2026 05:20
import numpy as np
import copy
import time
import signal
import matplotlib.pyplot as plt
from graphviz import Digraph
from pandas import DataFrame
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, log_loss
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
class Word2VecSimple:
"""
Implementación simplificada de Word2Vec (Skip-gram) para fines educativos.
"""
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from typing import Tuple, Callable
class PowerFunctionOneSampleT:
"""
Calcula y visualiza la función de poder para prueba t de una muestra.
Conexión con Sesión 1:
def temporal_train_test_split(df, predictors, target, test_size=0.2):
"""
División temporal que respeta:
1. Orden cronológico (no leakage)
2. Integridad de series por país (no partir series arbitrariamente)
3. Balance de países entre train/test
"""
# Ordenar por año
df_sorted = df.sort_values('Year').reset_index(drop=True)