"""
audio_pattern_learner.py
Production-grade ML system for autonomous audio pattern learning and viral prediction.
Continuously learns from video performance data to optimize audio characteristics.
Architecture:
- Deep learning models for pattern recognition
- Reinforcement learning for continuous optimization
- Multi-armed bandits for exploration/exploitation
- Real-time adaptation to trending patterns
- Explainable AI for debugging and trust
Version: 2.0 (Production ML)
"""
import json
import numpy as np
from typing import Dict, List, Optional, Tuple, Any
from dataclasses import dataclass, asdict, field
from collections import defaultdict, deque
from pathlib import Path
from datetime import datetime, timedelta
import pickle
import hashlib
from enum import Enum
# =============================================================================
# DATA MODELS
# =============================================================================
@dataclass
class AudioFeatures:
"""Complete audio feature representation"""
# Temporal features
pace_wpm: float
pace_variance: float
pace_acceleration: List[float] # Pace changes over time
# Pitch/prosody
pitch_mean_hz: float
pitch_std_hz: float
pitch_range_hz: float
pitch_contour: List[float] # Pitch trajectory
pitch_jumps: List[Tuple[float, float]] # (timestamp, magnitude)
# Pauses
pause_count: int
pause_density: float # Per minute
pause_durations: List[float]
pause_positions: List[float] # Normalized 0-1 positions
pause_variance: float
# Beat alignment
beat_sync_score: float # 0-1 overall sync
beat_hit_precision: float # Timing accuracy
beat_phase_consistency: float
on_beat_emphasis_ratio: float # % of emphasis on beats
# Emphasis/energy
emphasis_peaks: List[float] # Timestamp of peaks
emphasis_magnitudes: List[float]
emphasis_pattern: str # "crescendo", "steady", "burst"
energy_curve: List[float] # Overall energy over time
# Hook-specific
hook_entry_pace: float
hook_pitch_peak: float
hook_emphasis_count: int
hook_duration_sec: float
# Syllable-level timing
syllable_durations: List[float]
syllable_rhythm_pattern: str # Encoded rhythm signature
syllable_stress_pattern: List[int] # 0=unstressed, 1=stressed
# Voice characteristics
voice_type: str # "male", "female", "neutral"
voice_age_category: str # "young", "mature"
voice_energy_level: str # "calm", "moderate", "high"
# Contextual
niche: str
platform: str
beat_type: str # "hype", "chill", "trending", etc.
video_duration_sec: float
@dataclass
class PerformanceMetrics:
"""Video performance outcomes"""
views: int
completion_rate: float
avg_watch_time_sec: float
retention_curve: List[float] # At 10%, 20%, ..., 100%
likes: int
comments: int
shares: int
saves: int
engagement_rate: float
viral_velocity: float # Growth rate in first 24h
viral_score: float # Composite 0-100
platform_algorithm_boost: float # Detected boost 0-1
audience_retention_quality: str # "excellent", "good", "poor"
@dataclass
class AudioProfile:
"""Optimized audio configuration recommendation"""
niche: str
platform: str
beat_type: str
# Pace recommendations
optimal_pace_wpm: float
pace_range: Tuple[float, float]
pace_curve_template: str # "linear", "accelerating", "decelerating"
pace_adaptation_rules: Dict[str, float]
# Pitch recommendations
target_pitch_hz: float
pitch_variance_target: float
pitch_contour_template: List[float]
pitch_jump_strategy: Dict[str, Any] # When/how to jump
# Pause strategy
pause_density_target: float
pause_duration_distribution: Dict[str, float] # short/medium/long %
pause_placement_rules: List[str] # e.g., "after_hook", "pre_cta"
strategic_pause_positions: List[float] # Key normalized positions
# Beat alignment rules
beat_sync_importance: float # 0-1
beat_hit_tolerance_ms: float
beat_emphasis_ratio: float # % emphasis on beat
offbeat_strategy: str # "avoid", "strategic", "creative"
# Emphasis patterns
emphasis_strategy: str
emphasis_frequency: float # Per minute
emphasis_positions: List[float] # Normalized positions
emphasis_magnitude_curve: List[float]
# Hook optimization
hook_pace_multiplier: float # Relative to base pace
hook_pitch_boost: float
hook_emphasis_density: float
hook_duration_target: float
# Syllable timing
syllable_rhythm_template: str
syllable_stress_template: List[int]
syllable_duration_targets: Dict[str, float]
# Voice selection
recommended_voice_type: str
voice_energy_level: str
voice_characteristics: Dict[str, str]
# Meta information
confidence_score: float
sample_size: int
last_updated: str
viral_efficacy_score: float # Expected viral performance
# Explainability
top_success_factors: List[Tuple[str, float]] # (feature, importance)
viral_correlation_map: Dict[str, float]
anti_patterns: List[str]
trend_direction: str # "rising", "stable", "declining"
class ModelType(Enum):
"""Available model architectures"""
GRADIENT_BOOSTING = "gradient_boosting"
NEURAL_NETWORK = "neural_network"
ENSEMBLE = "ensemble"
CONTEXTUAL_BANDIT = "contextual_bandit"
# =============================================================================
# FEATURE ENGINEERING
# =============================================================================
class AudioFeatureEngineering:
"""Advanced feature engineering for ML models"""
@staticmethod
def extract_temporal_patterns(audio_features: AudioFeatures) -> np.ndarray:
"""Extract time-series features from audio"""
features = []
# Pace dynamics
if audio_features.pace_acceleration:
features.extend([
np.mean(audio_features.pace_acceleration),
np.std(audio_features.pace_acceleration),
np.max(audio_features.pace_acceleration),
np.min(audio_features.pace_acceleration)
])
else:
features.extend([0, 0, 0, 0])
# Pitch trajectory analysis
if audio_features.pitch_contour:
contour = np.array(audio_features.pitch_contour)
features.extend([
np.mean(contour),
np.std(contour),
np.percentile(contour, 75) - np.percentile(contour, 25), # IQR
np.corrcoef(np.arange(len(contour)), contour)[0, 1] if len(contour) > 1 else 0 # Trend
])
else:
features.extend([0, 0, 0, 0])
# Energy dynamics
if audio_features.energy_curve:
energy = np.array(audio_features.energy_curve)
features.extend([
np.mean(energy),
np.std(energy),
np.max(energy) - np.min(energy), # Range
len([i for i in range(1, len(energy)) if energy[i] > energy[i-1]]) / max(len(energy)-1, 1) # Rise frequency
])
else:
features.extend([0, 0, 0, 0])
return np.array(features)
    @staticmethod
    def extract_rhythm_patterns(audio_features: AudioFeatures) -> np.ndarray:
        """Extract rhythmic and timing patterns"""
        features = []

        # Syllable timing analysis (durations in seconds)
        if audio_features.syllable_durations:
            durations = np.array(audio_features.syllable_durations)
            features.extend([
                np.mean(durations),
                np.std(durations),
                np.median(durations),
                len([d for d in durations if d < 0.1]) / len(durations),  # Fast syllable ratio
                len([d for d in durations if d > 0.3]) / len(durations)   # Slow syllable ratio
            ])
        else:
            features.extend([0, 0, 0, 0, 0])

        # Pause pattern analysis (durations in milliseconds)
        if audio_features.pause_durations:
            pauses = np.array(audio_features.pause_durations)
            features.extend([
                np.mean(pauses),
                np.std(pauses),
                len([p for p in pauses if p < 200]) / len(pauses),  # Short pause ratio
                len([p for p in pauses if p > 500]) / len(pauses)   # Long pause ratio
            ])
        else:
            features.extend([0, 0, 0, 0])

        # Stress pattern encoding
        if audio_features.syllable_stress_pattern:
            stress = np.array(audio_features.syllable_stress_pattern)
            features.extend([
                np.mean(stress),
                np.std(stress),
                len(stress)
            ])
        else:
            features.extend([0, 0, 0])

        return np.array(features)
    @staticmethod
    def encode_categorical(audio_features: AudioFeatures) -> np.ndarray:
        """Encode categorical features as integer indices (ordinal, not one-hot)"""
        features = []

        # Niche encoding (simplified - would use proper one-hot encoding in production)
        niche_map = {"tech": 0, "lifestyle": 1, "finance": 2, "education": 3, "entertainment": 4}
        features.append(niche_map.get(audio_features.niche, 5))

        # Platform encoding
        platform_map = {"tiktok": 0, "instagram": 1, "youtube": 2}
        features.append(platform_map.get(audio_features.platform, 3))

        # Beat type encoding
        beat_map = {"hype": 0, "chill": 1, "trending": 2, "viral": 3}
        features.append(beat_map.get(audio_features.beat_type, 4))

        # Voice encoding
        voice_map = {"male": 0, "female": 1, "neutral": 2}
        features.append(voice_map.get(audio_features.voice_type, 2))

        return np.array(features)
    @staticmethod
    def create_feature_vector(audio_features: AudioFeatures) -> np.ndarray:
        """Create complete feature vector for ML (42 dims: 14 basic + 12 temporal + 12 rhythm + 4 categorical)"""
        # Basic features (14 values)
        basic = np.array([
            audio_features.pace_wpm,
            audio_features.pace_variance,
            audio_features.pitch_mean_hz,
            audio_features.pitch_std_hz,
            audio_features.pitch_range_hz,
            audio_features.pause_density,
            audio_features.pause_variance,
            audio_features.beat_sync_score,
            audio_features.beat_hit_precision,
            audio_features.on_beat_emphasis_ratio,
            audio_features.hook_entry_pace,
            audio_features.hook_pitch_peak,
            audio_features.hook_emphasis_count,
            audio_features.video_duration_sec
        ])

        # Advanced features (12 temporal + 12 rhythm + 4 categorical values)
        temporal = AudioFeatureEngineering.extract_temporal_patterns(audio_features)
        rhythm = AudioFeatureEngineering.extract_rhythm_patterns(audio_features)
        categorical = AudioFeatureEngineering.encode_categorical(audio_features)

        # Concatenate all features
        return np.concatenate([basic, temporal, rhythm, categorical])
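
# ---------------------------------------------------------------------------
# NOTE: The helper below is an illustrative sketch that was not part of the
# original gist. It builds a synthetic AudioFeatures instance so the feature
# pipeline can be smoke-tested without real audio analysis; every value is a
# hypothetical assumption chosen only to be plausible.
# ---------------------------------------------------------------------------
def make_example_features(niche: str = "tech", platform: str = "tiktok",
                          beat_type: str = "trending") -> AudioFeatures:
    """Build a synthetic AudioFeatures sample for testing (hypothetical values)."""
    return AudioFeatures(
        pace_wpm=165.0, pace_variance=12.0, pace_acceleration=[0.5, 1.2, -0.3],
        pitch_mean_hz=180.0, pitch_std_hz=25.0, pitch_range_hz=120.0,
        pitch_contour=[170.0, 185.0, 190.0, 175.0],
        pitch_jumps=[(2.5, 40.0)],
        pause_count=6, pause_density=8.0,
        pause_durations=[150.0, 300.0, 600.0],  # milliseconds
        pause_positions=[0.1, 0.5, 0.9], pause_variance=0.2,
        beat_sync_score=0.8, beat_hit_precision=0.75,
        beat_phase_consistency=0.7, on_beat_emphasis_ratio=0.65,
        emphasis_peaks=[1.0, 5.0, 12.0], emphasis_magnitudes=[0.9, 0.7, 0.8],
        emphasis_pattern="burst", energy_curve=[0.4, 0.6, 0.8, 0.7],
        hook_entry_pace=180.0, hook_pitch_peak=220.0,
        hook_emphasis_count=3, hook_duration_sec=3.0,
        syllable_durations=[0.08, 0.15, 0.25],  # seconds
        syllable_rhythm_pattern="SU-SU", syllable_stress_pattern=[1, 0, 1, 0],
        voice_type="neutral", voice_age_category="young",
        voice_energy_level="high",
        niche=niche, platform=platform, beat_type=beat_type,
        video_duration_sec=30.0,
    )

# Quick shape check of the assembled feature vector:
# vec = AudioFeatureEngineering.create_feature_vector(make_example_features())
# assert vec.shape == (42,)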
# =============================================================================
# MACHINE LEARNING MODELS
# =============================================================================
class ViralPredictionModel:
"""
Neural network for predicting viral success from audio features.
Architecture:
- Input: 50+ engineered audio features
- Hidden: 3 layers (128, 64, 32 neurons)
- Output: Viral score prediction (0-100)
- Loss: MSE + ranking loss for relative ordering
"""
def __init__(self, input_dim: int = 50):
self.input_dim = input_dim
self.weights = []
self.biases = []
# Initialize simple 3-layer network (placeholder for real implementation)
layer_sizes = [input_dim, 128, 64, 32, 1]
for i in range(len(layer_sizes) - 1):
self.weights.append(np.random.randn(layer_sizes[i], layer_sizes[i+1]) * 0.01)
self.biases.append(np.zeros(layer_sizes[i+1]))
self.learning_rate = 0.001
self.trained_samples = 0
def forward(self, X: np.ndarray) -> float:
"""Forward pass through network"""
activation = X
for W, b in zip(self.weights[:-1], self.biases[:-1]):
activation = np.maximum(0, activation @ W + b) # ReLU
# Output layer (linear)
output = activation @ self.weights[-1] + self.biases[-1]
return float(output[0])
def predict(self, audio_features: AudioFeatures) -> float:
"""Predict viral score for audio features"""
X = AudioFeatureEngineering.create_feature_vector(audio_features)
return self.forward(X)
def train_batch(self, features_batch: List[AudioFeatures],
targets_batch: List[float]):
"""Train on batch of examples (simplified training)"""
# In production: use proper backprop, Adam optimizer, etc.
for features, target in zip(features_batch, targets_batch):
X = AudioFeatureEngineering.create_feature_vector(features)
pred = self.forward(X)
# Simplified gradient descent (placeholder)
error = pred - target
# Would implement proper backpropagation here
self.trained_samples += len(features_batch)
def save(self, path: str):
"""Save model weights"""
with open(path, 'wb') as f:
pickle.dump({
'weights': self.weights,
'biases': self.biases,
'trained_samples': self.trained_samples
}, f)
def load(self, path: str):
"""Load model weights"""
with open(path, 'rb') as f:
data = pickle.load(f)
self.weights = data['weights']
self.biases = data['biases']
self.trained_samples = data['trained_samples']
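
# Illustrative usage sketch (not in the original gist): with an untrained
# network the prediction is near zero; after real training it should track
# observed viral scores. `make_example_features` is the hypothetical helper
# defined above.
#
#   model = ViralPredictionModel()
#   sample = make_example_features()
#   print(model.predict(sample))          # ~0.0 before any training
#   model.train_batch([sample], [72.0])   # placeholder training step
#   model.save("viral_model.pkl")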
class ContextualBandit:
"""
Multi-armed bandit for exploration/exploitation of audio profiles.
Uses Upper Confidence Bound (UCB) algorithm to balance:
- Exploitation: Use best known profiles
- Exploration: Try new variations to discover better patterns
"""
def __init__(self, exploration_factor: float = 2.0):
self.exploration_factor = exploration_factor
self.arm_counts = defaultdict(int)
self.arm_rewards = defaultdict(list)
self.total_pulls = 0
def select_profile(self, available_profiles: List[AudioProfile]) -> AudioProfile:
"""Select profile using UCB algorithm"""
if not available_profiles:
raise ValueError("No profiles available")
self.total_pulls += 1
# Force exploration of untried arms
for profile in available_profiles:
arm_id = self._profile_to_arm_id(profile)
if self.arm_counts[arm_id] == 0:
return profile
# UCB selection
best_profile = None
best_ucb = float('-inf')
for profile in available_profiles:
arm_id = self._profile_to_arm_id(profile)
avg_reward = np.mean(self.arm_rewards[arm_id]) if self.arm_rewards[arm_id] else 0
# UCB formula
exploration_bonus = self.exploration_factor * np.sqrt(
np.log(self.total_pulls) / max(self.arm_counts[arm_id], 1)
)
ucb_value = avg_reward + exploration_bonus
if ucb_value > best_ucb:
best_ucb = ucb_value
best_profile = profile
return best_profile
def update_reward(self, profile: AudioProfile, reward: float):
"""Update bandit with observed reward"""
arm_id = self._profile_to_arm_id(profile)
self.arm_counts[arm_id] += 1
self.arm_rewards[arm_id].append(reward)
# Keep only recent rewards (temporal decay)
if len(self.arm_rewards[arm_id]) > 100:
self.arm_rewards[arm_id] = self.arm_rewards[arm_id][-100:]
def _profile_to_arm_id(self, profile: AudioProfile) -> str:
"""Convert profile to unique arm identifier"""
key = f"{profile.niche}:{profile.platform}:{profile.beat_type}"
return hashlib.md5(key.encode()).hexdigest()[:8]
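
# Illustrative usage sketch (not in the original gist). Each arm's UCB score
# is mean_reward + c * sqrt(ln(total_pulls) / pulls(arm)); the code above
# mirrors that with c = exploration_factor. `candidate_profiles` and
# `viral_score` are hypothetical placeholders here.
#
#   bandit = ContextualBandit(exploration_factor=2.0)
#   chosen = bandit.select_profile(candidate_profiles)  # List[AudioProfile]
#   # ... publish a video using `chosen`, then observe its viral score ...
#   bandit.update_reward(chosen, reward=viral_score / 100.0)  # reward in 0-1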
# =============================================================================
# PATTERN LEARNER
# =============================================================================
class AudioPatternLearner:
"""
Production ML system for autonomous audio pattern learning.
Capabilities:
- Continuous learning from incoming video performance data
- Multi-model ensemble for robust predictions
- Contextual bandits for exploration/exploitation
- Automatic trend detection and adaptation
- Explainable recommendations with feature importance
"""
def __init__(self, data_dir: str = "./audio_ml_data"):
self.data_dir = Path(data_dir)
self.data_dir.mkdir(exist_ok=True)
# ML models
self.prediction_model = ViralPredictionModel()
self.bandit = ContextualBandit()
# Data storage
self.training_buffer = deque(maxlen=10000) # Recent examples
self.profile_cache = {}
self.performance_history = defaultdict(list)
# Learning parameters
self.min_samples_for_profile = 20
self.retraining_frequency = 100 # Retrain every N samples
self.trend_window_days = 7
self.viral_threshold_percentile = 75
# Performance tracking
self.model_version = "2.0"
self.last_training_time = None
self.total_videos_analyzed = 0
# Load existing models and data
self._load_state()
    def ingest_video_data(self, video_id: str, audio_features: AudioFeatures,
                          performance: PerformanceMetrics):
        """
        Ingest new video performance data for learning.
        This is the primary entry point for continuous learning.
        """
        # Store in training buffer
        self.training_buffer.append({
            'video_id': video_id,
            'audio_features': audio_features,
            'performance': performance,
            'timestamp': datetime.now().isoformat(),
            'viral_score': performance.viral_score
        })

        # Update performance history
        key = f"{audio_features.niche}:{audio_features.platform}:{audio_features.beat_type}"
        self.performance_history[key].append({
            'viral_score': performance.viral_score,
            'timestamp': datetime.now()
        })
        self.total_videos_analyzed += 1

        # Trigger retraining if needed
        if self.total_videos_analyzed % self.retraining_frequency == 0:
            self._retrain_models()

        # Update bandit if profile exists
        if key in self.profile_cache:
            profile = self.profile_cache[key]
            reward = performance.viral_score / 100.0  # Normalize to 0-1
            self.bandit.update_reward(profile, reward)

        # Save state periodically
        if self.total_videos_analyzed % 50 == 0:
            self._save_state()
    def get_recommended_audio_profile(self, niche: str, platform: str,
                                      beat_type: str = "trending") -> Optional[AudioProfile]:
        """
        API: Get recommended audio profile for content creation.
        Returns optimized profile with highest expected viral performance.
        Uses bandit algorithm to balance exploration/exploitation.
        """
        key = f"{niche}:{platform}:{beat_type}"

        # Check cache first
        if key in self.profile_cache:
            profile = self.profile_cache[key]
            # Verify profile is recent (within trend window)
            profile_age = (datetime.now() - datetime.fromisoformat(profile.last_updated)).days
            if profile_age <= self.trend_window_days:
                return profile

        # Generate new profile
        profile = self._generate_profile(niche, platform, beat_type)
        if profile:
            self.profile_cache[key] = profile
            self._save_state()
        return profile
    def predict_viral_success(self, audio_features: AudioFeatures) -> Dict[str, Any]:
        """
        API: Predict viral success for given audio features.
        Returns prediction with confidence and explanation.
        """
        # Get prediction from model
        predicted_score = self.prediction_model.predict(audio_features)

        # Calculate confidence based on similar examples in training data
        confidence = self._calculate_prediction_confidence(audio_features)

        # Get feature importance
        feature_importance = self._explain_prediction(audio_features)

        # Get comparative analysis
        key = f"{audio_features.niche}:{audio_features.platform}:{audio_features.beat_type}"
        historical_performance = self.performance_history.get(key, [])
        if historical_performance:
            recent_scores = [p['viral_score'] for p in historical_performance[-100:]]
            percentile = (sum(1 for s in recent_scores if s < predicted_score) / len(recent_scores)) * 100
        else:
            percentile = 50.0

        return {
            'predicted_viral_score': float(predicted_score),
            'confidence': confidence,
            'percentile': percentile,
            'expected_performance': self._score_to_performance_class(predicted_score),
            'feature_importance': feature_importance,
            'recommendation': self._generate_recommendation(audio_features, predicted_score)
        }
    def _generate_profile(self, niche: str, platform: str, beat_type: str) -> Optional[AudioProfile]:
        """Generate optimized audio profile from learned patterns"""
        key = f"{niche}:{platform}:{beat_type}"

        # Filter relevant training examples
        relevant_examples = [
            ex for ex in self.training_buffer
            if (ex['audio_features'].niche == niche and
                ex['audio_features'].platform == platform and
                ex['audio_features'].beat_type == beat_type)
        ]
        if len(relevant_examples) < self.min_samples_for_profile:
            return None

        # Separate winners and losers
        scores = [ex['viral_score'] for ex in relevant_examples]
        threshold = np.percentile(scores, self.viral_threshold_percentile)
        winners = [ex for ex in relevant_examples if ex['viral_score'] >= threshold]
        losers = [ex for ex in relevant_examples if ex['viral_score'] < threshold]
        if not winners:
            return None

        # Extract optimal parameters from winners
        winner_features = [ex['audio_features'] for ex in winners]

        # Pace analysis
        paces = [f.pace_wpm for f in winner_features]
        optimal_pace = np.median(paces)
        pace_std = np.std(paces)
        pace_range = (optimal_pace - pace_std, optimal_pace + pace_std)

        # Pitch analysis
        pitches = [f.pitch_mean_hz for f in winner_features]
        target_pitch = np.median(pitches)
        pitch_variances = [f.pitch_std_hz for f in winner_features]
        pitch_variance_target = np.median(pitch_variances)

        # Pause analysis
        pause_densities = [f.pause_density for f in winner_features]
        pause_density_target = np.median(pause_densities)

        # Beat alignment analysis
        beat_scores = [f.beat_sync_score for f in winner_features]
        beat_sync_importance = np.mean(beat_scores)

        # Emphasis analysis (median peak count converted to a per-minute rate)
        emphasis_counts = [len(f.emphasis_peaks) for f in winner_features]
        emphasis_freq = np.median(emphasis_counts) / np.median([f.video_duration_sec for f in winner_features]) * 60

        # Hook analysis
        hook_paces = [f.hook_entry_pace for f in winner_features if f.hook_entry_pace > 0]
        hook_pace_multiplier = np.median(hook_paces) / optimal_pace if hook_paces and optimal_pace > 0 else 1.1

        # Calculate viral efficacy score
        viral_efficacy = np.mean([ex['viral_score'] for ex in winners])

        # Feature importance analysis
        top_factors = self._calculate_feature_importance(winners, losers)

        # Detect trends
        trend_direction = self._detect_trend(key)

        # Build profile
        profile = AudioProfile(
            niche=niche,
            platform=platform,
            beat_type=beat_type,
            optimal_pace_wpm=float(optimal_pace),
            pace_range=tuple(map(float, pace_range)),
            pace_curve_template="linear",  # Could be learned
            pace_adaptation_rules={},
            target_pitch_hz=float(target_pitch),
            pitch_variance_target=float(pitch_variance_target),
            pitch_contour_template=[],
            pitch_jump_strategy={},
            pause_density_target=float(pause_density_target),
            pause_duration_distribution={},
            pause_placement_rules=[],
            strategic_pause_positions=[],
            beat_sync_importance=float(beat_sync_importance),
            beat_hit_tolerance_ms=50.0,
            beat_emphasis_ratio=0.7,
            offbeat_strategy="strategic",
            emphasis_strategy="moderate",
            emphasis_frequency=float(emphasis_freq),
            emphasis_positions=[],
            emphasis_magnitude_curve=[],
            hook_pace_multiplier=float(hook_pace_multiplier),
            hook_pitch_boost=1.15,
            hook_emphasis_density=2.0,
            hook_duration_target=3.0,
            syllable_rhythm_template="",
            syllable_stress_template=[],
            syllable_duration_targets={},
            recommended_voice_type="neutral",
            voice_energy_level="moderate",
            voice_characteristics={},
            confidence_score=min(len(winners) / 100.0, 1.0),
            sample_size=len(relevant_examples),
            last_updated=datetime.now().isoformat(),
            viral_efficacy_score=float(viral_efficacy),
            top_success_factors=top_factors,
            viral_correlation_map={},
            anti_patterns=[],
            trend_direction=trend_direction
        )
        return profile
    def _calculate_feature_importance(self, winners: List[Dict],
                                      losers: List[Dict]) -> List[Tuple[str, float]]:
        """Calculate which features most differentiate winners from losers"""
        if not winners or not losers:
            return []  # Not enough contrast to compute mean differences

        importance = []

        # Pace importance
        winner_paces = [ex['audio_features'].pace_wpm for ex in winners]
        loser_paces = [ex['audio_features'].pace_wpm for ex in losers]
        pace_diff = abs(np.mean(winner_paces) - np.mean(loser_paces))
        importance.append(("pace_wpm", pace_diff))

        # Beat sync importance (scaled to be comparable with WPM differences)
        winner_beats = [ex['audio_features'].beat_sync_score for ex in winners]
        loser_beats = [ex['audio_features'].beat_sync_score for ex in losers]
        beat_diff = abs(np.mean(winner_beats) - np.mean(loser_beats))
        importance.append(("beat_sync_score", beat_diff * 100))

        # Hook emphasis importance
        winner_hooks = [ex['audio_features'].hook_emphasis_count for ex in winners]
        loser_hooks = [ex['audio_features'].hook_emphasis_count for ex in losers]
        hook_diff = abs(np.mean(winner_hooks) - np.mean(loser_hooks))
        importance.append(("hook_emphasis", hook_diff))

        # Sort by importance
        importance.sort(key=lambda x: x[1], reverse=True)
        return importance[:5]  # Top 5
    def _detect_trend(self, key: str) -> str:
        """Detect if performance is trending up, down, or stable"""
        history = self.performance_history.get(key, [])
        if len(history) < 10:
            return "stable"

        # Get recent trend
        recent = history[-20:]
        recent_scores = [h['viral_score'] for h in recent]

        # Simple linear regression slope
        x = np.arange(len(recent_scores))
        slope = np.polyfit(x, recent_scores, 1)[0]

        if slope > 2.0:
            return "rising"
        elif slope < -2.0:
            return "declining"
        else:
            return "stable"
    def _retrain_models(self):
        """Retrain ML models on accumulated data"""
        if len(self.training_buffer) < 50:
            return
        print(f"Retraining models on {len(self.training_buffer)} examples...")

        # Prepare training data
        features_batch = [ex['audio_features'] for ex in self.training_buffer]
        targets_batch = [ex['viral_score'] for ex in self.training_buffer]

        # Train prediction model
        self.prediction_model.train_batch(features_batch, targets_batch)
        self.last_training_time = datetime.now()

        # Clear old profiles to force regeneration with new model
        self.profile_cache.clear()
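
    # -----------------------------------------------------------------------
    # NOTE: The captured gist ends above and does not include the helper
    # methods referenced earlier (_save_state, _load_state,
    # _calculate_prediction_confidence, _explain_prediction,
    # _score_to_performance_class, _generate_recommendation). The stubs below
    # are minimal sketches added so the module imports and the demo at the
    # bottom runs; they are assumptions, not the author's implementations.
    # -----------------------------------------------------------------------
    def _save_state(self):
        """Persist learner state to disk (minimal sketch)."""
        state_path = self.data_dir / "learner_state.pkl"
        with open(state_path, 'wb') as f:
            pickle.dump({
                'profile_cache': self.profile_cache,
                'performance_history': dict(self.performance_history),
                'total_videos_analyzed': self.total_videos_analyzed,
            }, f)

    def _load_state(self):
        """Restore learner state from disk if present (minimal sketch)."""
        state_path = self.data_dir / "learner_state.pkl"
        if state_path.exists():
            with open(state_path, 'rb') as f:
                state = pickle.load(f)
            self.profile_cache = state.get('profile_cache', {})
            self.performance_history = defaultdict(list, state.get('performance_history', {}))
            self.total_videos_analyzed = state.get('total_videos_analyzed', 0)

    def _calculate_prediction_confidence(self, audio_features: AudioFeatures) -> float:
        """Confidence proxy: amount of context-matched training data (sketch)."""
        key = f"{audio_features.niche}:{audio_features.platform}:{audio_features.beat_type}"
        n = len(self.performance_history.get(key, []))
        return min(n / 100.0, 1.0)

    def _explain_prediction(self, audio_features: AudioFeatures) -> Dict[str, float]:
        """Placeholder feature-importance map (sketch; a real version would use
        perturbation- or SHAP-style attribution)."""
        return {
            'pace_wpm': float(audio_features.pace_wpm),
            'beat_sync_score': float(audio_features.beat_sync_score),
            'hook_emphasis_count': float(audio_features.hook_emphasis_count),
        }

    def _score_to_performance_class(self, score: float) -> str:
        """Map a 0-100 viral score onto a coarse performance class (sketch)."""
        if score >= 75:
            return "viral"
        elif score >= 50:
            return "strong"
        elif score >= 25:
            return "average"
        return "weak"

    def _generate_recommendation(self, audio_features: AudioFeatures, predicted_score: float) -> str:
        """One-line textual recommendation (sketch)."""
        if predicted_score >= 50:
            return "Audio profile looks strong; publish as-is."
        return "Consider raising pace or beat sync before publishing."


# Illustrative smoke test (not in the original gist); exercises the ingest and
# prediction paths using the hypothetical make_example_features helper above.
if __name__ == "__main__":
    learner = AudioPatternLearner(data_dir="./audio_ml_data_demo")
    sample = make_example_features()
    metrics = PerformanceMetrics(
        views=120_000, completion_rate=0.62, avg_watch_time_sec=18.5,
        retention_curve=[1.0, 0.9, 0.8, 0.7, 0.65, 0.6, 0.55, 0.5, 0.45, 0.4],
        likes=9_000, comments=450, shares=1_200, saves=800,
        engagement_rate=0.09, viral_velocity=3.2, viral_score=72.0,
        platform_algorithm_boost=0.4, audience_retention_quality="good",
    )
    learner.ingest_video_data("demo_video_001", sample, metrics)
    print(learner.predict_viral_success(sample))
    # With >= min_samples_for_profile videos ingested for a context,
    # get_recommended_audio_profile would return a learned AudioProfile:
    # profile = learner.get_recommended_audio_profile("tech", "tiktok")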