| """ | |
| audio_memory_manager.py | |
| Production-grade memory management system for audio pattern learning and reinforcement. | |
| Implements dynamic decay, pattern prioritization, diversity enforcement, and RL integration. | |
| """ | |
| import json | |
| import time | |
| import numpy as np | |
| from collections import defaultdict | |
| from dataclasses import dataclass, asdict | |
| from typing import Dict, List, Optional, Tuple, Set | |
| from datetime import datetime, timedelta | |
| import hashlib | |
@dataclass
class AudioPattern:
    """Represents a learned audio pattern with full metadata."""
    pattern_id: str
    timestamp: float
    # Audio features
    pace_wpm: float
    pitch_variance: float
    hook_jump_db: float
    pause_timing: List[float]
    spectral_centroid: float
    emotional_intensity: float
    beat_alignment_error: float
    # Performance metrics
    retention_2s: float
    completion_rate: float
    replay_rate: float
    share_count: int
    save_count: int
    # Context tags
    niche: str
    platform: str
    beat_type: str
    voice_style: str
    language: str
    music_track: str
    trending_beat: bool
    # Learning metadata
    success_count: int = 0
    failure_count: int = 0
    viral_score: float = 0.0
    decay_factor: float = 1.0
    last_used: float = 0.0
    performance_history: List[float] = field(default_factory=list)
    def calculate_efficacy_score(self) -> float:
        """Calculate viral efficacy score based on performance metrics."""
        # Weighted combination of metrics
        base_score = (
            self.retention_2s * 0.3 +
            self.completion_rate * 0.25 +
            self.replay_rate * 0.2 +
            min(self.share_count / 100, 1.0) * 0.15 +
            min(self.save_count / 50, 1.0) * 0.1
        )
        # Success rate multiplier
        total_uses = self.success_count + self.failure_count
        if total_uses > 0:
            success_rate = self.success_count / total_uses
            base_score *= (0.5 + success_rate)
        # Trending boost
        if self.trending_beat:
            base_score *= 1.3
        # Apply decay factor
        return base_score * self.decay_factor
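
    # Worked example with hypothetical numbers: retention_2s=0.8, completion_rate=0.6,
    # replay_rate=0.2, share_count=50, save_count=10, 4 successes / 1 failure, trending:
    #   base  = 0.8*0.3 + 0.6*0.25 + 0.2*0.2 + 0.5*0.15 + 0.2*0.1 = 0.525
    #   success multiplier (0.5 + 4/5) -> 0.525 * 1.3  = 0.6825
    #   trending boost                 -> 0.6825 * 1.3 ~= 0.887, then * decay_factor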
@dataclass
class PatternRecommendation:
    """Recommendation for TTS and voice sync engines."""
    pattern_id: str
    confidence: float
    # Audio parameter recommendations
    target_pace_wpm: float
    target_pitch_variance: float
    hook_timing: List[float]
    pause_placements: List[float]
    emotional_intensity: float
    beat_alignment_guidance: Dict[str, Any]  # mixed float/str/int guidance values
    # Context
    niche: str
    platform: str
    beat_type: str
    rationale: str
class AudioMemoryManager:
    """
    Production memory management system for audio pattern learning.
    Implements full RL loop integration with dynamic decay and diversity enforcement.
    """
    def __init__(
        self,
        decay_rate: float = 0.95,
        decay_interval_hours: float = 24,
        min_pattern_uses: int = 3,
        diversity_threshold: float = 0.7,
        max_patterns_per_niche: int = 50
    ):
        self.decay_rate = decay_rate
        self.decay_interval_hours = decay_interval_hours
        self.min_pattern_uses = min_pattern_uses
        self.diversity_threshold = diversity_threshold
        self.max_patterns_per_niche = max_patterns_per_niche
        # Memory stores
        self.patterns: Dict[str, AudioPattern] = {}
        self.pattern_embeddings: Dict[str, np.ndarray] = {}
        # Indexing for fast lookup
        self.niche_patterns: Dict[str, Set[str]] = defaultdict(set)
        self.platform_patterns: Dict[str, Set[str]] = defaultdict(set)
        self.beat_patterns: Dict[str, Set[str]] = defaultdict(set)
        # Performance tracking
        self.global_stats = {
            'total_patterns': 0,
            'active_patterns': 0,
            'deprecated_patterns': 0,
            'total_recommendations': 0
        }
        # Replay buffer for high performers
        self.replay_buffer: List[str] = []
        self.replay_buffer_size = 100
        # Learning state
        self.last_decay_time = time.time()
        self.pattern_version = 0
    def _generate_pattern_id(self, pattern_data: Dict) -> str:
        """Generate a unique pattern ID from audio features (md5 as a cheap fingerprint, not for security)."""
        feature_str = f"{pattern_data['pace_wpm']:.2f}_{pattern_data['pitch_variance']:.2f}_" \
                      f"{pattern_data['niche']}_{pattern_data['beat_type']}"
        return hashlib.md5(feature_str.encode()).hexdigest()[:16]
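
    # Example: pace_wpm=165.0, pitch_variance=0.35, niche='motivational', beat_type='phonk'
    # hashes the string "165.00_0.35_motivational_phonk" into a 16-hex-character ID, so
    # repeat observations of the same feature combination update one pattern in place.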
    def _compute_pattern_embedding(self, pattern: AudioPattern) -> np.ndarray:
        """Compute feature embedding for pattern similarity comparison."""
        # Normalization constants are heuristic scale guesses (e.g. ~200 wpm speech,
        # ~20 dB hook jumps, ~5 kHz spectral centroid) to keep features near [0, 1].
        features = [
            pattern.pace_wpm / 200.0,
            pattern.pitch_variance,
            pattern.hook_jump_db / 20.0,
            pattern.spectral_centroid / 5000.0,
            pattern.emotional_intensity,
            pattern.beat_alignment_error,
            len(pattern.pause_timing) / 10.0,
            np.mean(pattern.pause_timing) if pattern.pause_timing else 0.0
        ]
        return np.array(features, dtype=float)
    def _calculate_pattern_similarity(self, emb1: np.ndarray, emb2: np.ndarray) -> float:
        """Calculate cosine similarity between pattern embeddings."""
        norm1 = np.linalg.norm(emb1)
        norm2 = np.linalg.norm(emb2)
        if norm1 == 0 or norm2 == 0:
            return 0.0
        return float(np.dot(emb1, emb2) / (norm1 * norm2))
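
    # Example: emb1=[1, 0] and emb2=[1, 1] give cos = 1/sqrt(2) ~= 0.707, just above
    # the default diversity_threshold of 0.7, so those two patterns would be treated
    # as near-duplicates by _enforce_niche_diversity.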
    def record_pattern_success(
        self,
        pattern_data: Dict,
        performance_score: float,
        is_success: bool = True
    ) -> str:
        """
        Record a pattern usage with performance feedback.

        Args:
            pattern_data: Dictionary containing all pattern features and context
            performance_score: Overall performance metric (0-1)
            is_success: Whether this pattern achieved target metrics

        Returns:
            pattern_id: ID of the recorded/updated pattern
        """
        pattern_id = self._generate_pattern_id(pattern_data)
        if pattern_id in self.patterns:
            # Update existing pattern
            pattern = self.patterns[pattern_id]
            if is_success:
                pattern.success_count += 1
            else:
                pattern.failure_count += 1
            pattern.performance_history.append(performance_score)
            pattern.last_used = time.time()
            # Update performance metrics with an exponential moving average
            alpha = 0.3  # Weight for the new observation
            pattern.retention_2s = (
                (1 - alpha) * pattern.retention_2s
                + alpha * pattern_data.get('retention_2s', pattern.retention_2s)
            )
            pattern.completion_rate = (
                (1 - alpha) * pattern.completion_rate
                + alpha * pattern_data.get('completion_rate', pattern.completion_rate)
            )
            pattern.replay_rate = (
                (1 - alpha) * pattern.replay_rate
                + alpha * pattern_data.get('replay_rate', pattern.replay_rate)
            )
            # Recalculate viral score
            pattern.viral_score = pattern.calculate_efficacy_score()
        else:
            # Create new pattern
            pattern = AudioPattern(
                pattern_id=pattern_id,
                timestamp=time.time(),
                pace_wpm=pattern_data['pace_wpm'],
                pitch_variance=pattern_data['pitch_variance'],
                hook_jump_db=pattern_data['hook_jump_db'],
                pause_timing=pattern_data['pause_timing'],
                spectral_centroid=pattern_data['spectral_centroid'],
                emotional_intensity=pattern_data['emotional_intensity'],
                beat_alignment_error=pattern_data['beat_alignment_error'],
                retention_2s=pattern_data['retention_2s'],
                completion_rate=pattern_data['completion_rate'],
                replay_rate=pattern_data['replay_rate'],
                share_count=pattern_data.get('share_count', 0),
                save_count=pattern_data.get('save_count', 0),
                niche=pattern_data['niche'],
                platform=pattern_data['platform'],
                beat_type=pattern_data['beat_type'],
                voice_style=pattern_data['voice_style'],
                language=pattern_data['language'],
                music_track=pattern_data.get('music_track', ''),
                trending_beat=pattern_data.get('trending_beat', False),
                success_count=1 if is_success else 0,
                failure_count=0 if is_success else 1,
                last_used=time.time()
            )
            pattern.viral_score = pattern.calculate_efficacy_score()
            pattern.performance_history = [performance_score]
            # Count only genuinely new patterns, not updates to existing ones
            self.global_stats['total_patterns'] += 1
        # Store pattern
        self.patterns[pattern_id] = pattern
        # Compute and store embedding
        self.pattern_embeddings[pattern_id] = self._compute_pattern_embedding(pattern)
        # Index pattern (sets make re-indexing idempotent)
        self.niche_patterns[pattern.niche].add(pattern_id)
        self.platform_patterns[pattern.platform].add(pattern_id)
        self.beat_patterns[pattern.beat_type].add(pattern_id)
        # Update replay buffer with high performers
        self._update_replay_buffer(pattern_id)
        # Enforce diversity in the niche
        self._enforce_niche_diversity(pattern.niche)
        return pattern_id
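
    # EMA example: with alpha=0.3, a stored retention_2s of 0.80 updated with a new
    # observation of 0.90 becomes 0.7 * 0.80 + 0.3 * 0.90 = 0.83, so recent results
    # shift the stored metrics gradually rather than overwriting them.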
    def _update_replay_buffer(self, pattern_id: str):
        """Maintain a replay buffer of the top-performing patterns."""
        if pattern_id not in self.replay_buffer:
            self.replay_buffer.append(pattern_id)
        # Sort by viral score and keep the top N
        self.replay_buffer.sort(
            key=lambda pid: self.patterns[pid].viral_score,
            reverse=True
        )
        self.replay_buffer = self.replay_buffer[:self.replay_buffer_size]
    def _enforce_niche_diversity(self, niche: str):
        """Ensure pattern diversity within a niche to avoid overfitting."""
        niche_pattern_ids = list(self.niche_patterns[niche])
        if len(niche_pattern_ids) <= self.max_patterns_per_niche:
            return
        # Find near-duplicate patterns
        to_remove: Set[str] = set()
        for i, pid1 in enumerate(niche_pattern_ids):
            if pid1 in to_remove:
                continue
            emb1 = self.pattern_embeddings[pid1]
            pattern1 = self.patterns[pid1]
            for pid2 in niche_pattern_ids[i + 1:]:
                if pid2 in to_remove:
                    continue
                emb2 = self.pattern_embeddings[pid2]
                similarity = self._calculate_pattern_similarity(emb1, emb2)
                # If patterns are too similar, keep the better performer
                if similarity > self.diversity_threshold:
                    pattern2 = self.patterns[pid2]
                    if pattern1.viral_score > pattern2.viral_score:
                        to_remove.add(pid2)
                    else:
                        to_remove.add(pid1)
                        break
        # Remove redundant patterns
        for pid in to_remove:
            self._deprecate_pattern(pid)
    def decay_old_patterns(self) -> int:
        """
        Apply time-based decay to all patterns.
        Older patterns receive exponentially reduced weights.

        Returns:
            Number of patterns decayed
        """
        current_time = time.time()
        hours_since_decay = (current_time - self.last_decay_time) / 3600
        if hours_since_decay < self.decay_interval_hours:
            return 0
        decayed_count = 0
        deprecated_ids = []
        for pattern_id, pattern in self.patterns.items():
            # Calculate age in hours
            age_hours = (current_time - pattern.timestamp) / 3600
            # Apply exponential decay based on age
            decay_periods = age_hours / self.decay_interval_hours
            pattern.decay_factor = self.decay_rate ** decay_periods
            # Additional decay for unused patterns
            hours_since_use = (current_time - pattern.last_used) / 3600
            if hours_since_use > 72:  # 3 days
                pattern.decay_factor *= 0.8
            # Recalculate viral score with the new decay
            pattern.viral_score = pattern.calculate_efficacy_score()
            decayed_count += 1
            # Mark for deprecation if performance is too low
            if pattern.viral_score < 0.1 and \
                    pattern.success_count + pattern.failure_count >= self.min_pattern_uses:
                deprecated_ids.append(pattern_id)
        # Deprecate underperformers after the loop, so the dict is not mutated
        # while being iterated
        for pid in deprecated_ids:
            self._deprecate_pattern(pid)
        self.last_decay_time = current_time
        self.pattern_version += 1
        return decayed_count
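
    # Decay example: with decay_rate=0.95 and decay_interval_hours=24, a pattern that
    # is 48 hours old gets decay_factor = 0.95 ** 2 = 0.9025; if it also has not been
    # used for more than 72 hours, that drops further to 0.9025 * 0.8 ~= 0.722.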
    def _deprecate_pattern(self, pattern_id: str):
        """Remove a pattern from active memory."""
        if pattern_id not in self.patterns:
            return
        pattern = self.patterns[pattern_id]
        # Remove from indices
        self.niche_patterns[pattern.niche].discard(pattern_id)
        self.platform_patterns[pattern.platform].discard(pattern_id)
        self.beat_patterns[pattern.beat_type].discard(pattern_id)
        # Remove from the replay buffer
        if pattern_id in self.replay_buffer:
            self.replay_buffer.remove(pattern_id)
        # Delete the pattern
        del self.patterns[pattern_id]
        del self.pattern_embeddings[pattern_id]
        self.global_stats['deprecated_patterns'] += 1
    def get_active_patterns(
        self,
        niche: Optional[str] = None,
        platform: Optional[str] = None,
        beat_type: Optional[str] = None,
        min_viral_score: float = 0.3,
        limit: int = 20
    ) -> List[AudioPattern]:
        """
        Retrieve active patterns matching criteria, sorted by viral score.

        Args:
            niche: Filter by niche
            platform: Filter by platform
            beat_type: Filter by beat type
            min_viral_score: Minimum viral score threshold
            limit: Maximum number of patterns to return

        Returns:
            List of AudioPattern objects sorted by viral score
        """
        # Automatic decay check
        self.decay_old_patterns()
        # Start with all patterns
        candidate_ids = set(self.patterns.keys())
        # Apply filters
        if niche:
            candidate_ids &= self.niche_patterns[niche]
        if platform:
            candidate_ids &= self.platform_patterns[platform]
        if beat_type:
            candidate_ids &= self.beat_patterns[beat_type]
        # Filter by viral score and sort
        active_patterns = [
            self.patterns[pid] for pid in candidate_ids
            if self.patterns[pid].viral_score >= min_viral_score
        ]
        active_patterns.sort(key=lambda p: p.viral_score, reverse=True)
        self.global_stats['active_patterns'] = len(active_patterns)
        return active_patterns[:limit]
    def get_pattern_recommendations(
        self,
        niche: str,
        platform: str,
        beat_type: str,
        top_k: int = 3
    ) -> List[PatternRecommendation]:
        """
        Generate actionable recommendations for TTS and voice sync engines.

        Args:
            niche: Target niche
            platform: Target platform
            beat_type: Target beat type
            top_k: Number of recommendations to return

        Returns:
            List of PatternRecommendation objects
        """
        # Get top patterns for this context
        patterns = self.get_active_patterns(
            niche=niche,
            platform=platform,
            beat_type=beat_type,
            limit=top_k * 2
        )
        if not patterns:
            # Fall back to a broader search
            patterns = self.get_active_patterns(
                niche=niche,
                platform=platform,
                limit=top_k * 2
            )
        if not patterns:
            # Final fallback: the replay buffer of global top performers
            patterns = [self.patterns[pid] for pid in self.replay_buffer[:top_k]]
        recommendations = []
        for pattern in patterns[:top_k]:
            # Generate beat alignment guidance
            beat_guidance = {
                'target_error': pattern.beat_alignment_error * 0.8,  # Aim to do better
                'hook_placement': 'first_beat' if pattern.hook_jump_db > 10 else 'second_beat',
                'sync_tolerance_ms': 50 if pattern.beat_alignment_error < 0.05 else 100
            }
            # Build the rationale
            rationale_parts = []
            if pattern.viral_score > 0.7:
                rationale_parts.append("High viral score")
            if pattern.trending_beat:
                rationale_parts.append("trending beat")
            if pattern.success_count > 10:
                rationale_parts.append(f"{pattern.success_count} successes")
            rec = PatternRecommendation(
                pattern_id=pattern.pattern_id,
                confidence=pattern.viral_score,
                target_pace_wpm=pattern.pace_wpm,
                target_pitch_variance=pattern.pitch_variance,
                hook_timing=[0.5, 1.0, 2.5] if pattern.hook_jump_db > 8 else [1.0, 2.0],
                pause_placements=pattern.pause_timing,
                emotional_intensity=pattern.emotional_intensity,
                beat_alignment_guidance=beat_guidance,
                niche=pattern.niche,
                platform=pattern.platform,
                beat_type=pattern.beat_type,
                rationale="; ".join(rationale_parts)
            )
            recommendations.append(rec)
        self.global_stats['total_recommendations'] += len(recommendations)
        return recommendations
    def analyze_winning_patterns(
        self,
        niche: str,
        min_samples: int = 10
    ) -> Dict:
        """
        Analyze characteristics of winning patterns versus losing patterns.
        Statistical analysis to detect viral features.

        Returns:
            Dictionary with analysis results
        """
        patterns = self.get_active_patterns(niche=niche, limit=1000)
        if len(patterns) < min_samples:
            return {'error': 'Insufficient samples for analysis'}
        # Split into winners and losers
        winners = [p for p in patterns if p.viral_score > 0.6]
        losers = [p for p in patterns if p.viral_score < 0.4]
        if not winners or not losers:
            return {'error': 'Need both winners and losers for comparison'}

        def compute_stats(pattern_list, attr):
            values = [getattr(p, attr) for p in pattern_list]
            return {
                'mean': np.mean(values),
                'std': np.std(values),
                'median': np.median(values),
                'min': np.min(values),
                'max': np.max(values)
            }

        analysis = {
            'niche': niche,
            'winner_count': len(winners),
            'loser_count': len(losers),
            'features': {}
        }
        # Analyze key features
        features = ['pace_wpm', 'pitch_variance', 'hook_jump_db',
                    'emotional_intensity', 'beat_alignment_error']
        for feature in features:
            winner_stats = compute_stats(winners, feature)
            loser_stats = compute_stats(losers, feature)
            # Calculate effect size (Cohen's d)
            mean_diff = winner_stats['mean'] - loser_stats['mean']
            pooled_std = np.sqrt((winner_stats['std'] ** 2 + loser_stats['std'] ** 2) / 2)
            effect_size = mean_diff / pooled_std if pooled_std > 0 else 0.0
            analysis['features'][feature] = {
                'winners': winner_stats,
                'losers': loser_stats,
                'effect_size': effect_size,
                'recommendation': ('increase' if effect_size > 0.3
                                   else 'decrease' if effect_size < -0.3
                                   else 'maintain')
            }
        # Surface the top viral patterns
        viral_patterns = []
        for winner in sorted(winners, key=lambda p: p.viral_score, reverse=True)[:5]:
            viral_patterns.append({
                'pattern_id': winner.pattern_id,
                'viral_score': winner.viral_score,
                'pace_wpm': winner.pace_wpm,
                'hook_jump_db': winner.hook_jump_db,
                'platform': winner.platform,
                'beat_type': winner.beat_type
            })
        analysis['top_viral_patterns'] = viral_patterns
        return analysis
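
    # Effect-size reading (illustrative numbers): a winner mean pace of 170 wpm versus
    # a loser mean of 150 wpm with a pooled std of 25 gives d = (170 - 150) / 25 = 0.8,
    # well past the 0.3 cutoff, so the feature would be flagged 'increase'.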
    def get_cross_video_insights(self) -> Dict:
        """
        Generate global insights across all videos for cross-learning.

        Returns:
            Dictionary with platform, niche, and beat-specific insights
        """
        insights = {
            'timestamp': datetime.now().isoformat(),
            'total_patterns': len(self.patterns),
            'platform_performance': {},
            'niche_performance': {},
            'beat_performance': {},
            'trending_features': {}
        }
        # Platform performance
        for platform in self.platform_patterns.keys():
            patterns = self.get_active_patterns(platform=platform, limit=100)
            if patterns:
                insights['platform_performance'][platform] = {
                    'avg_viral_score': np.mean([p.viral_score for p in patterns]),
                    'top_pace_wpm': np.median([p.pace_wpm for p in patterns[:10]]),
                    'pattern_count': len(patterns)
                }
        # Niche performance
        for niche in self.niche_patterns.keys():
            patterns = self.get_active_patterns(niche=niche, limit=100)
            if patterns:
                insights['niche_performance'][niche] = {
                    'avg_viral_score': np.mean([p.viral_score for p in patterns]),
                    'dominant_beat': Counter(p.beat_type for p in patterns).most_common(1)[0][0],
                    'pattern_count': len(patterns)
                }
        # Beat performance
        for beat in self.beat_patterns.keys():
            patterns = self.get_active_patterns(beat_type=beat, limit=100)
            if patterns:
                insights['beat_performance'][beat] = {
                    'avg_viral_score': np.mean([p.viral_score for p in patterns]),
                    'optimal_pace': np.median([p.pace_wpm for p in patterns[:10]]),
                    'pattern_count': len(patterns)
                }
        # Global trending features
        all_active = self.get_active_patterns(limit=200)
        if all_active:
            insights['trending_features'] = {
                'avg_pace_wpm': np.mean([p.pace_wpm for p in all_active]),
                'avg_pitch_variance': np.mean([p.pitch_variance for p in all_active]),
                'avg_hook_jump': np.mean([p.hook_jump_db for p in all_active]),
                'trending_beat_ratio': sum(1 for p in all_active if p.trending_beat) / len(all_active)
            }
        return insights
    def export_patterns(self, filepath: str):
        """Export all patterns to a JSON file."""
        export_data = {
            'version': self.pattern_version,
            'timestamp': datetime.now().isoformat(),
            'stats': self.global_stats,
            'patterns': [asdict(p) for p in self.patterns.values()]
        }
        with open(filepath, 'w') as f:
            json.dump(export_data, f, indent=2)
    def import_patterns(self, filepath: str):
        """Import patterns from a JSON file."""
        with open(filepath, 'r') as f:
            data = json.load(f)
        for pattern_dict in data['patterns']:
            pattern = AudioPattern(**pattern_dict)
            self.patterns[pattern.pattern_id] = pattern
            self.pattern_embeddings[pattern.pattern_id] = self._compute_pattern_embedding(pattern)
            # Rebuild indices
            self.niche_patterns[pattern.niche].add(pattern.pattern_id)
            self.platform_patterns[pattern.platform].add(pattern.pattern_id)
            self.beat_patterns[pattern.beat_type].add(pattern.pattern_id)
        # Keep the bookkeeping consistent with the restored memory
        self.global_stats['total_patterns'] = len(self.patterns)
    def get_memory_stats(self) -> Dict:
        """Return comprehensive memory statistics."""
        return {
            **self.global_stats,
            'pattern_version': self.pattern_version,
            'replay_buffer_size': len(self.replay_buffer),
            'niche_count': len(self.niche_patterns),
            'platform_count': len(self.platform_patterns),
            'beat_type_count': len(self.beat_patterns),
            'avg_pattern_age_hours': np.mean([
                (time.time() - p.timestamp) / 3600
                for p in self.patterns.values()
            ]) if self.patterns else 0,
            'avg_viral_score': np.mean([
                p.viral_score for p in self.patterns.values()
            ]) if self.patterns else 0
        }
# Example usage and integration
if __name__ == "__main__":
    # Initialize the manager
    manager = AudioMemoryManager(
        decay_rate=0.95,
        decay_interval_hours=24,
        diversity_threshold=0.7
    )
    # Example: record a successful pattern
    pattern_data = {
        'pace_wpm': 165.0,
        'pitch_variance': 0.35,
        'hook_jump_db': 12.5,
        'pause_timing': [0.3, 0.5, 0.8, 1.2],
        'spectral_centroid': 2500.0,
        'emotional_intensity': 0.8,
        'beat_alignment_error': 0.03,
        'retention_2s': 0.85,
        'completion_rate': 0.72,
        'replay_rate': 0.15,
        'share_count': 45,
        'save_count': 23,
        'niche': 'motivational',
        'platform': 'tiktok',
        'beat_type': 'phonk',
        'voice_style': 'energetic',
        'language': 'en',
        'music_track': 'trending_phonk_01',
        'trending_beat': True
    }
    pattern_id = manager.record_pattern_success(
        pattern_data=pattern_data,
        performance_score=0.82,
        is_success=True
    )
    print(f"Recorded pattern: {pattern_id}")
    # Get recommendations for a new video
    recommendations = manager.get_pattern_recommendations(
        niche='motivational',
        platform='tiktok',
        beat_type='phonk',
        top_k=3
    )
    print(f"\nTop {len(recommendations)} recommendations:")
    for i, rec in enumerate(recommendations, 1):
        print(f"{i}. Pattern {rec.pattern_id}")
        print(f"   Confidence: {rec.confidence:.2f}")
        print(f"   Target pace: {rec.target_pace_wpm:.1f} WPM")
        print(f"   Rationale: {rec.rationale}")
    # Analyze winning patterns
    analysis = manager.analyze_winning_patterns('motivational')
    if 'error' not in analysis:
        print(f"\n=== Pattern Analysis for {analysis['niche']} ===")
        print(f"Winners: {analysis['winner_count']}, Losers: {analysis['loser_count']}")
        for feature, data in analysis['features'].items():
            print(f"\n{feature}:")
            print(f"  Winner avg: {data['winners']['mean']:.2f}")
            print(f"  Loser avg: {data['losers']['mean']:.2f}")
            print(f"  Recommendation: {data['recommendation']}")
    # Get memory stats
    stats = manager.get_memory_stats()
    print("\n=== Memory Stats ===")
    for key, value in stats.items():
        print(f"{key}: {value}")