Nourdine Lotfi (secemp9)
going beyond
@secemp9
secemp9 / split_modules.py
Created January 29, 2026 11:00
AST-based Python module splitter
#!/usr/bin/env python3
"""
General-purpose AST-based Python module splitter.
Analyzes any monolithic .py file, builds a full dependency graph with structural
fingerprints, auto-discovers natural clusters, and produces split module files.
Capabilities:
- AST parsing with full symbol table (ast + inspect-style signatures)
- Structural hashing (hashlib) to fingerprint each symbol for drift detection
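The preview cuts off here. As a rough illustration of the two building blocks named above (collecting top-level symbols with ast and fingerprinting them with hashlib), a minimal sketch, assuming a hypothetical input file name, might look like this:
import ast
import hashlib

def collect_symbols(source: str) -> dict:
    """Map top-level function/class names to their AST nodes."""
    tree = ast.parse(source)
    return {
        node.name: node
        for node in tree.body
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef))
    }

def fingerprint(node: ast.AST) -> str:
    """Structural hash: dump the AST without attributes so formatting-only
    edits do not change the fingerprint (useful for drift detection)."""
    dumped = ast.dump(node, include_attributes=False)
    return hashlib.sha256(dumped.encode("utf-8")).hexdigest()[:16]

if __name__ == "__main__":
    src = open("monolith.py").read()  # hypothetical input file
    for name, node in collect_symbols(src).items():
        print(name, fingerprint(node))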
<LLM_JUDGE_SPEC version="1.0" name="AntiLLMY" schema="1">
<mission>Score a passage for LLM-y speak (“slop”), using only the given text. Return a compact diagnosis plus concrete fixes.</mission>
<!-- ===== Regex library (mechanically checkable signs) ===== -->
<regex_library flags="i">
<!-- Tone / puffery / editorializing -->
<pattern id="puffery_words">\b(stunning|breathtaking|must[- ]?(see|visit)|rich (?:cultural )?heritage|enduring(?:\s+legacy)?|nestled|in the heart of|watershed moment|stands as|serves as|is a testament|plays a (?:vital|significant) role|continues to captivate|solidifies)\b</pattern>
<pattern id="editorialize">\b(it'?s (?:important|worth) (?:to note|noting)|no discussion would be complete|this (?:article|section) (?:wouldn'?t|would not) exist without)\b</pattern>
<pattern id="weasel">\b(some (?:critics|observers|commentators) (?:argue|say|believe)|many (?:believe|say)|industry (?:reports|analysts) (?:suggest|say))\b</pattern>
<pattern id="superficial_ing">\b(?:ens
@secemp9
secemp9 / openai_toolcall.py
Created July 23, 2025 00:17
OpenAI tool-calling test
import requests
import json
import time
import copy
from enum import Enum
from typing import Dict, List, Any, Optional, Union, Iterator
from dataclasses import dataclass, field
class ModelErrorType(Enum):
    MAX_LENGTH = "1024"
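The preview stops at the first enum member. For context, a minimal sketch of the kind of tool-call round trip such a test exercises, using requests against an OpenAI-compatible /chat/completions endpoint (the model name and the get_weather tool are placeholders, not the gist's actual tools):
import json
import os
import requests

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # hypothetical tool
        "description": "Get current weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

resp = requests.post(
    "https://api.openai.com/v1/chat/completions",
    headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
    json={
        "model": "gpt-4o-mini",
        "messages": [{"role": "user", "content": "Weather in Paris?"}],
        "tools": tools,
    },
    timeout=60,
)
message = resp.json()["choices"][0]["message"]
for call in message.get("tool_calls", []):
    # arguments arrive as a JSON string that still needs parsing
    print(call["function"]["name"], json.loads(call["function"]["arguments"]))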
@secemp9
secemp9 / openrouter_toolcall.py
Created July 22, 2025 02:29
OpenRouter tool-calling verification
import requests
import json
import os
import subprocess
import time
from typing import Dict, List, Any, Optional
import uuid
import shutil
class ToolCallingTester:
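The ToolCallingTester body is not shown in the preview. A minimal sketch of the verification step such a tester typically performs on a returned tool_call object: check that the call targets a known tool and that its arguments parse as JSON with the required fields (the KNOWN_TOOLS registry below is illustrative, not the gist's):
import json

KNOWN_TOOLS = {"run_shell": {"required": ["command"]}}  # hypothetical tool registry

def verify_tool_call(tool_call: dict):
    fn = tool_call.get("function", {})
    name = fn.get("name")
    if name not in KNOWN_TOOLS:
        return False, f"unknown tool: {name!r}"
    try:
        args = json.loads(fn.get("arguments", ""))
    except json.JSONDecodeError as exc:
        return False, f"arguments are not valid JSON: {exc}"
    missing = [k for k in KNOWN_TOOLS[name]["required"] if k not in args]
    if missing:
        return False, f"missing required args: {missing}"
    return True, "ok"

print(verify_tool_call({"function": {"name": "run_shell",
                                     "arguments": '{"command": "ls"}'}}))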
<output_sequence function="x(i)=2(i)+2" i_start="0" i_end="10000" count="10001">
2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 396, 398, 4
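The sequence above is simply x(i) = 2*i + 2 evaluated for i = 0 through 10000 (10001 values, running from 2 up to 20002) and can be reproduced in one line:
values = [2 * i + 2 for i in range(10001)]
print(values[:5], "...", values[-1], len(values))  # [2, 4, 6, 8, 10] ... 20002 10001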
@secemp9
secemp9 / vlm_token_vis_moondream.py
Created May 31, 2025 20:53
attempt at reproducing the interactive VLM demo from https://www.groundlight.ai/blog/how-vlm-works-tokens, which visualizes tokens over their predicted patch regions on an image
import torch, torch.nn.functional as F
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoProcessor
from PIL import Image
REVISION = "2025-04-14" # lock to a known good tag
MODEL_ID = "vikhyatk/moondream2"
device = "cuda" # or "cpu" / bitsandbytes / GGUF, etc.
# 1️⃣ load model + text tokenizer
model = AutoModelForCausalLM.from_pretrained(
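The preview is cut off inside the from_pretrained call. A minimal sketch of how moondream2 is commonly loaded and queried; encode_image and answer_question come from the model's trust_remote_code implementation, and the image path is a placeholder:
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from PIL import Image

REVISION = "2025-04-14"  # lock to a known good tag
MODEL_ID = "vikhyatk/moondream2"
device = "cuda" if torch.cuda.is_available() else "cpu"

model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID, revision=REVISION, trust_remote_code=True,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, revision=REVISION)

image = Image.open("example.jpg")   # placeholder image
enc = model.encode_image(image)     # vision encoder -> patch embeddings
print(model.answer_question(enc, "Describe this image.", tokenizer))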
@secemp9
secemp9 / vlm_token_vis_llava.py
Created May 31, 2025 20:52
attempt at reproducing the interactive VLM demo from https://www.groundlight.ai/blog/how-vlm-works-tokens, which visualizes tokens over their predicted patch regions on an image
import torch, torch.nn.functional as F
from transformers import AutoProcessor, LlavaForConditionalGeneration
from PIL import Image
MODEL_ID = "llava-hf/llava-1.5-7b-hf"
device = "cuda"
model = LlavaForConditionalGeneration.from_pretrained(
    MODEL_ID, torch_dtype=torch.float16, device_map="auto")
processor = AutoProcessor.from_pretrained(MODEL_ID)
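The rest of the script is not shown. One plausible sketch of the overlay step such a demo needs, assuming per-token patch scores have already been extracted (LLaVA-1.5 at 336x336 with 14-pixel patches gives a 24x24 = 576 patch grid); the scores tensor below is a stand-in:
import torch
import torch.nn.functional as F

patch_scores = torch.rand(576)              # stand-in: one score per image patch
grid = patch_scores.reshape(1, 1, 24, 24)   # (N, C, H, W) layout for interpolate
heatmap = F.interpolate(grid, size=(336, 336), mode="bilinear", align_corners=False)
heatmap = heatmap.squeeze().numpy()         # 336x336 map, ready to alpha-blend over the image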
@secemp9
secemp9 / rotation_invariant_cnn.py
Created May 31, 2025 04:21
rotation-invariant CNN implementation
# -*- coding: utf-8 -*-
"""
CyCNN implementation for rotation invariant image classification.
Based on "CyCNN: A Rotation Invariant CNN using Polar Mapping and Cylindrical Convolution Layers"
"""
# Core Libraries
import os
import random
import time
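The preview ends at the imports. The core idea the cited paper describes, convolution on a polar-mapped image whose angular axis wraps around, is usually implemented with circular padding along that axis; a minimal sketch with illustrative layer sizes, not the gist's actual layers:
import torch
import torch.nn as nn
import torch.nn.functional as F

class CylindricalConv2d(nn.Module):
    """Conv2d over a polar-mapped image: circular padding on the angular axis,
    zero padding on the radial axis."""
    def __init__(self, in_ch, out_ch, k=3):
        super().__init__()
        self.k = k
        self.conv = nn.Conv2d(in_ch, out_ch, k, padding=0)

    def forward(self, x):  # x: (N, C, radius, angle)
        p = self.k // 2
        x = F.pad(x, (p, p, 0, 0), mode="circular")   # wrap the angular axis
        x = F.pad(x, (0, 0, p, p), mode="constant")   # zero-pad the radial axis
        return self.conv(x)

y = CylindricalConv2d(3, 16)(torch.randn(2, 3, 64, 64))
print(y.shape)  # torch.Size([2, 16, 64, 64])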
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Chips Filter Demo</title>
<style>
body {
@secemp9
secemp9 / autocolor_ps.py
Created May 6, 2025 23:54
Auto Color (Photoshop)-inspired algorithm
def improved_auto_color(image: Image.Image) -> Image.Image:
    """
    Improved version of Photoshop-like Auto Color.
    It works by:
    1. Converting to float and normalizing.
    2. Computing per-channel percentiles (e.g., 0.5% and 99.5%).
    3. Stretching values between those percentiles to [0, 255].
    4. This avoids over-amplifying noise or outliers.
    """
    img_array = np.array(image).astype(np.float32)
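The preview stops after the array conversion. A minimal sketch of the per-channel percentile stretch the docstring describes (steps 2 and 3), written independently of the gist's actual body and using the docstring's 0.5/99.5 example percentiles:
import numpy as np
from PIL import Image

def percentile_stretch(image: Image.Image, lo=0.5, hi=99.5) -> Image.Image:
    arr = np.asarray(image).astype(np.float32)  # assumes an RGB (H, W, 3) image
    out = np.empty_like(arr)
    for c in range(arr.shape[2]):               # stretch each channel separately
        p_lo, p_hi = np.percentile(arr[..., c], [lo, hi])
        scale = 255.0 / max(p_hi - p_lo, 1e-6)  # avoid divide-by-zero on flat channels
        out[..., c] = np.clip((arr[..., c] - p_lo) * scale, 0, 255)
    return Image.fromarray(out.astype(np.uint8))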