Skip to content

Instantly share code, notes, and snippets.

@perryism
perryism / llm_factory.py
Last active April 28, 2025 05:23
LLM factory to create a Gemini or LM Studio client with structured-output support.
import os
import logging
logger = logging.getLogger()
from pydantic import BaseModel
def pydantic_model_to_custom_json_schema(model: type[BaseModel], name: str, strict: bool = True) -> dict:
"""
Converts a Pydantic model to a custom JSON schema format.
"""
@perryism
perryism / gemini_structured.py
Created April 9, 2025 21:15
Gemini with structured output using LangChain
from langchain_google_genai import ChatGoogleGenerativeAI
import os
# Configuration for the Gemini chat client, kept in a private mapping so the
# settings are easy to inspect or reuse.
_GEMINI_CONFIG = dict(
    model="gemini-2.0-flash-001",
    temperature=0,
    max_tokens=None,
    timeout=None,
    max_retries=2,
)

# Shared LangChain Gemini client; temperature 0 for deterministic output.
llm = ChatGoogleGenerativeAI(**_GEMINI_CONFIG)
@perryism
perryism / Dockerfile
Created February 10, 2025 23:46
Extract dependencies from a Docker image and upload them to Snowflake
# Base image: CPython 3.8 on Debian.
FROM python:3.8
# Work directly inside site-packages so the installed package files are easy
# to collect from the image (per the gist description, for upload to Snowflake).
WORKDIR /usr/local/lib/python3.8/site-packages
# Install evidently (and its transitive dependencies) into the image.
RUN pip install evidently
@perryism
perryism / dummy_statsd.py
Created November 22, 2024 23:52
A local statsd stub
from datetime import datetime
from contextlib import contextmanager
class DummyStatsd:
    """Stand-in statsd client that echoes metric calls to stdout.

    Useful for local development where no real statsd daemon is running.
    """

    def increment(self, event, tags=None):
        """Print the increment call (between banner lines) instead of
        emitting a real metric."""
        banner = "*" * 80
        print(banner)
        print(f"DummyStatsd.increment(event={event}, tags={tags})")
        print(banner)
def histogram(self, event, value, tags=None):
@perryism
perryism / install_pygraphviz.sh
Created November 22, 2024 17:30
Install pygraphviz on macOS
# Install graphviz via Homebrew, then point the C compiler and linker at its
# headers and libraries so pygraphviz's C extension can find them when it builds.
brew install graphviz
export CFLAGS="-I $(brew --prefix graphviz)/include"
export LDFLAGS="-L $(brew --prefix graphviz)/lib"
pip install pygraphviz
@perryism
perryism / delegator.py
Last active August 23, 2024 18:01
A delegator
from typing import Any
import unittest
class Delegator:
def __init__(self, instance, method_name):
self.instance = instance
self.method_name = method_name
def __getattr__(self, name):
if name == self.method_name:
@perryism
perryism / chrome_finder.py
Created June 4, 2024 02:24
Get the matching Chrome driver
import json
import sys
import os
import urllib.request
# CLI usage: the first argument is the Chrome version (or prefix) to match.
match = sys.argv[1]
# Known-good Chrome-for-Testing versions feed. The trailing commented-out
# `sys.argv[2]` suggests the path was once a CLI argument — TODO(review):
# confirm whether it should be configurable again.
filepath = "https://googlechromelabs.github.io/chrome-for-testing/known-good-versions-with-downloads.json"#sys.argv[2]
def get_versions(filepath):
if os.path.exists(filepath):
@perryism
perryism / custom_model_garden_client.py
Last active February 20, 2024 20:19
Custom Model Garden client
from langchain.callbacks.manager import AsyncCallbackManager
import os, logging
logger = logging.getLogger(__name__)
stream_manager = AsyncCallbackManager([])
from langchain_google_vertexai import VertexAIModelGarden
from typing import Any, List, Optional
"""
I found myself waiting for the LLM model to give me a result. The result is always the same, since I am working on the lower-level library.
Example
@memoize("test.pkl")
def sum(x, y):
return x + y
"""
@perryism
perryism / Dockerfile
Created February 10, 2024 19:58
CUDA Dockerfile for an LLM
# CUDA 11.2 runtime base image (Ubuntu 20.04) for GPU-enabled LLM workloads.
FROM nvidia/cuda:11.2.2-runtime-ubuntu20.04

# Refresh the package index before installing anything.
RUN apt-get update --yes --quiet

# Ubuntu 20.04 ships Python 3.8; pull Python 3.10 from the deadsnakes PPA.
# FIX: there is no apt package named "pip" — the Debian/Ubuntu package is
# "python3-pip", so the original install line failed with
# "Unable to locate package pip".
RUN apt install software-properties-common -y && add-apt-repository ppa:deadsnakes/ppa -y && \
    apt-get install -y python3.10 \
    python3-pip \
    python3.10-distutils \
    curl

# The deadsnakes python3.10 has no pip bound to it; bootstrap one with get-pip.py
# so `python3.10 -m pip` works.
RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10