GitHub Gists by PythicCoder (aribornstein)
# radiology_eval_trial_code.py
# Reproducible trial comparing independent vs combined LLM-as-judge evaluation.
import numpy as np
import matplotlib.pyplot as plt
from math import log2
rng = np.random.default_rng(20250817)  # fixed seed so the trial is reproducible
N = 200_000                            # number of simulated cases
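
# --- Illustrative continuation (not from the gist; the preview is truncated) ---
# A minimal sketch of how such a comparison could be simulated, assuming
# "independent" means each judge scores every case on its own and "combined"
# means a verdict is only accepted when both judges agree. The 90% judge
# accuracy and the agreement rule are assumptions, not the gist's settings.
truth = rng.integers(0, 2, size=N)                          # simulated gold labels
judge_a = np.where(rng.random(N) < 0.90, truth, 1 - truth)  # assumed 90%-accurate judge
judge_b = np.where(rng.random(N) < 0.90, truth, 1 - truth)  # second, independent judge

agree = judge_a == judge_b
print("judge A accuracy        :", (judge_a == truth).mean())
print("judge B accuracy        :", (judge_b == truth).mean())
print("agreement rate          :", agree.mean())
print("accuracy when they agree:", (judge_a[agree] == truth[agree]).mean())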
from autogen_core import CancellationToken
from pydantic import BaseModel
from autogen_core.models import ChatCompletionClient, CreateResult, SystemMessage, UserMessage, AssistantMessage
from autogen_core.tools import Tool
from llama_cpp import Llama
from typing import List, Dict, Any, Literal, Optional, Sequence, Union, AsyncGenerator
import json
class ComponentModel(BaseModel):
    provider: str
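
# --- Illustrative usage (not from the gist; the preview cuts off above) ---
# A minimal sketch of the llama_cpp.Llama handle that a ChatCompletionClient
# wrapper like the one started here would typically drive. The model path and
# generation settings are hypothetical.
llm = Llama(
    model_path="models/llama-3-8b-instruct.Q4_K_M.gguf",  # hypothetical local GGUF file
    n_ctx=4096,
    verbose=False,
)
resp = llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Summarize what a component provider string is."},
    ],
    max_tokens=64,
)
print(resp["choices"][0]["message"]["content"])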
import torch

# Export to ONNX (model, filepath, and input_sample are assumed to be defined)
model.to_onnx(filepath, input_sample, export_params=True)

# Export to TorchScript
torch.jit.save(model.to_torchscript(), "model.pt")
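
# --- Illustrative check of the exported artifacts (a sketch: onnxruntime is an
# extra dependency, and the input handling assumes input_sample is a single tensor) ---
import onnxruntime as ort

session = ort.InferenceSession(str(filepath))
onnx_out = session.run(None, {session.get_inputs()[0].name: input_sample.numpy()})
print("ONNX output shape:", onnx_out[0].shape)

scripted = torch.jit.load("model.pt")
print("TorchScript output shape:", scripted(input_sample).shape)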
# Predict at scale (millions_of_reviews is assumed to be a prepared DataModule)
flash.Trainer(gpus=32).predict(model, datamodule=millions_of_reviews)

# Predict on a single example
prediction = model.predict("This movie is great!")

# Save a checkpoint
trainer.save_checkpoint("text_class_model.pt")

# Load the model back from the checkpoint
model = TextClassifier.load_from_checkpoint("text_class_model.pt")
# Build the task: a text classifier with a RoBERTa backbone
model = TextClassifier(num_classes=2, backbone="roberta-base")

# Pick a trainer configuration
trainer = flash.Trainer(gpus=8)                # 8 GPUs on one node
trainer = flash.Trainer(gpus=8, num_nodes=32)  # scale out to 32 nodes
trainer = flash.Trainer(tpu_cores=1)           # or a TPU core
trainer = flash.Trainer(max_epochs=1)          # or a quick single-epoch run

# Finetune the pretrained backbone (datamodule holds the labeled text data)
trainer.finetune(model, datamodule=datamodule)
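
# --- Illustrative setup for the snippets above (a sketch: the dataset URL,
# column names, and the exact from_csv signature depend on the installed
# lightning-flash version) ---
import flash
from flash.core.data.utils import download_data
from flash.text import TextClassificationData, TextClassifier

# IMDB reviews, as used in the Lightning Flash docs
download_data("https://pl-flash-data.s3.amazonaws.com/imdb.zip", "data/")

# This is the datamodule that trainer.finetune() above consumes
datamodule = TextClassificationData.from_csv(
    "review",      # input text column (assumed)
    "sentiment",   # target column (assumed)
    train_file="data/imdb/train.csv",
    val_file="data/imdb/valid.csv",
    batch_size=16,
)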