Skip to content

Instantly share code, notes, and snippets.

@maciejskorski
Last active April 30, 2025 12:21
Show Gist options
  • Save maciejskorski/f95d333774b685595dc7995614d69d63 to your computer and use it in GitHub Desktop.
Save maciejskorski/f95d333774b685595dc7995614d69d63 to your computer and use it in GitHub Desktop.
Patching LLM APIs for structured output with the Instructor library
import instructor
from openai import OpenAI
from anthropic import Anthropic
from pydantic import BaseModel
class UserInfo(BaseModel):
    """Response schema for the extraction task.

    The system prompt instructs the model to put the user's name into
    ``field1`` and the birth year into ``field2``; instructor validates
    the LLM output against this model.
    """

    field1: str  # user's name, per the system prompt
    field2: int  # birth year, per the system prompt
# Configure a structured-output client. Any supported provider works the
# same way — swap in the matching pair below to use OpenAI instead.
# client = instructor.from_openai(OpenAI())
# model = "gpt-4o-mini"
client = instructor.from_anthropic(Anthropic())
model = "claude-3-5-haiku-20241022"

system_template = "Your task is to extract the information about a user from the prompt text. extract name into field1 and born year into field2"
prompt_template = "The {user} was born in {year} and now is {age} years old"

record = {"user": "Joe", "year": 1995, "age": 30}
messages = [
    {"role": "system", "content": system_template},
    {"role": "user", "content": prompt_template.format(**record)},
]

# response_model is required by instructor's patched create();
# max_tokens is required by the Anthropic API.
answer = client.chat.completions.create(
    model=model,
    response_model=UserInfo,
    messages=messages,
    max_tokens=1024,
)
print(answer)

# Token accounting lives on the raw provider response that instructor wraps.
usage = answer._raw_response.usage
print(usage.input_tokens, usage.output_tokens)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment