Created
February 27, 2026 07:44
-
-
Save KennyVaneetvelde/bf5a189f62adbdddd9f6fcd0a92c1760 to your computer and use it in GitHub Desktop.
Quickstart: Instructor (no Atomic Agents) with structured output + timings
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import time

# Start the clock before the heavy third-party imports so their load cost is
# included in the first "imports loaded" timing report.
t0 = time.perf_counter()

import instructor
from dotenv import load_dotenv
from openai import OpenAI
from pydantic import BaseModel, Field  # merged: was two separate pydantic import lines
def elapsed(label: str, since: float) -> float:
    """Print how many seconds have passed since *since*, tagged with *label*.

    Returns the current ``time.perf_counter()`` reading so callers can chain
    successive measurements.
    """
    current = time.perf_counter()
    print(f"[{current - since:.3f}s] {label}")
    return current
t = elapsed("imports loaded", t0)
# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()
t = elapsed("dotenv loaded", t0)
class CustomOutputSchema(BaseModel):
    """
    Structured response the model must produce: a chat reply plus a list of
    suggested follow-up questions. Used as instructor's ``response_model`` so
    the API output is validated into this shape.
    """
    # The assistant's main textual reply to the user.
    chat_message: str = Field(..., description="The chat message from the agent.")
    # Follow-up questions the assistant proposes (the system prompt asks for 3).
    suggested_questions: list[str] = Field(..., description="Suggested follow-up questions.")
t = elapsed("CustomOutputSchema defined", t0)

# Sent verbatim as the "system" message. The markdown-style headings are prompt
# structure for the model, not anything executed by this script.
SYSTEM_PROMPT = """You are a helpful assistant.
# Background
- This assistant is knowledgeable, helpful, and suggests follow-up questions.
# Steps
- Analyze the user's input to understand the context and intent.
- Formulate a relevant and informative response.
- Generate 3 suggested follow-up questions for the user.
# Output Instructions
- Provide clear and concise information in response to user queries.
- Conclude each response with 3 relevant suggested questions for the user."""
t = elapsed("system prompt defined", t0)

# instructor.from_openai wraps the OpenAI client so that
# chat.completions.create accepts a response_model and returns a validated
# pydantic object instead of a raw completion.
client = instructor.from_openai(OpenAI())
t = elapsed("OpenAI + instructor client initialized", t0)

# No agent initialization step needed — instructor patches the client directly.
t = elapsed("(no agent init needed)", t0)
if __name__ == "__main__":
    # One-shot demo request; per-step timings are printed along the way.
    query = "Tell me about atomic agents framework"
    print(f"\nSending: '{query}'")

    conversation = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": query},
    ]

    request_start = time.perf_counter()
    result = client.chat.completions.create(
        model="gpt-5-mini",
        response_model=CustomOutputSchema,
        messages=conversation,
    )
    elapsed("client.chat.completions.create() completed", request_start)
    elapsed("total time since script start", t0)

    # result is a validated CustomOutputSchema instance, not a raw completion.
    print(f"\nAgent: {result.chat_message}")
    print("Suggested questions:")
    for q in result.suggested_questions:
        print(f"- {q}")
Sign up for free to join this conversation on GitHub.
Already have an account? Sign in to comment.