Created
February 27, 2026 07:44
-
-
Save KennyVaneetvelde/bc7c117cb60e6f054ddc58e9fa47c274 to your computer and use it in GitHub Desktop.
Quickstart: OpenAI API directly with structured output + timings
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import time

# Capture the start time immediately after `time` is available, BEFORE the
# remaining (potentially slow) imports, so the first elapsed() report
# includes import cost. Do not move imports above this line.
t0 = time.perf_counter()

import json  # NOTE(review): not referenced in the visible code — confirm before removing

from dotenv import load_dotenv
from openai import OpenAI
from pydantic import BaseModel, Field  # merged: both names come from pydantic
def elapsed(label: str, since: float) -> float:
    """Print the seconds elapsed since *since*, tagged with *label*.

    Returns the current perf_counter() reading so callers can chain
    successive measurements.
    """
    current = time.perf_counter()
    delta = current - since
    print(f"[{delta:.3f}s] {label}")
    return current
t = elapsed("imports loaded", t0)

# Load environment variables from a local .env file (presumably including
# OPENAI_API_KEY for the client created below — verify against .env).
load_dotenv()
t = elapsed("dotenv loaded", t0)
class CustomOutputSchema(BaseModel):
    """Structured assistant reply: a chat message plus suggested follow-ups.

    NOTE: pydantic exposes this docstring as the model's JSON-schema
    description, so it is also visible to the structured-output parser —
    the previous placeholder text ("docstring for the custom output
    schema") was being sent to the API verbatim.
    """

    # The assistant's main reply text.
    chat_message: str = Field(..., description="The chat message from the agent.")
    # Follow-up questions the assistant proposes (the prompt asks for 3).
    suggested_questions: list[str] = Field(..., description="Suggested follow-up questions.")
t = elapsed("CustomOutputSchema defined", t0)

# System prompt passed as `instructions` to the Responses API call below.
SYSTEM_PROMPT = """You are a helpful assistant.
# Background
- This assistant is knowledgeable, helpful, and suggests follow-up questions.
# Steps
- Analyze the user's input to understand the context and intent.
- Formulate a relevant and informative response.
- Generate 3 suggested follow-up questions for the user.
# Output Instructions
- Provide clear and concise information in response to user queries.
- Conclude each response with 3 relevant suggested questions for the user."""
t = elapsed("system prompt defined", t0)

# OpenAI() with no arguments reads configuration (API key) from the
# environment — loaded above via load_dotenv().
client = OpenAI()
t = elapsed("OpenAI client initialized", t0)

# No agent initialization step needed — using OpenAI API directly.
# Kept as an explicit timing checkpoint, presumably so the output lines up
# with an agent-framework variant this script is compared against.
t = elapsed("(no agent init needed)", t0)
if __name__ == "__main__":
    # Send a single request with structured output and report per-phase timings.
    user_input = "Tell me about atomic agents framework"
    print(f"\nSending: '{user_input}'")

    t_request = time.perf_counter()
    api_response = client.responses.parse(
        model="gpt-5-mini",
        instructions=SYSTEM_PROMPT,
        input=[{"role": "user", "content": user_input}],
        text_format=CustomOutputSchema,
    )
    result = api_response.output_parsed

    elapsed("client.responses.parse() completed", t_request)
    elapsed("total time since script start", t0)

    # Render the parsed structured output.
    print(f"\nAgent: {result.chat_message}")
    print("Suggested questions:")
    for followup in result.suggested_questions:
        print(f"- {followup}")
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment