Last active
February 26, 2026 10:28
-
-
Save KennyVaneetvelde/1eab6e1b5a2439cb295481d86156fe89 to your computer and use it in GitHub Desktop.
Atomic Agents Quickstart
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Timing-instrumented quickstart for the Atomic Agents framework.
# `time` is imported first so t0 can capture startup cost, including the
# heavy third-party imports below.
import time

t0 = time.perf_counter()  # reference point for all elapsed() measurements

from pydantic import Field
from openai import OpenAI
import instructor
from atomic_agents import AtomicAgent, AgentConfig, BasicChatInputSchema, BaseIOSchema
from atomic_agents.context import SystemPromptGenerator, ChatHistory
from dotenv import load_dotenv
def elapsed(label: str, since: float) -> float:
    """Print the seconds elapsed since *since*, tagged with *label*.

    Returns the current perf-counter timestamp so callers can chain
    measurements: ``t = elapsed("step", t)``.
    """
    now = time.perf_counter()
    message = f"[{now - since:.3f}s] {label}"
    print(message)
    return now
t = elapsed("imports loaded", t0)

# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()
t = elapsed("dotenv loaded", t0)
class CustomOutputSchema(BaseIOSchema):
    """
    Structured agent reply: the chat message itself plus a list of
    suggested follow-up questions for the user.
    """

    # Field descriptions (and this docstring) are surfaced to the model as
    # part of the output schema, so they should be meaningful.
    chat_message: str = Field(..., description="The chat message from the agent.")
    suggested_questions: list[str] = Field(..., description="Suggested follow-up questions.")
t = elapsed("CustomOutputSchema defined", t0)

# Assemble the system prompt from background facts, reasoning steps,
# and output-formatting rules.
system_prompt_generator = SystemPromptGenerator(
    background=["This assistant is knowledgeable, helpful, and suggests follow-up questions."],
    steps=[
        "Analyze the user's input to understand the context and intent.",
        "Formulate a relevant and informative response.",
        "Generate 3 suggested follow-up questions for the user."
    ],
    output_instructions=[
        "Provide clear and concise information in response to user queries.",
        "Conclude each response with 3 relevant suggested questions for the user."
    ]
)
t = elapsed("SystemPromptGenerator created", t0)

# Wrap the OpenAI client with instructor so responses are parsed into
# the pydantic output schema.
client = instructor.from_openai(OpenAI())
t = elapsed("OpenAI + instructor client initialized", t0)
# The agent is parameterized by its input/output schemas: it accepts
# BasicChatInputSchema and returns CustomOutputSchema instances.
agent = AtomicAgent[BasicChatInputSchema, CustomOutputSchema](
    config=AgentConfig(
        client=client,
        model="gpt-5-mini",
        system_prompt_generator=system_prompt_generator,
        history=ChatHistory(),
    )
)
t = elapsed("AtomicAgent initialized", t0)
if __name__ == "__main__":
    # Send one hard-coded message and report request latency separately
    # from the startup cost measured above.
    user_input = "Tell me about atomic agents framework"
    print(f"\nSending: '{user_input}'")

    t_request = time.perf_counter()
    response = agent.run(BasicChatInputSchema(chat_message=user_input))
    # Fix: these were f-strings with no placeholders (ruff F541);
    # plain string literals are correct here.
    elapsed("agent.run() completed", t_request)
    elapsed("total time since script start", t0)

    print(f"\nAgent: {response.chat_message}")
    print("Suggested questions:")
    for question in response.suggested_questions:
        print(f"- {question}")
Sign up for free to join this conversation on GitHub.
Already have an account? Sign in to comment.