smolagents - Example
A setup script that installs the dependencies and writes an Aider config file (.aider.conf.yml):
#!/bin/bash
# Install required packages
# pip install uv aider-chat google-generativeai smolagents

# Create directories
mkdir -p smol_exp aider_exp/references

# Change directory to aider_exp
cd aider_exp

# Create the .aider.conf.yml file
# ('EOF' is quoted so the $(...) in the comment below is written into the
# config literally instead of being expanded when this script runs)
cat << 'EOF' > .aider.conf.yml
# CLI for this config file -> aider -c .aider.conf.yml --read $(ls references/*.txt)
# openai-api-key: <KEY>
# openai-api-base: https://openrouter.ai/api/v1
# model: openai/meta-llama/llama-3.3-70b-instruct
# weak-model: openai/meta-llama/Meta-Llama-3.1-8B-Instruct
# editor-model: openai/Qwen/Qwen2.5-72B-Instruct
# model: groq/llama-3.3-70b-versatile
# architect: false
# api-key:
#   - groq=<KEY>
# weak-model: groq/llama-3.1-70b-versatile
# editor-model: groq/llama-3.1-70b-versatile
#------------ALTERNATE-CONFIG------------------
# model: gemini/gemini-2.0-flash-thinking-exp
# architect: true
# editor-model: gemini/gemini-2.0-flash
# weak-model: gemini/gemini-2.0-flash-lite-preview-02-05
# watch-files: true
# api-key:
#   - gemini=<KEY>  # Replace with your actual Gemini API key
# show-model-warnings: false
# map-tokens: 2048
# map-refresh: auto
# yes-always: true
# suggest-shell-commands: true
#----------------------------------------------
model: openrouter/meta-llama/llama-3.2-3b-instruct:free
architect: false
api-key:
  - openrouter=<KEY>
weak-model: openrouter/meta-llama/llama-3.2-3b-instruct:free
editor-model: openrouter/meta-llama/llama-3.2-3b-instruct:free
watch-files: true
show-model-warnings: false
# Repomap settings:
# Suggested number of tokens to use for the repo map; use 0 to disable (default: 1024)
map-tokens: 2048
# Control how often the repo map is refreshed. Options: auto, always, files, manual (default: auto)
map-refresh: auto
EOF

echo ".aider.conf.yml file created in aider_exp directory"
# CLI to launch Aider: aider -c .aider.conf.yml --watch-files /content/smolagents_exp/*.py
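The same launch can be driven from Python instead of the shell. A minimal sketch using only the CLI flags already shown above; the subprocess wrapper and the glob pattern are illustrative assumptions, not part of the original script:

import glob
import subprocess

# Hypothetical wrapper around the CLI command above; assumes aider is on PATH
# and that the setup script has already created aider_exp/.aider.conf.yml
watched = glob.glob("/content/smolagents_exp/*.py")
subprocess.run(
    ["aider", "-c", ".aider.conf.yml", "--watch-files", *watched],
    cwd="aider_exp",
    check=True,
)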
Calling a Hugging Face Inference endpoint through langchain_openai's ChatOpenAI:
from langchain_openai import ChatOpenAI

# Point the OpenAI-compatible client at the Hugging Face Inference API
model = ChatOpenAI(
    temperature=0.5,
    model='codellama/CodeLlama-34b-Instruct-hf',
    base_url='https://api-inference.huggingface.co/v1/',
    api_key='<KEY>'
)
response = model.invoke(input=[{"role": "user", "content": "What is the color of a flamingo?"}])
print(response.content)
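Because ChatOpenAI implements LangChain's standard chat-model interface, the same client can also stream tokens. A minimal sketch, reusing the model constructed above:

# Stream the reply chunk by chunk instead of waiting for the full message
for chunk in model.stream("What is the color of a flamingo?"):
    print(chunk.content, end="", flush=True)
print()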
Using smolagents with LiteLLMModel, both directly and through an agent:
# pip install smolagents
from smolagents.agents import ToolCallingAgent
from smolagents import tool, HfApiModel, TransformersModel, LiteLLMModel
from typing import Optional
import os

os.environ["OPENAI_API_KEY"] = "<KEY>"
os.environ["GEMINI_API_KEY"] = "<KEY>"
os.environ['MISTRAL_API_KEY'] = '<KEY>'
os.environ['DEEPSEEK_API_KEY'] = '<KEY>'

# For Anthropic: change model_id below to 'anthropic/claude-3-5-sonnet-20240620'
# model = LiteLLMModel(model_id='gemini/gemini-2.0-flash-exp')
# model = LiteLLMModel(model_id='mistral/mistral-large-latest')
model = LiteLLMModel(model_id='deepseek/deepseek-chat')

#### Straightforward usage ####
messages = [
    {"role": "user", "content": "What is the color of a flamingo?"},
]
print(model(messages=messages))

#### Agentic usage ####
@tool
def get_weather(location: str, celsius: Optional[bool] = False) -> str:
    """
    Get the weather for the coming days at a given location.
    Secretly this tool does not care about the location; it hates the weather everywhere.

    Args:
        location: the location
        celsius: whether to return the temperature in Celsius
    """
    return "The weather is UNGODLY with torrential rains and temperatures below -10°C"

agent = ToolCallingAgent(tools=[get_weather], model=model)
print(agent.run("What's the weather like in Paris?"))
Streaming a CodeAgent run step by step, e.g. to feed a chat UI:
from typing import Dict, Generator

from smolagents import CodeAgent
# NOTE: the import path (and some ActionStep attribute names) vary across
# smolagents versions; in recent releases ActionStep lives in smolagents.memory
from smolagents.memory import ActionStep


def stream_agent_response(agent: CodeAgent, prompt: str) -> Generator[Dict, None, None]:
    # First yield the thinking message
    yield {
        "role": "assistant",
        "content": "Let me think about that...",
        "metadata": {"title": "🤔 Thinking"}
    }
    # Run the agent and capture its response
    try:
        # Get the agent's response
        for step in agent.run(prompt, stream=True):
            if isinstance(step, ActionStep):
                # Show LLM output if present (as a collapsible thought)
                if step.llm_output:
                    yield {
                        "role": "assistant",
                        "content": step.llm_output,
                        "metadata": {"title": "🧠 Thought Process"}
                    }
                # Show the tool call if present
                if step.tool_call:
                    content = step.tool_call.arguments
                    if step.tool_call.name == "python_interpreter":
                        content = f"```python\n{content}\n```"
                    yield {
                        "role": "assistant",
                        "content": str(content),
                        "metadata": {"title": f"🛠️ Using {step.tool_call.name}"}
                    }
                # Show observations if present
                if step.observations:
                    yield {
                        "role": "assistant",
                        "content": f"```\n{step.observations}\n```",
                        "metadata": {"title": "👁️ Observations"}
                    }
                # Show errors if present
                if step.error:
                    yield {
                        "role": "assistant",
                        "content": str(step.error),
                        "metadata": {"title": "❌ Error"}
                    }
                # Show the action output if present
                # (no metadata, so it stays expanded in the UI)
                if step.action_output is not None and not step.error:
                    yield {
                        "role": "assistant",
                        "content": str(step.action_output)
                    }
            else:
                # Any other type of step output (e.g. the final answer
                # yielded at the end of the stream)
                yield {
                    "role": "assistant",
                    "content": str(step),
                    "metadata": {"title": "🔄 Processing"}
                }
    except Exception as e:
        yield {
            "role": "assistant",
            "content": f"Error: {str(e)}",
            "metadata": {"title": "❌ Error"}
        }
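A minimal usage sketch, assuming a model like the LiteLLMModel from the earlier snippets; the printing loop is illustrative, and in practice these dicts map directly onto Gradio-style chat messages:

# Hypothetical driver: stream a run and print each message as it arrives
from smolagents import CodeAgent, LiteLLMModel

model = LiteLLMModel(model_id='deepseek/deepseek-chat')  # reusing the earlier model choice
agent = CodeAgent(tools=[], model=model)
for message in stream_agent_response(agent, "How many seconds are there in a leap year?"):
    title = message.get("metadata", {}).get("title", "💬 Answer")
    print(f"{title}: {message['content']}")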
Sharing an agent on the Hugging Face Hub and loading it back:
from smolagents import (
    HfApiModel,
    CodeAgent,
    DuckDuckGoSearchTool,
    VisitWebpageTool
)

# agent = CodeAgent(
#     model=HfApiModel(),
#     tools=[DuckDuckGoSearchTool(), VisitWebpageTool()],
#     max_steps=10,
#     verbosity_level=2
# )
# Time to share the agent
# agent.push_to_hub("rajivmehtapy/g_agent")

# from_hub is a class method, so there is no need to build a throwaway
# agent first just to call it
executor = CodeAgent.from_hub("rajivmehtapy/g_agent", trust_remote_code=True)
# executor = agent
response = executor.run("Give me a list of Nobel Prize winners in 2024.")
print(response)
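For completeness, a sketch of the publishing side that the commented-out lines above hint at, reusing the imports from this snippet; the repo id is hypothetical, and push_to_hub requires a logged-in Hugging Face account:

# Hypothetical: build a search-capable agent and publish it under your own namespace
my_agent = CodeAgent(
    model=HfApiModel(),
    tools=[DuckDuckGoSearchTool(), VisitWebpageTool()],
    max_steps=10,
    verbosity_level=2
)
my_agent.push_to_hub("your-username/g_agent")  # hypothetical repo id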
Passing an image to a CodeAgent (multimodal input):
from smolagents.agents import ToolCallingAgent, CodeAgent
from smolagents import tool, HfApiModel, TransformersModel, LiteLLMModel
from typing import Optional
import os
from PIL import Image

os.environ["GEMINI_API_KEY"] = "<KEY>"

model = LiteLLMModel(model_id='gemini/gemini-2.0-flash')
agent = CodeAgent(tools=[],
                  model=model,
                  max_steps=6,
                  verbosity_level=2)
print(agent.run("What is in the provided image?", images=[Image.open("/content/smolagents_exp/kk.jpg")]))
Defining a custom tool with the @tool decorator:
from smolagents.agents import ToolCallingAgent
from smolagents import tool, LiteLLMModel
import os

os.environ['DEEPSEEK_API_KEY'] = '<KEY>'

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers.

    Args:
        a: The first number.
        b: The second number.
    """
    print("Multiplying...")
    return a * b

# Use the smolagents tool directly; no need to wrap a LangChain tool here
multiply_tool = multiply

model = LiteLLMModel(model_id='deepseek/deepseek-chat')
agent = ToolCallingAgent(tools=[multiply_tool], model=model)
print(agent.run("What is the result of 3456 * 2345?"))