Skip to content

Instantly share code, notes, and snippets.

@swateek
Created January 14, 2025 03:04
Show Gist options
  • Save swateek/e41ce8750431b3c63c199aa981e98ae7 to your computer and use it in GitHub Desktop.
Crew AI with Bedrock LLMs
bedrock_agent:
  role: >
    Bedrock AI Assistant
  goal: >
    Help users generate responses using AWS Bedrock's LLM.
  backstory: >
    You are an advanced AI assistant powered by AWS Bedrock. You excel at processing
    and generating meaningful insights from textual data.
  tools:
    - bedrock_tool
[DEFAULT]
OPENAI_API_KEY=a-string-without-any-quotes
[DB]
DB_USERNAME=a-string-without-any-quotes
DB_PASSWORD=a-string-without-any-quotes
DB_HOST=a-string-without-any-quotes
DB_NAME=a-string-without-any-quotes
[AWS]
AWS_REGION=a-string-without-any-quotes
AWS_PROFILE=a-string-without-any-quotes
AWS_ACCESS_KEY_ID=a-string-without-any-quotes
AWS_SECRET_ACCESS_KEY=a-string-without-any-quotes
"""Using Bedrock LLMs"""
import configparser
import yaml
from crewai import LLM, Agent, Crew, Task
CONFIG_FILENAME = ".config.ini"
class ConfigHelper:
    """Thin read-only wrapper around :mod:`configparser` for one INI file."""

    def __init__(self, config_filename):
        """Parse *config_filename* into an in-memory parser.

        Note: ``ConfigParser.read`` silently skips files it cannot open,
        so a missing file only surfaces later as a ``NoSectionError``.
        """
        parser = configparser.ConfigParser()
        parser.read(config_filename)
        self.config = parser

    def get_value_by_section_and_key(self, section, key):
        """Return the string value stored under ``[section] key``."""
        return self.config.get(section, key)

    def get_all_details_of_section(self, section) -> dict:
        """Return every key/value pair of ``[section]`` as a plain dict."""
        return dict(self.config.items(section))
# Pull every settings group we need out of the local INI file.
cfg = ConfigHelper(CONFIG_FILENAME)
default_cfgs, db_cfgs, aws_cfgs = (
    cfg.get_all_details_of_section(section)
    for section in ("DEFAULT", "DB", "AWS")
)

# Claude 3 Sonnet served through AWS Bedrock, authenticated with the
# credentials from the [AWS] section of the config file.
llm_bedrock_claude = LLM(
    model="anthropic.claude-3-sonnet-20240229-v1:0",
    aws_access_key_id=aws_cfgs["aws_access_key_id"],
    aws_secret_access_key=aws_cfgs["aws_secret_access_key"],
    aws_region_name=aws_cfgs["aws_region"],
)
# Paths of the YAML files describing the agents and the tasks.
files = {"agents": "config/agents.yaml", "tasks": "config/tasks.yaml"}

# Load both configurations; safe_load refuses arbitrary YAML tags, which is
# the right default for config files.
configs = {}
for config_type, file_path in files.items():
    # Explicit encoding so parsing does not depend on the platform default
    # (fixes mojibake on Windows locales).
    with open(file_path, "r", encoding="utf-8") as file:
        configs[config_type] = yaml.safe_load(file)

# Convenience aliases for the two configuration groups.
agents_config = configs["agents"]
tasks_config = configs["tasks"]
# Instantiate one Agent per entry in agents.yaml; all agents share the
# Bedrock-backed LLM and start with no tools attached.
agents = []
for agent_data in agents_config.values():
    agents.append(
        Agent(
            role=agent_data["role"],
            goal=agent_data["goal"],
            backstory=agent_data["backstory"],
            llm=llm_bedrock_claude,
            tools=[],
            verbose=False,
        )
    )

# Instantiate one Task per entry in tasks.yaml. Every task is pinned to the
# first agent — this example defines only a single agent in agents.yaml.
tasks = []
for task_data in tasks_config.values():
    tasks.append(
        Task(
            description=task_data["description"],
            expected_output=task_data["expected_output"],
            agent=agents[0],
            verbose=False,
        )
    )
# Assemble the crew from the agents and tasks built above.
crew = Crew(
    agents=agents,
    tasks=tasks,
)

# Run the crew once; {input_prompt} in the task description is filled in
# from the inputs mapping.
kickoff_inputs = {"input_prompt": "Explain quantum computing in simple terms."}
result = crew.kickoff(inputs=kickoff_inputs)
print(result)

# Summarize token consumption for the run.
print("******************* COSTING *******************")
usage = crew.usage_metrics
print("Total tokens: ", usage.total_tokens)
print("Prompt tokens: ", usage.prompt_tokens)
print("Output tokens: ", usage.completion_tokens)
generate_response_task:
  description: >
    Use AWS Bedrock's LLM to generate a response to the given input prompt.
    Input Prompt:
    {input_prompt}
  expected_output: >
    A detailed and coherent response from Bedrock's LLM.
@swateek
Copy link
Author

swateek commented Jan 14, 2025

Project directory structure

.
├── config
│   ├── __init__.py
│   ├── agents.yaml
│   └── tasks.yaml
├── .config.ini
├── crew_ai_with_bedrock_llm.py
├── pip-requirements.txt

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment