Skip to content

Instantly share code, notes, and snippets.

@thoraxe
Created January 3, 2025 18:02
Show Gist options
  • Save thoraxe/687f0cd2354338a1deeb3037f45686ef to your computer and use it in GitHub Desktop.
Save thoraxe/687f0cd2354338a1deeb3037f45686ef to your computer and use it in GitHub Desktop.
import asyncio
import logging
import json
import subprocess
from dataclasses import dataclass
from devtools import pprint
import colorlog
import httpx
from httpx import AsyncClient
from pydantic import BaseModel, Field
from pydantic_ai import Agent, RunContext
from pydantic_ai.models.vertexai import VertexAIModel
import logfire
# Logfire instrumentation is local-only: traces are emitted (see the console
# output below) but nothing is sent to the Logfire service.
logfire.configure(send_to_logfire=False)
from dotenv import load_dotenv
load_dotenv() # take environment variables from .env.
# Directory that holds the `oc` binary used by the retrieval tool.
pre_path = "/home/thoraxe/bin/"
# Colorized log line layout consumed by colorlog.ColoredFormatter below.
log_format = "%(log_color)s%(asctime)s [%(levelname)s] %(reset)s%(purple)s[%(name)s] %(reset)s%(blue)s%(message)s"
handler = colorlog.StreamHandler()
handler.setFormatter(colorlog.ColoredFormatter(log_format))
# Route all root-logger output through the colorized handler at INFO level.
logging.basicConfig(level=logging.INFO, handlers=[handler])
logger = logging.getLogger(__name__)
@dataclass
class CLIDependencies:
    """Dependency bundle for agent runs.

    NOTE(review): nothing below passes this as ``deps_type`` to an Agent, so
    the token is currently unused -- confirm it was meant to be wired in.
    """

    # Auth token for the target cluster.
    token: str
class CLIResult(BaseModel):
    """Structured wrapper for the text produced by a CLI invocation."""

    output: str = Field(description="The output from the CLI command")
# Router: delegates to the knowledge agent or the retrieval agent through
# their tool wrappers, forwarding the user's original question verbatim.
routing_agent = Agent(
    "openai:gpt-4o",
    name="routing_agent",
    system_prompt="""You are a Kubernetes and OpenShift assistant. You should
only answer questions related to OpenShift and Kubernetes. You can retrieve
information from Kubernetes and OpenShift environments using your tools. You
also have a tool that can answer general knowledge questions. Always use the
original user question when calling a tool.""",
)
@routing_agent.tool
async def knowledge_tool(ctx: RunContext[str], original_query: str) -> str:
    """A tool for answering general OpenShift and Kubernetes knowledge
    questions. Use for obtaining how-to, documentation, and similar answers.

    Args:
        original_query: the question to get an answer for

    Returns:
        The knowledge agent's answer text.
    """
    result = await knowledge_agent.run(original_query)
    # Bug fix: ``all_messages`` is a method; the original passed the bound
    # method object to pprint, printing a method repr instead of the
    # conversation messages. Call it to get the message list.
    pprint(result.all_messages())
    return result.data
# Specialist for general knowledge / how-to / documentation questions; it has
# no tools and answers purely from the model, preferring OpenShift idioms.
knowledge_agent = Agent(
    "openai:gpt-4o",
    name="knowledge_agent",
    system_prompt="""You are a Kubernetes and OpenShift assistant. You should
only answer questions related to OpenShift and Kubernetes. You are supposed
to answer general knowledge, how-to, documentation, and other similar
questions about OpenShift and Kubernetes. Prefer OpenShift-specific answers
and try to avoid use of kubectl and other generic Kubernetes knowledge. Assume
the user is always asking questions about Openshift.
""",
)
# Specialist for live-cluster questions; its tools shell out to `oc` to pull
# state from the environment the script runs against.
retrieval_agent = Agent(
    "openai:gpt-4o",
    name="retrieval_agent",
    system_prompt="""You are a Kubernetes and OpenShift assistant. You should
only answer questions related to OpenShift and Kubernetes. You can retrieve
information from Kubernetes and OpenShift environments using your tools. Assume
the user is always asking questions about OpenShift. Use the oc command line
tool and do not use the kubectl tool when describing solutions.""",
)
@retrieval_agent.tool
def get_pod_list(ctx: RunContext[str], namespace: str) -> str:
    """Get the list of pods in a specific namespace.

    Args:
        namespace: the namespace to get the pod list from

    Returns:
        Newline-separated pod names (``oc get pods -o name`` output), or an
        error description if the command failed.
    """
    # Bug fix: without text=True, subprocess.run returns bytes for stdout,
    # violating the declared ``-> str`` return type.
    output = subprocess.run(
        [pre_path + "oc", "get", "pods", "-n", namespace, "-o", "name"],
        capture_output=True,
        text=True,
        timeout=2,
    )
    if output.returncode != 0:
        # Surface oc's error instead of silently returning empty stdout, so
        # the model can report what went wrong.
        return f"error listing pods in {namespace!r}: {output.stderr.strip()}"
    return output.stdout
# result = agent.run_sync('Where does "hello world" come from?')
# result = retrieval_agent.run_sync(
# "what pods are in the openshift-lightspeed namespace?"
# )
# Kick off one synchronous routed run; the router is expected to dispatch
# this how-to question to knowledge_tool (see the trace output below).
result = routing_agent.run_sync("How do I scale pods automatically?")
print(result.data)
# NOTE(review): module-level execution means this fires on import as well;
# consider restoring the __main__ guard below.
# if __name__ == "__main__":
# asyncio.run(main())
17:49:33.960 routing_agent run prompt=How do I scale pods automatically?
17:49:33.961 preparing model and tools run_step=1
17:49:33.963 model request
2025-01-03 12:49:34,701 [INFO] [httpx] HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
17:49:34.728 handle model response
17:49:34.733 running tools=['knowledge_tool']
17:49:34.735 knowledge_agent run prompt=How do I scale pods automatically?
17:49:34.736 preparing model and tools run_step=1
17:49:34.738 model request
2025-01-03 12:49:43,785 [INFO] [httpx] HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
17:49:43.790 handle model response
...
17:49:43.883 preparing model and tools run_step=2
17:49:43.884 model request
2025-01-03 12:49:50,165 [INFO] [httpx] HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK"
17:49:50.172 handle model response
...
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment