Created
May 28, 2025 23:04
-
-
Save schipiga/031826998d4527c5da50b71b197dfa71 to your computer and use it in GitHub Desktop.
Retaining AI Agent Tools Experience with LLM Embeddings
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import os | |
| from mcp import ClientSession, StdioServerParameters | |
| from mcp.client.stdio import stdio_client | |
| from beeai_framework.agents.react import ReActAgent | |
| from beeai_framework.backend import ChatModel | |
| from beeai_framework.memory import UnconstrainedMemory | |
| from beeai_framework.backend.message import SystemMessage | |
| from beeai_framework.tools.mcp import MCPTool | |
| from beeai_framework.errors import FrameworkError | |
| from openai import OpenAI | |
| import numpy as np | |
| from rich import print | |
# Launch spec for the Pipedrive MCP server: run it from GitHub via npx over stdio.
# PIPEDRIVE_API_KEY must be present in the environment; PATH is forwarded so
# npx can locate the node toolchain.
server_params = StdioServerParameters(
    command="npx",
    args=["-y", "github:schipiga/mcp-server"],
    env={"PIPEDRIVE_API_KEY": os.getenv("PIPEDRIVE_API_KEY"), "PATH": os.getenv("PATH")},
)
def process_agent_events(data, event) -> None:
    """Log agent lifecycle events to the console.

    Handled event names: "start", "success", "retry", "error", "update";
    anything else is silently ignored. For an "update" event whose key is
    "tool_name", the tool's name is recorded in the module-level
    ``used_tool_names`` set so callers can see which tools fired.
    """
    name = event.name
    if name == "start":
        print("Agent π€ : ", "starting new iteration")
        return
    if name == "success":
        print("Agent π€ : ", "success")
        return
    if name == "retry":
        print("Agent π€ : ", "retrying the action...")
        return
    if name == "error":
        print("Agent π€ : ", FrameworkError.ensure(data.error).explain())
        return
    if name == "update":
        update = data.update
        if update.key == "tool_name":
            used_tool_names.add(update.value)
        print(f"Agent({update.key}) π€ : ", update.parsed_value)
def observer(emitter) -> None:
    """Wire the agent's full event stream into process_agent_events."""
    handler = process_agent_events
    emitter.on("*.*", handler)
async def run():
    """Drive a ReAct agent against the Pipedrive MCP server and record tool usage.

    Spawns the MCP server over stdio, builds a ReAct agent backed by
    gpt-4o-mini with the server's tools, runs a fixed set of prompts while
    tracking which tools fire (via the observer), folds each prompt into the
    tool-experience memo, and finally prints tool recommendations for two
    unseen prompts.
    """
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()

            memory = UnconstrainedMemory()
            await memory.add(SystemMessage("You are Pipedrive AI bot. Your goal is to give answer to user requests using the results of executed tools."))

            agent = ReActAgent(
                llm=ChatModel.from_name("openai:gpt-4o-mini"),
                tools=await MCPTool.from_client(session),
                memory=memory,
            )

            prompts = (
                "Get all info about my user and his permissions roles and settings",
                "Get all info about my user",
                "What are my user roles?",
            )
            for prompt in prompts:
                # Fresh set per prompt so add_experience only sees this run's tools.
                used_tool_names.clear()
                response = await agent.run(prompt=prompt).observe(observer)
                print("[bold]Agent:[/bold]", f"[yellow]{response.result.text}[/yellow]")
                add_experience(prompt)

            print_experienced_tools("What are details about my user account?")
            print_experienced_tools("How many opened deals do I have?")
# OpenAI client used only for computing text embeddings (reads OPENAI_API_KEY
# from the environment).
client = OpenAI()
# Names of the tools the agent invoked during the current prompt run;
# cleared before each prompt and filled by process_agent_events.
used_tool_names = set()
# Maps tool name -> unit-norm embedding summarizing prompts where the tool was used.
tools_memo = {}
def make_cos_sim(a, b):
    """Return the cosine similarity of vectors *a* and *b*.

    PEP 8 (E731): a named callable should be a ``def``, not a lambda
    assignment — behavior is unchanged.

    NOTE(review): assumes neither vector has zero norm; a zero vector would
    divide by zero — confirm callers never pass one.
    """
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
def embed(text):
    """Embed *text* with OpenAI's text-embedding-3-small model.

    Returns the embedding as a 1-D numpy array of floats.
    """
    result = client.embeddings.create(input=text, model="text-embedding-3-small")
    return np.array(result.data[0].embedding)
def merge_experience(a, b, bias=0.5):
    """Blend two embedding vectors and re-normalize to unit length.

    *bias* weights the first vector: the blend is ``bias * a + (1 - bias) * b``,
    divided by its Euclidean norm so the result lies on the unit sphere.
    """
    blended = bias * a + (1 - bias) * b
    return blended / np.linalg.norm(blended)
def add_experience(prompt):
    """Fold the just-finished *prompt* into each used tool's experience vector.

    For every tool recorded in ``used_tool_names``: if the tool has no entry
    in ``tools_memo`` yet, the prompt embedding becomes its first experience;
    otherwise the embedding is merged into the existing one.
    """
    prompt_vec = embed(prompt)
    for tool_name in used_tool_names:
        existing = tools_memo.get(tool_name)
        if existing is None:
            tools_memo[tool_name] = prompt_vec
        else:
            print(tool_name, 'experience merging...')
            tools_memo[tool_name] = merge_experience(existing, prompt_vec)
def print_experienced_tools(prompt, threshold=0.5):
    """Print tools recommended for *prompt* based on accumulated experience.

    Embeds the prompt, scores it against each tool's experience vector with
    cosine similarity, prints every score, and finally prints the tools whose
    score exceeds *threshold*.

    Generalization: the cut-off was a hard-coded 0.5; it is now a keyword
    parameter with the same default, so existing callers are unaffected.
    """
    emb_prompt = embed(prompt)
    experienced_tools = []
    for (name, expr) in tools_memo.items():
        cos_sim = make_cos_sim(emb_prompt, expr)
        print('similarity of', name, 'is', cos_sim)
        if cos_sim > threshold:
            experienced_tools.append(name)
    print(f"[bold]Basing on experience recommend next tools for prompt '{prompt}':[/bold]", f"[magenta]{experienced_tools}[/magenta]")
# Script entry point: run the async demo to completion.
if __name__ == "__main__":
    import asyncio
    asyncio.run(run())
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment