Bash Agent using Google GenAI
#!/usr/bin/env python
"""
v0_bash_agent_gemini.py - A bash agent built on the google-genai library.
Based on https://github.com/shareAI-lab/learn-claude-code/blob/main/v0_bash_agent.py.
Generated with antigravity, using the prompt: "create a new bash agent python file that use google genai library and bash tool."
"""
import os
import sys
import subprocess

from dotenv import load_dotenv
from google import genai
from google.genai import types

load_dotenv()

# Configuration
API_KEY = os.getenv("GEMINI_API_KEY")
MODEL = os.getenv("MODEL_NAME", "gemini-2.5-flash")

if not API_KEY:
    sys.exit("Please set GEMINI_API_KEY in .env")

client = genai.Client(api_key=API_KEY)

TOOLS = [{
    "name": "bash",
    "description": """Execute shell command. Common patterns:
- Read: cat/head/tail, grep/find/rg/ls, wc -l
- Write: echo 'content' > file, sed -i 's/old/new/g' file
- Subagent: python v0_bash_agent_gemini.py 'task description' (spawns isolated agent, returns summary)""",
    "parameters": {
        "type": "object",
        "properties": {"command": {"type": "string"}},
        "required": ["command"]
    }
}]
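
# For reference: google-genai also accepts the same declaration built from its typed
# helpers instead of a plain dict (it coerces either form). A rough, untested sketch,
# mirroring the fields of the dict above and introducing nothing new:
#
#   types.Tool(function_declarations=[
#       types.FunctionDeclaration(
#           name="bash",
#           description="Execute shell command. ...",
#           parameters=types.Schema(
#               type=types.Type.OBJECT,
#               properties={"command": types.Schema(type=types.Type.STRING)},
#               required=["command"],
#           ),
#       )
#   ])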

def execute_bash(command: str) -> str:
    """Executes a shell command and returns its output."""
    try:
        result = subprocess.run(command, shell=True, capture_output=True, text=True)
        return result.stdout + result.stderr
    except Exception as e:
        return f"Execution error: {e}"

def chat(prompt, history=None):
    if history is None:
        history = []

    # Add user message
    history.append({"role": "user", "parts": [{"text": prompt}]})

    while True:
        try:
            response = client.models.generate_content(
                model=MODEL,
                contents=history,
                config=types.GenerateContentConfig(
                    system_instruction="You are a helpful bash agent. You can execute shell commands using the 'bash' tool. Always state the command you are running.",
                    tools=[{"function_declarations": TOOLS}]
                )
            )
        except Exception as e:
            return f"Error communicating with API: {e}"

        if not response.candidates:
            return "No response from model."

        candidate = response.candidates[0]
        content = candidate.content
        history.append(content)

        # Check for function calls
        function_calls = []
        if content.parts:
            for part in content.parts:
                if part.function_call:
                    function_calls.append(part.function_call)

        if not function_calls:
            # No tool use requested: return the model's text response
            if not content.parts:
                return ""
            text_parts = [p.text for p in content.parts if p.text]
            return "".join(text_parts)

        # Execute tools
        tool_outputs = []
        for fc in function_calls:
            if fc.name == "bash":
                cmd = fc.args["command"]
                # print(f"\033[33m[Executing: {cmd}]\033[0m", file=sys.stderr)
                result = execute_bash(cmd)
                tool_outputs.append({
                    "function_response": {
                        "name": "bash",
                        "response": {"result": result}
                    }
                })

        # Send tool outputs back to the model and loop for the next turn
        history.append({"role": "tool", "parts": tool_outputs})

if __name__ == "__main__":
    if len(sys.argv) > 1:
        # Non-interactive mode
        print(chat(sys.argv[1]))
    else:
        # Interactive mode
        print("Bash Agent acting as user 'ferryzhou'. Type 'exit' to quit.")
        history = []
        while True:
            try:
                user_input = input("\033[36m>> \033[0m")
                if user_input.lower() in ["exit", "quit"]:
                    break
                if not user_input.strip():
                    continue
                response = chat(user_input, history)
                print(response)
            except (EOFError, KeyboardInterrupt):
                print("\nExiting...")
                break
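
For reference, a minimal sketch of driving the agent from another Python script instead of the command line, assuming the file above is saved as v0_bash_agent_gemini.py next to a .env containing GEMINI_API_KEY. The driver file name and the task strings are only examples, not part of the gist:

# run_agent_example.py - hypothetical driver, not part of the original gist.
# Importing the module runs its top-level setup (load_dotenv, client creation),
# so GEMINI_API_KEY must already be present in .env or the environment.
from v0_bash_agent_gemini import chat

history = []  # pass the same list across calls to keep multi-turn context
print(chat("list the three largest files in the current directory", history))
print(chat("now show the first five lines of the largest one", history))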