Created
May 3, 2025 20:28
-
-
Save ibuilder/0c279ccf3372174f009fd9d15b27e361 to your computer and use it in GitHub Desktop.
Unified AI client for Gemini, Copilot, and Claude, intended for coding tasks.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
""" | |
Unified AI Client for Gemini, Copilot, and Claude | |
This module provides a unified interface for interacting with multiple AI models: | |
- Google's Gemini API | |
- Anthropic's Claude API | |
- GitHub Copilot (unofficial method) | |
It allows applications to use these services interchangeably or in combination, | |
leveraging each model's strengths for different tasks. | |
Requirements: | |
- google-generativeai (for Gemini) | |
- anthropic (for Claude) | |
- selenium, requests (for unofficial Copilot access) | |
- python-dotenv | |
Usage: | |
1. Set up environment variables for the services you want to use: | |
- GEMINI_API_KEY: Your Gemini API key | |
- ANTHROPIC_API_KEY: Your Anthropic API key | |
- GITHUB_USERNAME: Your GitHub username | |
- GITHUB_TOKEN: Your GitHub personal access token | |
2. Create a UnifiedAIClient instance and interact with one or more models. | |
""" | |
import os
import logging
import json
from typing import Optional, Dict, List, Any, Union, Literal
from enum import Enum

from dotenv import load_dotenv

# Import the individual client classes.
# These would be imported from your actual module files.
from gemini_copilot_api import GeminiClient, CopilotClient, AIModel as BaseAIModel
from claude_api_integration import ClaudeClient, ClaudeModel

# Configure module-wide logging once at import time.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Load provider credentials from a .env file, if one is present.
load_dotenv()
class AIProvider(Enum):
    """Enum of the AI providers this module can dispatch to.

    The string values are the public identifiers used in CLI input and in
    the "provider" field of every response dict.
    """
    GEMINI = "gemini"
    CLAUDE = "claude"
    COPILOT = "copilot"
class UnifiedAIClient:
    """
    Unified client for interacting with multiple AI models from different providers.

    On construction it attempts to initialize one client per provider and
    records which ones succeeded in `available_providers`.
    """

    def __init__(
        self,
        gemini_api_key: Optional[str] = None,
        anthropic_api_key: Optional[str] = None,
        github_username: Optional[str] = None,
        github_token: Optional[str] = None,
        default_provider: AIProvider = AIProvider.GEMINI
    ):
        """
        Initialize the unified client.

        Args:
            gemini_api_key: The Gemini API key.
            anthropic_api_key: The Anthropic API key.
            github_username: GitHub username (unofficial Copilot access).
            github_token: GitHub personal access token.
            default_provider: The default AI provider to use.

        Raises:
            ValueError: If no provider client could be initialized.
        """
        self.default_provider = default_provider
        self.available_providers: List[AIProvider] = []
        # Maps each successfully initialized provider to its client object.
        self.clients: Dict[AIProvider, Any] = {}

        # Each provider is attempted unconditionally; any failure (missing
        # credentials, network error, import problem inside the client) is
        # logged and that provider is simply left out of available_providers.
        try:
            self.clients[AIProvider.GEMINI] = GeminiClient(api_key=gemini_api_key)
            self.available_providers.append(AIProvider.GEMINI)
            logger.info("Gemini client initialized successfully.")
        except Exception as e:
            logger.warning(f"Failed to initialize Gemini client: {e}")

        try:
            self.clients[AIProvider.CLAUDE] = ClaudeClient(api_key=anthropic_api_key)
            self.available_providers.append(AIProvider.CLAUDE)
            logger.info("Claude client initialized successfully.")
        except Exception as e:
            logger.warning(f"Failed to initialize Claude client: {e}")

        try:
            self.clients[AIProvider.COPILOT] = CopilotClient(
                github_username=github_username,
                github_token=github_token
            )
            self.available_providers.append(AIProvider.COPILOT)
            logger.info("Copilot client initialized (unofficial method).")
        except Exception as e:
            logger.warning(f"Failed to initialize Copilot client: {e}")

        if not self.available_providers:
            logger.error("No AI providers could be initialized. Check your credentials.")
            raise ValueError("No AI providers available. Please provide valid credentials for at least one provider.")

        # Fall back to the first working provider if the requested default
        # did not initialize.
        if default_provider not in self.available_providers:
            self.default_provider = self.available_providers[0]
            logger.warning(f"Default provider {default_provider} not available. Using {self.default_provider} instead.")
def list_available_providers(self) -> List[str]: | |
"""List all available AI providers.""" | |
return [provider.value for provider in self.available_providers] | |
def check_provider_availability(self, provider: AIProvider) -> bool: | |
"""Check if a specific provider is available.""" | |
return provider in self.available_providers | |
def generate_text( | |
self, | |
prompt: str, | |
provider: Optional[AIProvider] = None, | |
**kwargs | |
) -> Dict[str, Any]: | |
""" | |
Generate text using the specified or default AI provider. | |
Args: | |
prompt: The text prompt to send. | |
provider: Which AI provider to use. If None, uses the default. | |
**kwargs: Additional parameters for the provider. | |
Returns: | |
Dict containing the response and metadata. | |
""" | |
# Use default provider if none specified | |
if provider is None: | |
provider = self.default_provider | |
# Check if provider is available | |
if provider not in self.available_providers: | |
return { | |
"text": "", | |
"error": f"Provider {provider.value} is not available.", | |
"status": "error" | |
} | |
# Generate text using the appropriate provider | |
try: | |
if provider == AIProvider.GEMINI: | |
response = self.clients[provider].generate_content(prompt, **kwargs) | |
return { | |
"text": response.get("text", ""), | |
"provider": provider.value, | |
"full_response": response, | |
"status": response.get("status", "unknown") | |
} | |
elif provider == AIProvider.CLAUDE: | |
response = self.clients[provider].generate_message( | |
prompt=prompt, | |
system_prompt=kwargs.get("system_prompt"), | |
temperature=kwargs.get("temperature", 0.7), | |
max_tokens=kwargs.get("max_tokens", 4096) | |
) | |
return { | |
"text": response.get("text", ""), | |
"provider": provider.value, | |
"full_response": response, | |
"status": response.get("status", "unknown") | |
} | |
elif provider == AIProvider.COPILOT: | |
language = kwargs.get("language", "python") | |
response = self.clients[provider].generate_code(prompt, language, **kwargs) | |
return { | |
"text": response.get("code", ""), | |
"provider": provider.value, | |
"full_response": response, | |
"status": response.get("status", "unknown") | |
} | |
else: | |
return { | |
"text": "", | |
"error": f"Unsupported provider: {provider.value}", | |
"status": "error" | |
} | |
except Exception as e: | |
logger.error(f"Error generating text with {provider.value}: {e}") | |
return { | |
"text": "", | |
"error": str(e), | |
"provider": provider.value, | |
"status": "error" | |
} | |
def generate_code( | |
self, | |
prompt: str, | |
language: str = "python", | |
provider: Optional[AIProvider] = None, | |
**kwargs | |
) -> Dict[str, Any]: | |
""" | |
Generate code using the specified or default AI provider. | |
Args: | |
prompt: Description of the code to generate. | |
language: The programming language for the code. | |
provider: Which AI provider to use. If None, uses the default. | |
**kwargs: Additional parameters for the provider. | |
Returns: | |
Dict containing the generated code and metadata. | |
""" | |
# Use default provider if none specified | |
if provider is None: | |
provider = self.default_provider | |
# Check if provider is available | |
if provider not in self.available_providers: | |
return { | |
"code": "", | |
"error": f"Provider {provider.value} is not available.", | |
"status": "error" | |
} | |
# Generate code using the appropriate provider | |
try: | |
if provider == AIProvider.GEMINI: | |
# Create a prompt specifically for code generation | |
code_prompt = f"Generate {language} code for the following task:\n\n{prompt}\n\nProvide only the code, no explanations." | |
response = self.clients[provider].generate_content(code_prompt, **kwargs) | |
return { | |
"code": response.get("text", ""), | |
"language": language, | |
"provider": provider.value, | |
"full_response": response, | |
"status": response.get("status", "unknown") | |
} | |
elif provider == AIProvider.CLAUDE: | |
# Create a prompt specifically for code generation | |
code_prompt = f"Generate {language} code for the following task:\n\n{prompt}\n\nProvide only the code, no explanations." | |
response = self.clients[provider].generate_message( | |
prompt=code_prompt, | |
system_prompt=f"You are an expert {language} programmer. Your task is to write clean, efficient, and well-documented code. Only provide the code, no explanations or commentary.", | |
temperature=kwargs.get("temperature", 0.2), # Lower temperature for code | |
max_tokens=kwargs.get("max_tokens", 4096) | |
) | |
return { | |
"code": response.get("text", ""), | |
"language": language, | |
"provider": provider.value, | |
"full_response": response, | |
"status": response.get("status", "unknown") | |
} | |
elif provider == AIProvider.COPILOT: | |
response = self.clients[provider].generate_code(prompt, language, **kwargs) | |
return { | |
"code": response.get("code", ""), | |
"language": language, | |
"provider": provider.value, | |
"full_response": response, | |
"status": response.get("status", "unknown") | |
} | |
else: | |
return { | |
"code": "", | |
"error": f"Unsupported provider: {provider.value}", | |
"status": "error" | |
} | |
except Exception as e: | |
logger.error(f"Error generating code with {provider.value}: {e}") | |
return { | |
"code": "", | |
"error": str(e), | |
"language": language, | |
"provider": provider.value, | |
"status": "error" | |
} | |
def multimodal_generate( | |
self, | |
prompt: str, | |
image_path: str, | |
provider: Optional[AIProvider] = None, | |
**kwargs | |
) -> Dict[str, Any]: | |
""" | |
Generate content based on text and image inputs. | |
Args: | |
prompt: Text prompt to accompany the image. | |
image_path: Path to the image file. | |
provider: Which AI provider to use. If None, uses default. | |
**kwargs: Additional parameters for the provider. | |
Returns: | |
Dict containing the response and metadata. | |
""" | |
# Use default provider if none specified | |
if provider is None: | |
provider = self.default_provider | |
# Check if provider is available | |
if provider not in self.available_providers: | |
return { | |
"text": "", | |
"error": f"Provider {provider.value} is not available.", | |
"status": "error" | |
} | |
# Check if image exists | |
if not os.path.exists(image_path): | |
return { | |
"text": "", | |
"error": f"Image file not found: {image_path}", | |
"status": "error" | |
} | |
# Generate multimodal content using the appropriate provider | |
try: | |
if provider == AIProvider.GEMINI: | |
response = self.clients[provider].generate_with_image(prompt, image_path, **kwargs) | |
return { | |
"text": response.get("text", ""), | |
"provider": provider.value, | |
"full_response": response, | |
"status": response.get("status", "unknown") | |
} | |
elif provider == AIProvider.CLAUDE: | |
response = self.clients[provider].generate_with_image( | |
prompt=prompt, | |
image_path=image_path, | |
system_prompt=kwargs.get("system_prompt"), | |
temperature=kwargs.get("temperature", 0.7), | |
max_tokens=kwargs.get("max_tokens", 4096) | |
) | |
return { | |
"text": response.get("text", ""), | |
"provider": provider.value, | |
"full_response": response, | |
"status": response.get("status", "unknown") | |
} | |
else: | |
return { | |
"text": "", | |
"error": f"Provider {provider.value} does not support multimodal generation.", | |
"status": "error" | |
} | |
except Exception as e: | |
logger.error(f"Error generating multimodal content with {provider.value}: {e}") | |
return { | |
"text": "", | |
"error": str(e), | |
"provider": provider.value, | |
"status": "error" | |
} | |
def compare_providers( | |
self, | |
prompt: str, | |
providers: Optional[List[AIProvider]] = None, | |
**kwargs | |
) -> Dict[str, Any]: | |
""" | |
Compare responses from multiple AI providers for the same prompt. | |
Args: | |
prompt: The prompt to send to providers. | |
providers: List of providers to compare. If None, uses all available. | |
**kwargs: Additional parameters for the providers. | |
Returns: | |
Dict containing responses from each provider and comparison metrics. | |
""" | |
# Use all available providers if none specified | |
if providers is None: | |
providers = self.available_providers | |
else: | |
# Filter out unavailable providers | |
providers = [p for p in providers if p in self.available_providers] | |
if not providers: | |
return { | |
"error": "No available providers to compare.", | |
"status": "error" | |
} | |
# Get responses from each provider | |
responses = {} | |
for provider in providers: | |
response = self.generate_text(prompt, provider, **kwargs) | |
responses[provider.value] = response | |
# Create comparison metrics | |
comparison = { | |
"length_comparison": { | |
provider.value: len(response.get("text", "")) | |
for provider, response in responses.items() | |
}, | |
"status_comparison": { | |
provider.value: response.get("status", "unknown") | |
for provider, response in responses.items() | |
} | |
} | |
return { | |
"responses": responses, | |
"comparison": comparison, | |
"status": "success" | |
} | |
def create_conversation( | |
self, | |
provider: Optional[AIProvider] = None, | |
initial_prompt: Optional[str] = None, | |
**kwargs | |
) -> Dict[str, Any]: | |
""" | |
Create a new conversation session. | |
Args: | |
provider: Which AI provider to use. If None, uses default. | |
initial_prompt: Optional initial message to start the conversation. | |
**kwargs: Additional parameters for the conversation. | |
Returns: | |
Dict containing the conversation state and initial response if provided. | |
""" | |
# Use default provider if none specified | |
if provider is None: | |
provider = self.default_provider | |
# Check if provider is available | |
if provider not in self.available_providers: | |
error_message = f"Provider {provider.value} is not available." | |
logger.error(error_message) | |
return { | |
"error": error_message, | |
"status": "error" | |
} | |
# Create conversation using the appropriate provider | |
try: | |
if provider == AIProvider.GEMINI: | |
response = self.clients[provider].create_chat(initial_prompt) | |
return { | |
"conversation_id": id(response.get("chat", {})), | |
"conversation": response.get("chat", {}), | |
"response": response.get("response", ""), | |
"provider": provider.value, | |
"status": response.get("status", "unknown") | |
} | |
elif provider == AIProvider.CLAUDE: | |
response = self.clients[provider].create_conversation( | |
initial_prompt=initial_prompt, | |
system_prompt=kwargs.get("system_prompt"), | |
temperature=kwargs.get("temperature", 0.7), | |
max_tokens=kwargs.get("max_tokens", 4096) | |
) | |
return { | |
"conversation_id": id(response.get("conversation", {})), | |
"conversation": response.get("conversation", {}), | |
"response": response.get("response", ""), | |
"provider": provider.value, | |
"status": response.get("status", "unknown") | |
} | |
else: | |
error_message = f"Provider {provider.value} does not support conversations." | |
logger.error(error_message) | |
return { | |
"error": error_message, | |
"status": "error" | |
} | |
except Exception as e: | |
error_message = f"Error creating conversation with {provider.value}: {e}" | |
logger.error(error_message) | |
return { | |
"error": str(e), | |
"status": "error" | |
} | |
def continue_conversation( | |
self, | |
conversation_data: Dict[str, Any], | |
message: str, | |
**kwargs | |
) -> Dict[str, Any]: | |
""" | |
Continue an existing conversation with a new message. | |
Args: | |
conversation_data: Conversation data returned from create_conversation. | |
message: New message to send. | |
**kwargs: Additional parameters for the response. | |
Returns: | |
Dict containing the updated conversation state and response. | |
""" | |
provider_value = conversation_data.get("provider") | |
if not provider_value: | |
error_message = "Invalid conversation data. Provider not specified." | |
logger.error(error_message) | |
return { | |
"error": error_message, | |
"status": "error" | |
} | |
try: | |
provider = AIProvider(provider_value) | |
except ValueError: | |
error_message = f"Unknown provider: {provider_value}" | |
logger.error(error_message) | |
return { | |
"error": error_message, | |
"status": "error" | |
} | |
# Check if provider is available | |
if provider not in self.available_providers: | |
error_message = f"Provider {provider.value} is not available." | |
logger.error(error_message) | |
return { | |
"error": error_message, | |
"status": "error" | |
} | |
# Continue conversation using the appropriate provider | |
try: | |
if provider == AIProvider.GEMINI: | |
chat = conversation_data.get("conversation") | |
if not chat: | |
error_message = "Invalid conversation data. Chat session not found." | |
logger.error(error_message) | |
return { | |
"error": error_message, | |
"status": "error" | |
} | |
response = self.clients[provider].continue_chat(chat, message) | |
# Update conversation data | |
conversation_data["response"] = response.get("text", "") | |
return { | |
"conversation_id": conversation_data.get("conversation_id"), | |
"conversation": conversation_data.get("conversation"), | |
"response": response.get("text", ""), | |
"provider": provider.value, | |
"full_response": response, | |
"status": response.get("status", "unknown") | |
} | |
elif provider == AIProvider.CLAUDE: | |
conversation = conversation_data.get("conversation") | |
if not conversation: | |
error_message = "Invalid conversation data. Conversation not found." | |
logger.error(error_message) | |
return { | |
"error": error_message, | |
"status": "error" | |
} | |
response = self.clients[provider].continue_conversation( | |
conversation=conversation, | |
message=message, | |
temperature=kwargs.get("temperature", 0.7), | |
max_tokens=kwargs.get("max_tokens", 4096) | |
) | |
return { | |
"conversation_id": conversation_data.get("conversation_id"), | |
"conversation": response.get("conversation", {}), | |
"response": response.get("response", ""), | |
"provider": provider.value, | |
"full_response": response, | |
"status": response.get("status", "unknown") | |
} | |
else: | |
error_message = f"Provider {provider.value} does not support conversations." | |
logger.error(error_message) | |
return { | |
"error": error_message, | |
"status": "error" | |
} | |
except Exception as e: | |
error_message = f"Error continuing conversation with {provider.value}: {e}" | |
logger.error(error_message) | |
return { | |
"error": str(e), | |
"status": "error" | |
} | |
def stream_response( | |
self, | |
prompt: str, | |
provider: Optional[AIProvider] = None, | |
**kwargs | |
): | |
""" | |
Stream a response from the specified provider. | |
Args: | |
prompt: The text prompt to send. | |
provider: Which AI provider to use. If None, uses the default. | |
**kwargs: Additional parameters for the provider. | |
Returns: | |
Generator that yields response chunks as they become available. | |
""" | |
# Use default provider if none specified | |
if provider is None: | |
provider = self.default_provider | |
# Check if provider is available | |
if provider not in self.available_providers: | |
yield { | |
"text": "", | |
"error": f"Provider {provider.value} is not available.", | |
"status": "error" | |
} | |
return | |
# Stream response using the appropriate provider | |
try: | |
if provider == AIProvider.CLAUDE: | |
# Only Claude supports streaming as of now | |
for chunk in self.clients[provider].stream_message( | |
prompt=prompt, | |
system_prompt=kwargs.get("system_prompt"), | |
temperature=kwargs.get("temperature", 0.7), | |
max_tokens=kwargs.get("max_tokens", 4096) | |
): | |
yield { | |
"text": chunk.get("text", ""), | |
"provider": provider.value, | |
"status": chunk.get("status", "streaming") | |
} | |
else: | |
# For non-streaming providers, generate the full response and yield it | |
response = self.generate_text(prompt, provider, **kwargs) | |
yield { | |
"text": response.get("text", ""), | |
"provider": provider.value, | |
"status": "complete", | |
"note": f"{provider.value} does not support streaming. Full response generated." | |
} | |
except Exception as e: | |
logger.error(f"Error streaming response from {provider.value}: {e}") | |
yield { | |
"text": "", | |
"error": str(e), | |
"provider": provider.value, | |
"status": "error" | |
} | |
def generate_with_tools( | |
self, | |
prompt: str, | |
tools: List[Dict], | |
provider: Optional[AIProvider] = None, | |
**kwargs | |
) -> Dict[str, Any]: | |
""" | |
Generate a response with tool-calling capabilities. | |
Args: | |
prompt: The text prompt to send. | |
tools: List of tool definitions following the provider's tool specification. | |
provider: Which AI provider to use. If None, uses the default. | |
**kwargs: Additional parameters for the provider. | |
Returns: | |
Dict containing the response and any tool calls made. | |
""" | |
# Use default provider if none specified | |
if provider is None: | |
provider = self.default_provider | |
# Check if provider is available | |
if provider not in self.available_providers: | |
return { | |
"text": "", | |
"tool_calls": [], | |
"error": f"Provider {provider.value} is not available.", | |
"status": "error" | |
} | |
# Generate with tools using the appropriate provider | |
try: | |
if provider == AIProvider.CLAUDE: | |
# Only Claude supports tools as of now | |
response = self.clients[provider].generate_with_tools( | |
prompt=prompt, | |
tools=tools, | |
system_prompt=kwargs.get("system_prompt"), | |
temperature=kwargs.get("temperature", 0.7), | |
max_tokens=kwargs.get("max_tokens", 4096) | |
) | |
return { | |
"text": response.get("text", ""), | |
"tool_calls": response.get("tool_calls", []), | |
"provider": provider.value, | |
"full_response": response, | |
"status": response.get("status", "unknown") | |
} | |
else: | |
return { | |
"text": "", | |
"tool_calls": [], | |
"error": f"Provider {provider.value} does not support tool calls.", | |
"status": "error" | |
} | |
except Exception as e: | |
logger.error(f"Error generating with tools using {provider.value}: {e}") | |
return { | |
"text": "", | |
"tool_calls": [], | |
"error": str(e), | |
"provider": provider.value, | |
"status": "error" | |
} | |
def execute_tool_and_continue( | |
self, | |
conversation_data: Dict[str, Any], | |
tool_call: Dict[str, Any], | |
tool_result: Any, | |
**kwargs | |
) -> Dict[str, Any]: | |
""" | |
Execute a tool and continue the conversation with the tool result. | |
Args: | |
conversation_data: Conversation data returned from create_conversation. | |
tool_call: Tool call information returned from generate_with_tools. | |
tool_result: Result of executing the tool. | |
**kwargs: Additional parameters for the response. | |
Returns: | |
Dict containing the updated conversation state and response. | |
""" | |
provider_value = conversation_data.get("provider") | |
if not provider_value: | |
return { | |
"error": "Invalid conversation data. Provider not specified.", | |
"status": "error" | |
} | |
try: | |
provider = AIProvider(provider_value) | |
except ValueError: | |
return { | |
"error": f"Unknown provider: {provider_value}", | |
"status": "error" | |
} | |
# Check if provider is available | |
if provider not in self.available_providers: | |
return { | |
"error": f"Provider {provider.value} is not available.", | |
"status": "error" | |
} | |
# Currently only Claude supports tool calls | |
if provider == AIProvider.CLAUDE: | |
try: | |
# Get the conversation | |
conversation = conversation_data.get("conversation") | |
if not conversation: | |
return { | |
"error": "Invalid conversation data. Conversation not found.", | |
"status": "error" | |
} | |
# Format the tool result as expected by Claude | |
tool_response = { | |
"type": "tool_result", | |
"tool_call_id": tool_call.get("id"), | |
"tool_name": tool_call.get("tool_name"), | |
"result": json.dumps(tool_result) if not isinstance(tool_result, str) else tool_result | |
} | |
# Add the tool response to the conversation | |
conversation["messages"].append({ | |
"role": "assistant", | |
"content": tool_response | |
}) | |
# Get the next response from Claude | |
response = self.clients[provider].continue_conversation( | |
conversation=conversation, | |
message="", # No new user message needed, we're just continuing after tool execution | |
temperature=kwargs.get("temperature", 0.7), | |
max_tokens=kwargs.get("max_tokens", 4096) | |
) | |
return { | |
"conversation_id": conversation_data.get("conversation_id"), | |
"conversation": response.get("conversation", {}), | |
"response": response.get("response", ""), | |
"provider": provider.value, | |
"full_response": response, | |
"status": response.get("status", "unknown") | |
} | |
except Exception as e: | |
logger.error(f"Error executing tool and continuing conversation with {provider.value}: {e}") | |
return { | |
"error": str(e), | |
"status": "error" | |
} | |
else: | |
return { | |
"error": f"Provider {provider.value} does not support tool calls.", | |
"status": "error" | |
} | |
if __name__ == "__main__":
    # Simple interactive CLI to exercise the Unified AI client.
    print("Unified AI Client - Test CLI")
    print("===========================")

    client = UnifiedAIClient()
    print(f"Available providers: {', '.join(client.list_available_providers())}")
    print(f"Default provider: {client.default_provider.value}")

    while True:
        print("\nOptions:")
        print("1. Generate text")
        print("2. Generate code")
        print("3. Compare providers")
        print("4. Start conversation")
        print("5. Exit")
        choice = input("Enter your choice (1-5): ")

        if choice == "1":
            prompt = input("Enter your prompt: ")
            provider_str = input(f"Enter provider ({', '.join(client.list_available_providers())}) or leave blank for default: ")
            if provider_str:
                try:
                    provider = AIProvider(provider_str)
                except ValueError:
                    print(f"Invalid provider: {provider_str}")
                    continue
            else:
                provider = None
            response = client.generate_text(prompt, provider)
            print("\nResponse:")
            print(response.get("text", ""))

        elif choice == "2":
            prompt = input("Enter your code description: ")
            language = input("Enter programming language (default: python): ") or "python"
            provider_str = input(f"Enter provider ({', '.join(client.list_available_providers())}) or leave blank for default: ")
            if provider_str:
                try:
                    provider = AIProvider(provider_str)
                except ValueError:
                    print(f"Invalid provider: {provider_str}")
                    continue
            else:
                provider = None
            response = client.generate_code(prompt, language, provider)
            print("\nGenerated code:")
            print(response.get("code", ""))

        elif choice == "3":
            prompt = input("Enter your prompt: ")
            response = client.compare_providers(prompt)
            print("\nComparison results:")
            for provider_name, res in response.get("responses", {}).items():
                print(f"\n=== {provider_name.upper()} ===")
                print(res.get("text", ""))
            print("\nLength comparison:")
            for provider_name, length in response.get("comparison", {}).get("length_comparison", {}).items():
                print(f"{provider_name}: {length} chars")

        elif choice == "4":
            print("\nAvailable providers for conversation:")
            print(", ".join(client.list_available_providers()))
            provider_str = input(f"Enter provider ({', '.join(client.list_available_providers())}) or leave blank for default: ")
            if provider_str:
                try:
                    provider = AIProvider(provider_str)
                except ValueError:
                    print(f"Invalid provider: {provider_str}")
                    continue
            else:
                provider = None

            initial_prompt = input("Enter initial message (or leave blank): ")
            if initial_prompt:
                conversation = client.create_conversation(provider, initial_prompt)
                print("\nResponse:")
                print(conversation.get("response", ""))
            else:
                conversation = client.create_conversation(provider)
                print("\nConversation started. You can now send messages.")

            # Bail out before the chat loop if the conversation failed to start.
            if conversation.get("status") == "error":
                print(f"Error: {conversation.get('error')}")
                continue

            # Label replies with the provider actually used. (The original
            # printed provider.value, which crashed with AttributeError when
            # the user left the provider blank and `provider` was None.)
            provider_name = conversation.get("provider", client.default_provider.value)

            while True:
                user_message = input("You: ")
                if user_message.lower() in ["exit", "quit"]:
                    print("Exiting conversation.")
                    break
                response = client.continue_conversation(conversation, user_message)
                if response.get("status") == "error":
                    print(f"Error: {response.get('error')}")
                    break
                print(f"AI ({provider_name}): {response.get('response', '')}")

        elif choice == "5":
            print("Exiting...")
            break

        else:
            print("Invalid choice. Please enter a number between 1 and 5.")
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment