Skip to content

Instantly share code, notes, and snippets.

@owainlewis
Created September 17, 2025 13:57
Show Gist options
  • Save owainlewis/ff9cbed15108067bf2946f3a3488b977 to your computer and use it in GitHub Desktop.
Calling OCI GenAI
"""Minimal example: send one chat message to OCI Generative AI (Cohere model)
using on-demand serving mode and print the generated reply."""
import oci
from oci.generative_ai_inference import GenerativeAiInferenceClient
from oci.generative_ai_inference.models import (
    ChatDetails,
    CohereChatRequest,
    OnDemandServingMode,
)

# Initialize the client from the standard OCI config file (DEFAULT profile).
config = oci.config.from_file("~/.oci/config", "DEFAULT")
client = GenerativeAiInferenceClient(config)

# Create the chat request
chat_request = CohereChatRequest(
    message="Hello, how are you?",
)

# Create chat details with on-demand serving mode (shared, pay-per-request
# endpoint identified by model_id rather than a dedicated cluster).
chat_details = ChatDetails(
    compartment_id="ocid1.compartment.oc1..xyz",  # NOTE(review): placeholder — replace with a real compartment OCID
    serving_mode=OnDemandServingMode(
        model_id="cohere.command-a-03-2025"
    ),
    chat_request=chat_request,
)

# Make the API call. Broad `except Exception` is acceptable here: this is the
# top-level boundary of a standalone script, and the error is reported.
try:
    response = client.chat(chat_details)
    if response.status == 200:
        chat_result = response.data
        # Extract the text from the response; fall back to the raw payload
        # if the expected attributes are absent.
        if hasattr(chat_result, 'chat_response') and hasattr(chat_result.chat_response, 'text'):
            print(chat_result.chat_response.text)
        else:
            print(str(chat_result))
    else:
        print(f"Error: Request failed with status {response.status}")
except Exception as e:
    print(f"Error: {e}")
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment