This script demonstrates how to instrument OpenAI API calls with OpenTelemetry to enable LLM observability with Maxim AI.
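To try it, install the packages the imports below rely on (openai, python-dotenv, opentelemetry-sdk, and opentelemetry-exporter-otlp-proto-http), set OPENAI_API_KEY in your environment or a .env file, and fill in your Maxim API key and log repository ID where indicated.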
import json
import os
from time import sleep

from dotenv import load_dotenv
from openai import OpenAI
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk import trace as trace_sdk
from opentelemetry.sdk.trace.export import SimpleSpanProcessor

load_dotenv()
# Fill in your Maxim API key and the ID of the log repository that should receive traces
maxim_api_key = ""
repo_id = ""
# Export spans over OTLP/HTTP to Maxim; the API key and repo ID travel as request headers
tracer_provider = trace_sdk.TracerProvider()
span_exporter = OTLPSpanExporter(
    endpoint=os.getenv(
        "OTEL_EXPORTER_OTLP_ENDPOINT",
        "https://api.getmaxim.ai/v1/otel",
    ),
    headers={
        "x-maxim-api-key": maxim_api_key,
        "x-maxim-repo-id": repo_id,
    },
)
# SimpleSpanProcessor exports each span synchronously as soon as it ends
span_processor = SimpleSpanProcessor(span_exporter)
tracer_provider.add_span_processor(span_processor)
trace.set_tracer_provider(tracer_provider)
tracer = trace.get_tracer("maxim_test_tracer")
client = OpenAI()  # reads OPENAI_API_KEY from the environment
# Parent span wrapping the whole demo run
with tracer.start_as_current_span("test") as span:
    span.set_attribute("parent_span", "This is a parent span")
    sleep(1)
    with tracer.start_as_current_span("genai_chat_completion") as genai_span:
        # Set request attributes following the OpenTelemetry GenAI semantic conventions
        genai_span.set_attribute("gen_ai.system", "openai")
        genai_span.set_attribute("gen_ai.request.model", "gpt-4o-mini")
        genai_span.set_attribute("gen_ai.request.max_tokens", 200)
        system_message = "You are a helpful assistant and you are going to answer in one sentence only."
        user_message = "What is LLM Observability?"
        # System message event
        genai_span.add_event(
            "gen_ai.system.message",
            attributes={
                "gen_ai.system": "openai",
                "content": system_message,
                "role": "system",
            },
        )
        # User message event
        genai_span.add_event(
            "gen_ai.user.message",
            attributes={
                "gen_ai.system": "openai",
                "content": user_message,
                "role": "user",
            },
        )
        # Make the API call (max_tokens matches the attribute recorded above)
        chat_completion = client.chat.completions.create(
            messages=[
                {
                    "role": "system",
                    "content": system_message,
                },
                {
                    "role": "user",
                    "content": user_message,
                },
            ],
            model="gpt-4o-mini",
            max_tokens=200,
        )
        # Set response attributes
        genai_span.set_attribute("gen_ai.response.id", chat_completion.id)
        genai_span.set_attribute("gen_ai.response.model", chat_completion.model)
        genai_span.set_attribute(
            "gen_ai.usage.input_tokens", chat_completion.usage.prompt_tokens
        )
        genai_span.set_attribute(
            "gen_ai.usage.output_tokens", chat_completion.usage.completion_tokens
        )
        # Assistant message event (optional - captures the response)
        genai_span.add_event(
            "gen_ai.assistant.message",
            attributes={
                "gen_ai.system": "openai",
                "content": chat_completion.choices[0].message.content,
                "role": "assistant",
            },
        )
        # Choice events - one for each choice
        for choice in chat_completion.choices:
            choice_body = {
                "finish_reason": choice.finish_reason,
                "index": choice.index,
                "message": {
                    "content": choice.message.content,
                    "role": choice.message.role,
                },
            }
            # Only include tool_calls if present
            if choice.message.tool_calls:
                choice_body["message"]["tool_calls"] = [
                    {
                        "id": tool_call.id,
                        "type": tool_call.type,
                        "function": {
                            "name": tool_call.function.name,
                            "arguments": tool_call.function.arguments,
                        },
                    }
                    for tool_call in choice.message.tool_calls
                ]
            genai_span.add_event(
                "gen_ai.choice",
                attributes={
                    "finish_reason": choice_body["finish_reason"],
                    "index": choice_body["index"],
                    "message": json.dumps(choice_body["message"]),
                },
            )
        with tracer.start_as_current_span("grandchild_function") as grandchild_span:
            grandchild_span.set_attribute("grandchild_span", "This is a grandchild span")
            sleep(1)

print("Hello, world Maxim!")
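SimpleSpanProcessor blocks on every span export, which is fine for a one-off script like this. For anything long-running you would typically batch exports and flush on exit instead; a minimal sketch, assuming the same tracer_provider and span_exporter as above:

from opentelemetry.sdk.trace.export import BatchSpanProcessor

# Queue spans and export them in batches on a background thread
tracer_provider.add_span_processor(BatchSpanProcessor(span_exporter))

# ... instrumented workload runs here ...

# Flush any queued spans before the process exits
tracer_provider.shutdown()

While debugging, registering a ConsoleSpanExporter alongside the OTLP exporter is also a quick way to confirm locally what is being sent to Maxim.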