OTEL remapping: an OpenTelemetry Collector configuration that snake_cases incoming span attribute keys, then remaps OpenInference, OpenAI, AWS Bedrock, and Azure AI Inference attributes onto the gen_ai.* semantic conventions before exporting over OTLP.
receivers:
  otlp:
    protocols:
      grpc: {}
      http: {}

processors:
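  # The two regex passes below convert camelCase attribute keys to snake_case
  # (e.g. a hypothetical incoming key "llmModelName" becomes "llm_model_name",
  # and "HTTPStatusCode" becomes "http_status_code"), so the gen_ai remapping
  # rules only ever have to match one spelling. The example keys are
  # illustrative, not taken from a specific SDK.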
  transform/normalise_keys:
    error_mode: ignore
    trace_statements:
      - context: span
        statements:
          # $$ escapes $ so the collector's env-var expansion leaves the
          # regex capture groups intact
          - replace_all_patterns(attributes, "key", "([a-z0-9])([A-Z])", "$$1_$$2")
          - replace_all_patterns(attributes, "key", "([A-Z]+)([A-Z][a-z])", "$$1_$$2")
          - set(attributes["gen_ai.system"], ConvertCase(attributes["gen_ai.system"], "lower")) where attributes["gen_ai.system"] != nil
  transform/genai:
    error_mode: ignore
    trace_statements:
      # OpenInference
      - context: span
        statements:
          # OTTL has no free-standing variables, so parse the JSON parameter
          # blob into the transient cache map once and read fields from it
          - set(cache["params"], ParseJSON(attributes["llm.invocation_parameters"])) where attributes["llm.invocation_parameters"] != nil
          - set(attributes["gen_ai.system"], "openinference") where attributes["llm.model_name"] != nil
          - set(attributes["gen_ai.request.model"], attributes["llm.model_name"]) where attributes["llm.model_name"] != nil
          - set(attributes["gen_ai.usage.input_tokens"], attributes["llm.token_count.prompt"]) where attributes["llm.token_count.prompt"] != nil
          - set(attributes["gen_ai.usage.output_tokens"], attributes["llm.token_count.completion"]) where attributes["llm.token_count.completion"] != nil
          - set(attributes["gen_ai.usage.total_tokens"], attributes["llm.token_count.total"]) where attributes["llm.token_count.total"] != nil
          - set(attributes["gen_ai.content.prompt"], attributes["llm.input.prompt"]) where attributes["llm.input.prompt"] != nil
          - set(attributes["gen_ai.content.completion"], attributes["llm.output.generated_text"]) where attributes["llm.output.generated_text"] != nil
          - set(attributes["gen_ai.request.temperature"], cache["params"]["temperature"]) where cache["params"] != nil and cache["params"]["temperature"] != nil
          - set(attributes["gen_ai.request.top_p"], cache["params"]["top_p"]) where cache["params"] != nil and cache["params"]["top_p"] != nil
          - set(attributes["gen_ai.request.max_tokens"], cache["params"]["max_tokens"]) where cache["params"] != nil and cache["params"]["max_tokens"] != nil
          - set(attributes["gen_ai.request.presence_penalty"], cache["params"]["presence_penalty"]) where cache["params"] != nil and cache["params"]["presence_penalty"] != nil
          - set(attributes["gen_ai.request.frequency_penalty"], cache["params"]["frequency_penalty"]) where cache["params"] != nil and cache["params"]["frequency_penalty"] != nil
          - set(attributes["gen_ai.request.stop_sequences"], cache["params"]["stop_sequences"]) where cache["params"] != nil and cache["params"]["stop_sequences"] != nil
          # clean up originals (cache entries are transient and need no cleanup)
          - delete_key(attributes, "llm.model_name")
          - delete_key(attributes, "llm.token_count.prompt")
          - delete_key(attributes, "llm.token_count.completion")
          - delete_key(attributes, "llm.token_count.total")
          - delete_key(attributes, "llm.input.prompt")
          - delete_key(attributes, "llm.output.generated_text")
          - delete_key(attributes, "llm.invocation_parameters")
      # OpenAI
      - context: span
        statements:
- set(attributes["gen_ai.system"], "openai") where attributes["openai.request.model"] != nil | |
- set(attributes["gen_ai.request.model"], attributes["openai.request.model"]) where attributes["openai.request.model"] != nil | |
- set(attributes["gen_ai.response.id"], attributes["openai.response.id"]) where attributes["openai.response.id"] != nil | |
- set(attributes["gen_ai.response.model"], attributes["openai.response.model"]) where attributes["openai.response.model"] != nil | |
- set(attributes["gen_ai.openai.request.service_tier"], attributes["openai.request.service_tier"]) where attributes["openai.request.service_tier"] != nil | |
- set(attributes["gen_ai.openai.response.service_tier"], attributes["openai.response.service_tier"]) where attributes["openai.response.service_tier"] != nil | |
- set(attributes["gen_ai.usage.input_tokens"], attributes["openai.usage.prompt_tokens"]) where attributes["openai.usage.prompt_tokens"] != nil | |
- set(attributes["gen_ai.usage.output_tokens"], attributes["openai.usage.completion_tokens"]) where attributes["openai.usage.completion_tokens"] != nil | |
- set(attributes["gen_ai.usage.total_tokens"], attributes["openai.usage.total_tokens"]) where attributes["openai.usage.total_tokens"] != nil | |
- set(attributes["gen_ai.content.prompt"], coalesce([attributes["openai.request.prompt"], attributes["openai.request.messages"]])) where attributes["openai.request.prompt"] != nil || attributes["openai.request.messages"] != nil | |
- set(attributes["gen_ai.content.completion"], coalesce([attributes["openai.response.text"], json_extract(attributes["openai.response.choices"], "$[0].message.content")])) where attributes["openai.response.text"] != nil || attributes["openai.response.choices"] != nil | |
- set(attributes["gen_ai.request.temperature"], attributes["openai.request.temperature"]) where attributes["openai.request.temperature"] != nil | |
- set(attributes["gen_ai.request.max_tokens"], attributes["openai.request.max_tokens"]) where attributes["openai.request.max_tokens"] != nil | |
- set(attributes["gen_ai.request.top_p"], attributes["openai.request.top_p"]) where attributes["openai.request.top_p"] != nil | |
- set(attributes["gen_ai.request.presence_penalty"], attributes["openai.request.presence_penalty"]) where attributes["openai.request.presence_penalty"] != nil | |
- set(attributes["gen_ai.request.frequency_penalty"], attributes["openai.request.frequency_penalty"]) where attributes["openai.request.frequency_penalty"] != nil | |
- set(attributes["gen_ai.request.stop_sequences"], attributes["openai.request.stop"]) where attributes["openai.request.stop"] != nil | |
- set(attributes["gen_ai.response.finish_reasons"], json_extract(attributes["openai.response.choices"], "$[0].finish_reason")) where attributes["openai.response.choices"] != nil | |
          # delete OpenAI originals (the gen_ai.* copies remain)
          - delete_key(attributes, "openai.request.model")
          - delete_key(attributes, "openai.response.id")
          - delete_key(attributes, "openai.response.model")
          - delete_key(attributes, "openai.request.service_tier")
          - delete_key(attributes, "openai.response.service_tier")
          - delete_key(attributes, "openai.usage.prompt_tokens")
          - delete_key(attributes, "openai.usage.completion_tokens")
          - delete_key(attributes, "openai.usage.total_tokens")
          - delete_key(attributes, "openai.request.prompt")
          - delete_key(attributes, "openai.request.messages")
          - delete_key(attributes, "openai.response.text")
          - delete_key(attributes, "openai.response.choices")
          - delete_key(attributes, "openai.request.temperature")
          - delete_key(attributes, "openai.request.max_tokens")
          - delete_key(attributes, "openai.request.top_p")
          - delete_key(attributes, "openai.request.presence_penalty")
          - delete_key(attributes, "openai.request.frequency_penalty")
          - delete_key(attributes, "openai.request.stop")
      # AWS Bedrock
      - context: span
        statements:
          - set(attributes["gen_ai.system"], "aws.bedrock") where attributes["aws.bedrock.model_id"] != nil
          - set(attributes["gen_ai.request.model"], attributes["aws.bedrock.model_id"]) where attributes["aws.bedrock.model_id"] != nil
          - set(attributes["gen_ai.response.id"], attributes["aws.bedrock.response.request_id"]) where attributes["aws.bedrock.response.request_id"] != nil
          - set(attributes["gen_ai.response.model"], attributes["aws.bedrock.model_id"]) where attributes["aws.bedrock.model_id"] != nil
          - set(attributes["gen_ai.usage.input_tokens"], attributes["aws.bedrock.usage.input_tokens"]) where attributes["aws.bedrock.usage.input_tokens"] != nil
          - set(attributes["gen_ai.usage.output_tokens"], attributes["aws.bedrock.usage.output_tokens"]) where attributes["aws.bedrock.usage.output_tokens"] != nil
          - set(attributes["gen_ai.content.prompt"], attributes["aws.bedrock.request.prompt"]) where attributes["aws.bedrock.request.prompt"] != nil
          - set(attributes["gen_ai.content.completion"], attributes["aws.bedrock.response.text"]) where attributes["aws.bedrock.response.text"] != nil
          - set(attributes["gen_ai.response.finish_reasons"], attributes["aws.bedrock.response.stop_reason"]) where attributes["aws.bedrock.response.stop_reason"] != nil
          - set(attributes["gen_ai.output.type"], attributes["aws.bedrock.response.content_type"]) where attributes["aws.bedrock.response.content_type"] != nil
          # delete Bedrock originals
          - delete_key(attributes, "aws.bedrock.model_id")
          - delete_key(attributes, "aws.bedrock.usage.input_tokens")
          - delete_key(attributes, "aws.bedrock.usage.output_tokens")
          - delete_key(attributes, "aws.bedrock.request.prompt")
          - delete_key(attributes, "aws.bedrock.response.text")
          - delete_key(attributes, "aws.bedrock.response.stop_reason")
          - delete_key(attributes, "aws.bedrock.response.request_id")
          - delete_key(attributes, "aws.bedrock.response.content_type")
      # Azure AI Inference
      - context: span
        statements:
          - set(attributes["gen_ai.system"], "az.ai.inference") where attributes["azure.ai.model_name"] != nil or attributes["azure.ai.model"] != nil
          # fallback first, preferred source last (see the OpenAI block above)
          - set(attributes["gen_ai.request.model"], attributes["azure.ai.model"]) where attributes["azure.ai.model"] != nil
          - set(attributes["gen_ai.request.model"], attributes["azure.ai.model_name"]) where attributes["azure.ai.model_name"] != nil
          - set(attributes["gen_ai.operation.name"], attributes["azure.ai.operation_name"]) where attributes["azure.ai.operation_name"] != nil
          - set(attributes["gen_ai.request.temperature"], attributes["azure.ai.temperature"]) where attributes["azure.ai.temperature"] != nil
          - set(attributes["gen_ai.request.max_tokens"], attributes["azure.ai.request.max_tokens"]) where attributes["azure.ai.request.max_tokens"] != nil
          - set(attributes["gen_ai.request.top_p"], attributes["azure.ai.request.top_p"]) where attributes["azure.ai.request.top_p"] != nil
          - set(attributes["gen_ai.request.choice.count"], attributes["azure.ai.request.choice_count"]) where attributes["azure.ai.request.choice_count"] != nil
          - set(attributes["gen_ai.request.seed"], attributes["azure.ai.request.seed"]) where attributes["azure.ai.request.seed"] != nil
          - set(attributes["gen_ai.usage.input_tokens"], attributes["azure.ai.usage.input_tokens"]) where attributes["azure.ai.usage.input_tokens"] != nil
          - set(attributes["gen_ai.usage.output_tokens"], attributes["azure.ai.usage.output_tokens"]) where attributes["azure.ai.usage.output_tokens"] != nil
          - set(attributes["gen_ai.content.prompt"], attributes["azure.ai.request.prompt"]) where attributes["azure.ai.request.prompt"] != nil
          - set(attributes["gen_ai.content.completion"], attributes["azure.ai.response.text"]) where attributes["azure.ai.response.text"] != nil
          - set(attributes["gen_ai.response.finish_reasons"], attributes["azure.ai.response.finish_reason"]) where attributes["azure.ai.response.finish_reason"] != nil
          - set(attributes["gen_ai.response.id"], attributes["azure.ai.response.request_id"]) where attributes["azure.ai.response.request_id"] != nil
          - set(attributes["gen_ai.response.model"], attributes["azure.ai.response.model"]) where attributes["azure.ai.response.model"] != nil
          # delete Azure originals
          - delete_key(attributes, "azure.ai.model_name")
          - delete_key(attributes, "azure.ai.model")
          - delete_key(attributes, "azure.ai.operation_name")
          - delete_key(attributes, "azure.ai.temperature")
          - delete_key(attributes, "azure.ai.request.max_tokens")
          - delete_key(attributes, "azure.ai.request.top_p")
          - delete_key(attributes, "azure.ai.request.choice_count")
          - delete_key(attributes, "azure.ai.request.seed")
          - delete_key(attributes, "azure.ai.usage.input_tokens")
          - delete_key(attributes, "azure.ai.usage.output_tokens")
          - delete_key(attributes, "azure.ai.request.prompt")
          - delete_key(attributes, "azure.ai.response.text")
          - delete_key(attributes, "azure.ai.response.finish_reason")
          - delete_key(attributes, "azure.ai.response.request_id")
          - delete_key(attributes, "azure.ai.response.model")
exporters:
  otlp/fiddler:
    # ${env:VAR} is the collector's supported env-var syntax; the bare ${VAR}
    # form is deprecated
    endpoint: "${env:FIDDLER_OTEL_ENDPOINT}"
    tls:
      insecure: true  # plaintext OTLP; enable TLS before sending production traffic

service:
  pipelines:
    traces:
      receivers: [otlp]
      processors: [transform/normalise_keys, transform/genai]
      exporters: [otlp/fiddler]
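A minimal way to run this, as a Compose sketch: it assumes the config is saved
as otel-collector.yaml, uses the contrib image (the transform processor ships
only in the collector's contrib distribution), and the endpoint value is a
hypothetical placeholder.

services:
  otel-collector:
    image: otel/opentelemetry-collector-contrib:latest
    command: ["--config=/etc/otelcol-contrib/config.yaml"]
    environment:
      # hypothetical endpoint; point this at your Fiddler OTLP ingest address
      FIDDLER_OTEL_ENDPOINT: "ingest.example.com:4317"
    volumes:
      - ./otel-collector.yaml:/etc/otelcol-contrib/config.yaml:ro
    ports:
      - "4317:4317"  # OTLP gRPC in
      - "4318:4318"  # OTLP HTTP in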