Using the OpenTelemetry Python SDK

How to send data to Opik using the OpenTelemetry Python SDK

This guide shows you how to directly instrument your Python applications with the OpenTelemetry SDK to send trace data to Opik.

Installation

First, install the required OpenTelemetry packages:

$ pip install opentelemetry-api opentelemetry-sdk opentelemetry-exporter-otlp

Full Example

Here’s a complete example that demonstrates how to instrument a chatbot application with OpenTelemetry and send the traces to Opik:

# Dependencies: opentelemetry-exporter-otlp

import os
import time

from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.semconv.resource import ResourceAttributes


# --- OpenTelemetry configuration -------------------------------------------
# The OTLP exporter reads these environment variables when it is constructed,
# so they must be set before OTLPSpanExporter() is created below.

# For comet.com
os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://www.comet.com/opik/api/v1/private/otel"
os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = (
    "Authorization=<your-api-key>,"
    "Comet-Workspace=<your-workspace-name>,"
    "projectName=<your-project-name>"
)

# Identify this service in the exported traces.
resource = Resource.create({ResourceAttributes.SERVICE_NAME: "opentelemetry-example"})

# Wire up provider -> batch processor -> OTLP HTTP exporter.
tracer_provider = TracerProvider(resource=resource)
tracer_provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter()))

# Install the provider globally, then obtain a tracer from it.
trace.set_tracer_provider(tracer_provider)
tracer = trace.get_tracer("example-tracer")
def main():
    """Simulate one chatbot turn and record it as a parent/child span pair."""
    # Simulated user request.
    question = "What's the weather like today?"

    # Parent span: represents the entire chatbot conversation turn.
    with tracer.start_as_current_span("chatbot_conversation") as conversation_span:
        print(f"User request: {question}")

        # Attach the user request and conversation metadata to the parent span.
        conversation_span.set_attribute("input", question)
        conversation_span.set_attribute("conversation.id", "conv_12345")
        conversation_span.set_attribute("conversation.type", "weather_inquiry")

        # thread_id groups related spans into a single conversational thread.
        conversation_span.set_attribute("thread_id", "user_12345")

        # Simulate initial request processing.
        time.sleep(0.2)

        # Child span: the LLM call, annotated with GenAI semantic conventions.
        with tracer.start_as_current_span("llm_completion") as llm_span:
            print("Generating LLM response...")

            # Prompt handed to the (simulated) LLM. Adjacent string literals
            # concatenate, so the value is identical to a single f-string.
            prompt = (
                f"User question: {question}\n\n"
                "Provide a concise answer about the weather."
            )

            # GenAI semantic-convention attributes (token counts are examples).
            gen_ai_attributes = {
                "gen_ai.operation.name": "completion",
                "gen_ai.system": "gpt",
                "gen_ai.request.model": "gpt-4",
                "gen_ai.response.model": "gpt-4",
                "gen_ai.request.input": prompt,
                "gen_ai.usage.input_tokens": 10,
                "gen_ai.usage.output_tokens": 25,
                "gen_ai.usage.total_tokens": 35,
                "gen_ai.request.temperature": 0.7,
                "gen_ai.request.max_tokens": 100,
            }
            for attribute, value in gen_ai_attributes.items():
                llm_span.set_attribute(attribute, value)

            # Simulate LLM thinking time.
            time.sleep(0.5)

            # Simulated chatbot response, recorded on the LLM span.
            answer = "It's sunny with a high of 75°F in your area today!"
            llm_span.set_attribute("gen_ai.response.output", answer)

            print("LLM generation completed")

        # Back in the parent span context: record the final output.
        conversation_span.set_attribute("output", answer)

        print(f"Chatbot response: {answer}")
if __name__ == "__main__":
    try:
        main()
    finally:
        # Flush all buffered spans before the program exits. Run this even if
        # main() raised — otherwise spans queued in the BatchSpanProcessor
        # would be silently dropped.
        tracer_provider.shutdown()

    print("\nSpans have been sent to OpenTelemetry collector.")
    print("If you configured Comet.com, you can view the traces in your Comet project.")

Using thread_id as a span attribute allows you to group related spans into a single conversational thread. The resulting threads can then be used to evaluate multi-turn conversations, as described in the Multi-turn conversations guide.