Skip to content

Commit b442f8f

Browse files
committed
use grpc endpoint in openai samples, add extra env vars to readme
1 parent 48f0b38 commit b442f8f

File tree

7 files changed

+29
-10
lines changed

7 files changed

+29
-10
lines changed

instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/manual/.env

+3-3
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,9 @@ OPENAI_API_KEY=sk-YOUR_API_KEY
66
# OPENAI_API_KEY=unused
77
# CHAT_MODEL=qwen2.5:0.5b
88

9-
# Uncomment and change to your OTLP endpoint
10-
# OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318
11-
# OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf
9+
# Uncomment and change to your OTLP endpoint and/or
10+
# OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317
11+
# OTEL_EXPORTER_OTLP_PROTOCOL=grpc
1212
OTEL_SERVICE_NAME=opentelemetry-python-openai
1313

1414
# Change to 'false' to hide prompt and completion content

instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/manual/README.rst

+7-1
Original file line numberDiff line numberDiff line change
@@ -9,12 +9,18 @@ duration of the chat request. Logs capture the chat request and the generated
99
response, providing a comprehensive view of the performance and behavior of
1010
your OpenAI requests.
1111

12+
Note: the `.env <.env>`_ file configures additional environment variables:
13+
14+
- `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true` configures
15+
OpenAI instrumentation to capture prompt and completion contents on
16+
events.
17+
1218
Setup
1319
-----
1420

1521
Minimally, update the `.env <.env>`_ file with your "OPENAI_API_KEY". An
1622
OTLP compatible endpoint should be listening for traces and logs on
17-
http://localhost:4318. If not, update "OTEL_EXPORTER_OTLP_ENDPOINT" as well.
23+
http://localhost:4317. If not, update "OTEL_EXPORTER_OTLP_ENDPOINT" as well.
1824

1925
Next, set up a virtual environment like this:
2026

Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
openai~=1.54.4
22

33
opentelemetry-sdk~=1.28.2
4-
opentelemetry-exporter-otlp-proto-http~=1.28.2
4+
opentelemetry-exporter-otlp-proto-grpc~=1.28.2
55
opentelemetry-instrumentation-openai-v2~=2.0b0

instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/zero-code/.env

+5-3
Original file line numberDiff line numberDiff line change
@@ -6,13 +6,15 @@ OPENAI_API_KEY=sk-YOUR_API_KEY
66
# OPENAI_API_KEY=unused
77
# CHAT_MODEL=qwen2.5:0.5b
88

9-
OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318
10-
OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf
9+
# Uncomment and change to your OTLP endpoint and/or
10+
# OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317
11+
# OTEL_EXPORTER_OTLP_PROTOCOL=grpc
12+
1113
OTEL_SERVICE_NAME=opentelemetry-python-openai
1214

1315
# Change to 'false' to disable logging
1416
OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED=true
1517
# Change to 'console' if your OTLP endpoint doesn't support logs
16-
OTEL_LOGS_EXPORTER=otlp_proto_http
18+
OTEL_LOGS_EXPORTER=otlp
1719
# Change to 'false' to hide prompt and completion content
1820
OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true

instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/zero-code/README.rst

+10-1
Original file line numberDiff line numberDiff line change
@@ -10,12 +10,21 @@ duration of the chat request. Logs capture the chat request and the generated
1010
response, providing a comprehensive view of the performance and behavior of
1111
your OpenAI requests.
1212

13+
Note: the `.env <.env>`_ file configures additional environment variables:
14+
15+
- `OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED=true` configures
16+
OpenTelemetry SDK to export logs and events.
17+
- `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true` configures
18+
OpenAI instrumentation to capture prompt and completion contents on
19+
events.
20+
- `OTEL_LOGS_EXPORTER=otlp` configures the logs exporter type.
21+
1322
Setup
1423
-----
1524

1625
Minimally, update the `.env <.env>`_ file with your "OPENAI_API_KEY". An
1726
OTLP compatible endpoint should be listening for traces and logs on
18-
http://localhost:4318. If not, update "OTEL_EXPORTER_OTLP_ENDPOINT" as well.
27+
http://localhost:4317. If not, update "OTEL_EXPORTER_OTLP_ENDPOINT" as well.
1928

2029
Next, set up a virtual environment like this:
2130

Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
openai~=1.54.4
22

33
opentelemetry-sdk~=1.28.2
4-
opentelemetry-exporter-otlp-proto-http~=1.28.2
4+
opentelemetry-exporter-otlp-proto-grpc~=1.28.2
55
opentelemetry-distro~=0.49b2
66
opentelemetry-instrumentation-openai-v2~=2.0b0

opentelemetry-distro/src/opentelemetry/distro/__init__.py

+2
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
from opentelemetry.environment_variables import (
1818
OTEL_METRICS_EXPORTER,
1919
OTEL_TRACES_EXPORTER,
20+
OTEL_LOGS_EXPORTER
2021
)
2122
from opentelemetry.instrumentation.distro import BaseDistro
2223
from opentelemetry.sdk._configuration import _OTelSDKConfigurator
@@ -37,4 +38,5 @@ class OpenTelemetryDistro(BaseDistro):
3738
def _configure(self, **kwargs):
3839
os.environ.setdefault(OTEL_TRACES_EXPORTER, "otlp")
3940
os.environ.setdefault(OTEL_METRICS_EXPORTER, "otlp")
41+
os.environ.setdefault(OTEL_LOGS_EXPORTER, "otlp")
4042
os.environ.setdefault(OTEL_EXPORTER_OTLP_PROTOCOL, "grpc")

0 commit comments

Comments
 (0)