Skip to content

Commit 8495a24

Browse files
committed
cleanup setting prompt events & finish reasons
1 parent 71aaeb6 commit 8495a24

File tree

2 files changed

+26
-43
lines changed
  • instrumentation/opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai

2 files changed

+26
-43
lines changed

instrumentation/opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/patch.py

+23-17
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,11 @@
 from opentelemetry.trace import SpanKind, Span
 from opentelemetry.trace.status import Status, StatusCode
 from .span_attributes import LLMSpanAttributes
+from opentelemetry.semconv.attributes import (
+    error_attributes as ErrorAttributes,
+)
 from opentelemetry.semconv._incubating.attributes import (
     gen_ai_attributes as GenAIAttributes,
-    error_attributes as ErrorAttributes,
 )
 from .utils import (
     silently_fail,
@@ -28,8 +30,10 @@
     set_span_attribute,
     set_event_completion,
     extract_tools_prompt,
+    set_event_prompt,
 )
 from opentelemetry.trace import Tracer
+import json


 def chat_completions_create(tracer: Tracer):
@@ -43,16 +47,15 @@ def traced_method(wrapped, instance, args, kwargs):
             tools_prompt = extract_tools_prompt(item)
             llm_prompts.append(tools_prompt if tools_prompt else item)

-        span_attributes = {
-            **get_llm_request_attributes(kwargs, prompts=llm_prompts),
-        }
+        span_attributes = {**get_llm_request_attributes(kwargs)}

        attributes = LLMSpanAttributes(**span_attributes)

        span_name = f"{attributes.gen_ai_operation_name} {attributes.gen_ai_request_model}"

        span = tracer.start_span(name=span_name, kind=SpanKind.CLIENT)
        _set_input_attributes(span, attributes)
+        set_event_prompt(span, json.dumps(llm_prompts))

        try:
            result = wrapped(*args, **kwargs)
@@ -112,13 +115,15 @@ def _set_response_attributes(span, result):
        }
        for choice in choices
    ]
+    finish_reasons = []
    for choice in choices:
-        if choice.finish_reason:
-            set_span_attribute(
-                span,
-                GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS,
-                choice.finish_reason,
-            )
+        finish_reasons.append(choice.finish_reason or "error")
+
+    set_span_attribute(
+        span,
+        GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS,
+        finish_reasons,
+    )
    set_event_completion(span, responses)

    if getattr(result, "id", None):
@@ -270,14 +275,15 @@ def build_streaming_response(self, chunk):
        ):
            content.append(tool_call.function.arguments)

+        finish_reasons = []
        for choice in choices:
-            finish_reason = choice.finish_reason
-            if finish_reason:
-                set_span_attribute(
-                    self.span,
-                    GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS,
-                    finish_reason,
-                )
+            finish_reasons.append(choice.finish_reason or "error")
+
+        set_span_attribute(
+            self.span,
+            GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS,
+            finish_reasons,
+        )
        if content:
            self.result_content.append(content[0])

instrumentation/opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/utils.py

+3-26
Original file line numberDiff line numberDiff line change
@@ -127,10 +127,7 @@ def set_span_attribute(span: Span, name, value):
    if non_numerical_value_is_set(value) is False:
        return

-    if name == GenAIAttributes.GEN_AI_PROMPT:
-        set_event_prompt(span, value)
-    else:
-        span.set_attribute(name, value)
+    span.set_attribute(name, value)


def is_streaming(kwargs):
@@ -143,37 +140,17 @@

def get_llm_request_attributes(
    kwargs,
-    prompts=None,
    model=None,
    operation_name=GenAIAttributes.GenAiOperationNameValues.CHAT.value,
):

-    user = kwargs.get("user")
-    if prompts is None:
-        prompts = (
-            [{"role": user or "user", "content": kwargs.get("prompt")}]
-            if "prompt" in kwargs
-            else None
-        )
-    top_k = (
-        kwargs.get("n")
-        or kwargs.get("k")
-        or kwargs.get("top_k")
-        or kwargs.get("top_n")
-    )
-
-    top_p = kwargs.get("p") or kwargs.get("top_p")
-
    return {
        GenAIAttributes.GEN_AI_OPERATION_NAME: operation_name,
        GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.OPENAI.value,
        GenAIAttributes.GEN_AI_REQUEST_MODEL: model or kwargs.get("model"),
        GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE: kwargs.get("temperature"),
-        GenAIAttributes.GEN_AI_REQUEST_TOP_K: top_k,
-        GenAIAttributes.GEN_AI_PROMPT: (
-            json.dumps(prompts) if prompts else None
-        ),
-        GenAIAttributes.GEN_AI_REQUEST_TOP_P: top_p,
+        GenAIAttributes.GEN_AI_REQUEST_TOP_P: kwargs.get("p")
+        or kwargs.get("top_p"),
        GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS: kwargs.get("max_tokens"),
        GenAIAttributes.GEN_AI_REQUEST_PRESENCE_PENALTY: kwargs.get(
            "presence_penalty"

0 commit comments

Comments
 (0)