 # See the License for the specific language governing permissions and
 # limitations under the License.

-import json
-from typing import Optional, Union
+
 from opentelemetry import trace
 from opentelemetry.trace import SpanKind, Span
 from opentelemetry.trace.status import Status, StatusCode
 from opentelemetry.trace.propagation import set_span_in_context
-from openai import NOT_GIVEN
-from .span_attributes import LLMSpanAttributes, SpanAttributes

-from .utils import silently_fail, extract_content
+from .span_attributes import LLMSpanAttributes, SpanAttributes
+from opentelemetry.semconv._incubating.attributes import (
+    gen_ai_attributes as GenAIAttributes,
+)
+from .utils import (
+    silently_fail,
+    extract_content,
+    get_llm_request_attributes,
+    is_streaming,
+    set_span_attribute,
+    set_event_completion,
+    extract_tools_prompt,
+)
 from opentelemetry.trace import Tracer


-def chat_completions_create(original_method, version, tracer: Tracer):
+def chat_completions_create(tracer: Tracer):
     """Wrap the `create` method of the `ChatCompletion` class to trace it."""

     def traced_method(wrapped, instance, args, kwargs):
+
         llm_prompts = []
+
         for item in kwargs.get("messages", []):
-            tools = get_tool_calls(item)
-            if tools is not None:
-                tool_calls = []
-                for tool_call in tools:
-                    tool_call_dict = {
-                        "id": tool_call.id if hasattr(tool_call, "id") else "",
-                        "type": (
-                            tool_call.type
-                            if hasattr(tool_call, "type")
-                            else ""
-                        ),
-                    }
-                    if hasattr(tool_call, "function"):
-                        tool_call_dict["function"] = {
-                            "name": (
-                                tool_call.function.name
-                                if hasattr(tool_call.function, "name")
-                                else ""
-                            ),
-                            "arguments": (
-                                tool_call.function.arguments
-                                if hasattr(tool_call.function, "arguments")
-                                else ""
-                            ),
-                        }
-                    tool_calls.append(tool_call_dict)
-                llm_prompts.append(tool_calls)
-            else:
-                llm_prompts.append(item)
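+            # Tool-call messages are flattened into serializable prompt
+            # entries; plain messages pass through unchanged.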
+            tools_prompt = extract_tools_prompt(item)
+            llm_prompts.append(tools_prompt if tools_prompt else item)

         span_attributes = {
             **get_llm_request_attributes(kwargs, prompts=llm_prompts),
@@ -74,7 +58,7 @@ def traced_method(wrapped, instance, args, kwargs):
             kind=SpanKind.CLIENT,
             context=set_span_in_context(trace.get_current_span()),
         )
-        _set_input_attributes(span, kwargs, attributes)
+        _set_input_attributes(span, attributes)

         try:
             result = wrapped(*args, **kwargs)
@@ -86,52 +70,31 @@ def traced_method(wrapped, instance, args, kwargs):
                     tool_calls=kwargs.get("tools") is not None,
                 )
             else:
-                _set_response_attributes(span, kwargs, result)
+                _set_response_attributes(span, result)
                 span.end()
                 return result

         except Exception as error:
             span.set_status(Status(StatusCode.ERROR, str(error)))
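+            # Expose the exception class name on the span as error.type.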
+            span.set_attribute("error.type", error.__class__.__name__)
             span.end()
             raise

     return traced_method


-def get_tool_calls(item):
-    if isinstance(item, dict):
-        return item.get("tool_calls")
-    else:
-        return getattr(item, "tool_calls", None)
-
-
 @silently_fail
-def _set_input_attributes(span, kwargs, attributes: LLMSpanAttributes):
-    tools = []
-
-    if (
-        kwargs.get("functions") is not None
-        and kwargs.get("functions") != NOT_GIVEN
-    ):
-        for function in kwargs.get("functions"):
-            tools.append(
-                json.dumps({"type": "function", "function": function})
-            )
-
-    if kwargs.get("tools") is not None and kwargs.get("tools") != NOT_GIVEN:
-        tools.append(json.dumps(kwargs.get("tools")))
-
-    if tools:
-        set_span_attribute(span, SpanAttributes.LLM_TOOLS, json.dumps(tools))
-
+def _set_input_attributes(span, attributes: LLMSpanAttributes):
     for field, value in attributes.model_dump(by_alias=True).items():
         set_span_attribute(span, field, value)


 @silently_fail
-def _set_response_attributes(span, kwargs, result):
-    set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, result.model)
-    if hasattr(result, "choices") and result.choices is not None:
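+# Response attributes now come from the incubating gen_ai.* semantic conventions.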
+def _set_response_attributes(span, result):
+    set_span_attribute(
+        span, GenAIAttributes.GEN_AI_RESPONSE_MODEL, result.model
+    )
+    if getattr(result, "choices", None):
         responses = [
             {
                 "role": (
@@ -154,120 +117,30 @@ def _set_response_attributes(span, kwargs, result):
         ]
         set_event_completion(span, responses)

-    if (
-        hasattr(result, "system_fingerprint")
-        and result.system_fingerprint is not None
-        and result.system_fingerprint != NOT_GIVEN
-    ):
+    if getattr(result, "system_fingerprint", None):
         set_span_attribute(
             span,
             SpanAttributes.LLM_SYSTEM_FINGERPRINT,
             result.system_fingerprint,
         )
-    # Get the usage
-    if hasattr(result, "usage") and result.usage is not None:
-        usage = result.usage
-        if usage is not None:
-            set_span_attribute(
-                span,
-                SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
-                result.usage.prompt_tokens,
-            )
-            set_span_attribute(
-                span,
-                SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
-                result.usage.completion_tokens,
-            )
-            set_span_attribute(
-                span,
-                SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
-                result.usage.total_tokens,
-            )
-
-
-def set_event_prompt(span: Span, prompt):
-    span.add_event(
-        name=SpanAttributes.LLM_CONTENT_PROMPT,
-        attributes={
-            SpanAttributes.LLM_PROMPTS: prompt,
-        },
-    )

-
-def set_span_attributes(span: Span, attributes: dict):
-    for field, value in attributes.model_dump(by_alias=True).items():
-        set_span_attribute(span, field, value)
-
-
-def set_event_completion(span: Span, result_content):
-    span.add_event(
-        name=SpanAttributes.LLM_CONTENT_COMPLETION,
-        attributes={
-            SpanAttributes.LLM_COMPLETIONS: json.dumps(result_content),
-        },
-    )
-
-
-def set_span_attribute(span: Span, name, value):
-    if value is not None:
-        if value != "" or value != NOT_GIVEN:
-            if name == SpanAttributes.LLM_PROMPTS:
-                set_event_prompt(span, value)
-            else:
-                span.set_attribute(name, value)
-    return
-
-
-def is_streaming(kwargs):
-    return non_numerical_value_is_set(kwargs.get("stream"))
-
-
-def non_numerical_value_is_set(value: Optional[Union[bool, str]]):
-    return bool(value) and value != NOT_GIVEN
-
-
-def get_llm_request_attributes(
-    kwargs, prompts=None, model=None, operation_name="chat"
-):
-
-    user = kwargs.get("user")
-    if prompts is None:
-        prompts = (
-            [{"role": user or "user", "content": kwargs.get("prompt")}]
-            if "prompt" in kwargs
-            else None
+    # Get the usage
+    if getattr(result, "usage", None):
+        set_span_attribute(
+            span,
+            GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS,
+            result.usage.prompt_tokens,
+        )
+        set_span_attribute(
+            span,
+            GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS,
+            result.usage.completion_tokens,
+        )
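+        # Total tokens go under a literal key; no GenAIAttributes constant
+        # is used for this attribute here.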
+        set_span_attribute(
+            span,
+            "gen_ai.usage.total_tokens",
+            result.usage.total_tokens,
         )
-    top_k = (
-        kwargs.get("n")
-        or kwargs.get("k")
-        or kwargs.get("top_k")
-        or kwargs.get("top_n")
-    )
-
-    top_p = kwargs.get("p") or kwargs.get("top_p")
-    tools = kwargs.get("tools")
-    return {
-        SpanAttributes.LLM_OPERATION_NAME: operation_name,
-        SpanAttributes.LLM_REQUEST_MODEL: model or kwargs.get("model"),
-        SpanAttributes.LLM_IS_STREAMING: kwargs.get("stream"),
-        SpanAttributes.LLM_REQUEST_TEMPERATURE: kwargs.get("temperature"),
-        SpanAttributes.LLM_TOP_K: top_k,
-        SpanAttributes.LLM_PROMPTS: json.dumps(prompts) if prompts else None,
-        SpanAttributes.LLM_USER: user,
-        SpanAttributes.LLM_REQUEST_TOP_P: top_p,
-        SpanAttributes.LLM_REQUEST_MAX_TOKENS: kwargs.get("max_tokens"),
-        SpanAttributes.LLM_SYSTEM_FINGERPRINT: kwargs.get(
-            "system_fingerprint"
-        ),
-        SpanAttributes.LLM_PRESENCE_PENALTY: kwargs.get("presence_penalty"),
-        SpanAttributes.LLM_FREQUENCY_PENALTY: kwargs.get("frequency_penalty"),
-        SpanAttributes.LLM_REQUEST_SEED: kwargs.get("seed"),
-        SpanAttributes.LLM_TOOLS: json.dumps(tools) if tools else None,
-        SpanAttributes.LLM_TOOL_CHOICE: kwargs.get("tool_choice"),
-        SpanAttributes.LLM_REQUEST_LOGPROPS: kwargs.get("logprobs"),
-        SpanAttributes.LLM_REQUEST_LOGITBIAS: kwargs.get("logit_bias"),
-        SpanAttributes.LLM_REQUEST_TOP_LOGPROPS: kwargs.get("top_logprobs"),
-    }


 class StreamWrapper:
@@ -277,7 +150,7 @@ def __init__(
         self,
         stream,
         span,
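+        # Default to 0 (was None) so cleanup() can sum token counts safely.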
-        prompt_tokens=None,
+        prompt_tokens=0,
         function_call=False,
         tool_calls=False,
     ):
@@ -299,17 +172,17 @@ def cleanup(self):
         if self._span_started:
             set_span_attribute(
                 self.span,
-                SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
+                GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS,
                 self.prompt_tokens,
             )
             set_span_attribute(
                 self.span,
-                SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
+                GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS,
                 self.completion_tokens,
             )
             set_span_attribute(
                 self.span,
-                SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
+                "gen_ai.usage.total_tokens",
                 self.prompt_tokens + self.completion_tokens,
             )
             set_event_completion(
@@ -346,14 +219,14 @@ def __next__(self):
             raise

     def process_chunk(self, chunk):
-        if hasattr(chunk, "model") and chunk.model is not None:
+        if getattr(chunk, "model", None):
             set_span_attribute(
                 self.span,
-                SpanAttributes.LLM_RESPONSE_MODEL,
+                GenAIAttributes.GEN_AI_RESPONSE_MODEL,
                 chunk.model,
             )

-        if hasattr(chunk, "choices") and chunk.choices is not None:
+        if getattr(chunk, "choices", None):
             content = []
             if not self.function_call and not self.tool_calls:
                 for choice in chunk.choices:
@@ -383,12 +256,12 @@ def process_chunk(self, chunk):
             if content:
                 self.result_content.append(content[0])

-        if hasattr(chunk, "text"):
+        if getattr(chunk, "text", None):
             content = [chunk.text]

             if content:
                 self.result_content.append(content[0])

-        if getattr(chunk, "usage"):
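+        # The default avoids an AttributeError on chunks without a usage field.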
+        if getattr(chunk, "usage", None):
             self.completion_tokens = chunk.usage.completion_tokens
             self.prompt_tokens = chunk.usage.prompt_tokens
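
For context, here is a minimal sketch of how a wrapper like this is typically installed, assuming the usual wrapt-based patching pattern used by OpenTelemetry instrumentations. The module and attribute paths below reflect the openai>=1.0 layout and are illustrative, not part of this commit:

```python
# Hypothetical wiring, not from this commit: route Completions.create
# through traced_method via the standard wrapt helper.
from wrapt import wrap_function_wrapper

from opentelemetry import trace

tracer = trace.get_tracer(__name__)

wrap_function_wrapper(
    "openai.resources.chat.completions",  # module to patch (openai>=1.0)
    "Completions.create",                 # attribute to wrap
    chat_completions_create(tracer),
)
```

Every chat-completion call then runs through `traced_method`, which starts the span, records the request attributes, and either wraps the stream in `StreamWrapper` or closes the span with the response attributes.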