@@ -73,6 +73,28 @@ def _capture_exception(hub, exc):
     hub.capture_event(event, hint=hint)


+def _normalize_data(data):
+    # type: (Any) -> Any
+
+    # convert pydantic data (e.g. OpenAI v1+) to json compatible format
+    if hasattr(data, "model_dump"):
+        try:
+            return data.model_dump()
+        except Exception as e:
+            logger.warning("Could not convert pydantic data to JSON: %s", e)
+            return data
+    if isinstance(data, list):
+        return list(_normalize_data(x) for x in data)
+    if isinstance(data, dict):
+        return {k: _normalize_data(v) for (k, v) in data.items()}
+    return data
+
+
+def set_data_normalized(span, key, value):
+    # type: (Span, str, Any) -> None
+    span.set_data(key, _normalize_data(value))
+
+
 def _calculate_chat_completion_usage(
     messages, response, span, streaming_message_responses=None
 ):
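The two helpers above are the heart of the patch: _normalize_data recursively converts pydantic models (anything exposing model_dump(), as OpenAI v1+ response objects do) into plain JSON-compatible values, and set_data_normalized funnels every span attribute through that conversion. A minimal sketch of the behavior; FakeMessage is a hypothetical stand-in for a pydantic model and is not part of the diff:

# Assumes this patch is applied so the helper is importable.
from sentry_sdk.integrations.openai import _normalize_data

class FakeMessage:
    # Hypothetical stand-in mimicking a pydantic v2 model.
    def __init__(self, role, content):
        self.role = role
        self.content = content

    def model_dump(self):
        # pydantic v2 models expose model_dump(); this mimics it
        return {"role": self.role, "content": self.content}

data = [FakeMessage("assistant", "hi"), {"nested": [FakeMessage("user", "yo")]}]

# The helper recurses through lists and dicts and dumps model objects:
#   [{'role': 'assistant', 'content': 'hi'},
#    {'nested': [{'role': 'user', 'content': 'yo'}]}]
print(_normalize_data(data))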
@@ -112,11 +134,11 @@ def _calculate_chat_completion_usage(
         total_tokens = prompt_tokens + completion_tokens

     if completion_tokens != 0:
-        span.set_data(COMPLETION_TOKENS_USED, completion_tokens)
+        set_data_normalized(span, COMPLETION_TOKENS_USED, completion_tokens)
     if prompt_tokens != 0:
-        span.set_data(PROMPT_TOKENS_USED, prompt_tokens)
+        set_data_normalized(span, PROMPT_TOKENS_USED, prompt_tokens)
     if total_tokens != 0:
-        span.set_data(TOTAL_TOKENS_USED, total_tokens)
+        set_data_normalized(span, TOTAL_TOKENS_USED, total_tokens)


 def _wrap_chat_completion_create(f):
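The token counters are plain integers, so routing them through set_data_normalized changes nothing today; normalization is the identity on primitives, and the swap simply keeps every call site on one code path (note the fallback in the context above, where a missing total is recomputed as prompt_tokens + completion_tokens). A quick check of the pass-through, assuming the patched module:

from sentry_sdk.integrations.openai import _normalize_data  # assumes the patch is applied

# Ints and strings have no model_dump() and are neither list nor dict,
# so they come back unchanged:
assert _normalize_data(42) == 42
assert _normalize_data("gpt-4") == "gpt-4"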
@@ -160,14 +182,17 @@ def new_chat_completion(*args, **kwargs):

         with capture_internal_exceptions():
             if _should_send_default_pii() and integration.include_prompts:
-                span.set_data("ai.input_messages", messages)
-            span.set_data("ai.model_id", model)
-            span.set_data("ai.streaming", streaming)
+                set_data_normalized(span, "ai.input_messages", messages)
+
+            set_data_normalized(span, "ai.model_id", model)
+            set_data_normalized(span, "ai.streaming", streaming)

             if hasattr(res, "choices"):
                 if _should_send_default_pii() and integration.include_prompts:
-                    span.set_data(
-                        "ai.responses", list(map(lambda x: x.message, res.choices))
+                    set_data_normalized(
+                        span,
+                        "ai.responses",
+                        list(map(lambda x: x.message, res.choices)),
                     )
                 _calculate_chat_completion_usage(messages, res, span)
                 span.__exit__(None, None, None)
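This hunk is the motivating case for the patch: with openai v1+, each res.choices[n].message is a pydantic ChatCompletionMessage, so the old span.set_data("ai.responses", ...) attached raw model objects that the event serializer could not render as JSON. A before/after sketch; building the message by hand is purely illustrative, and the exact dumped fields vary by openai version:

from openai.types.chat import ChatCompletionMessage  # openai v1+
from sentry_sdk.integrations.openai import _normalize_data  # assumes the patch is applied

msg = ChatCompletionMessage(role="assistant", content="Hello!")

# Before: the pydantic object itself landed in span data.
# After: model_dump() flattens it first, yielding roughly
#   [{'role': 'assistant', 'content': 'Hello!', 'function_call': None, 'tool_calls': None}]
print(_normalize_data([msg]))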
@@ -200,15 +225,15 @@ def new_iterator():
                                 _should_send_default_pii()
                                 and integration.include_prompts
                             ):
-                                span.set_data("ai.responses", all_responses)
+                                set_data_normalized(span, "ai.responses", all_responses)
                             _calculate_chat_completion_usage(
                                 messages, res, span, all_responses
                             )
                         span.__exit__(None, None, None)

                 res._iterator = new_iterator()
             else:
-                span.set_data("unknown_response", True)
+                set_data_normalized(span, "unknown_response", True)
                 span.__exit__(None, None, None)
             return res

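In the streaming branch, all_responses holds plain strings that the surrounding code (not shown in this hunk) joins from the per-choice deltas, and the else branch stores a bare boolean, so normalization is again a pass-through adopted for uniformity. Roughly how those strings are assembled inside the enclosing new_iterator:

# data_buf collects one list of content deltas per choice; the code
# around this hunk joins each into a full response string:
data_buf = [["Hel", "lo"], ["wor", "ld"]]
all_responses = list(map(lambda chunk: "".join(chunk), data_buf))
assert all_responses == ["Hello", "world"]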
@@ -238,15 +263,15 @@ def new_embeddings_create(*args, **kwargs):
                 _should_send_default_pii() and integration.include_prompts
             ):
                 if isinstance(kwargs["input"], str):
-                    span.set_data("ai.input_messages", [kwargs["input"]])
+                    set_data_normalized(span, "ai.input_messages", [kwargs["input"]])
                 elif (
                     isinstance(kwargs["input"], list)
                     and len(kwargs["input"]) > 0
                     and isinstance(kwargs["input"][0], str)
                 ):
-                    span.set_data("ai.input_messages", kwargs["input"])
+                    set_data_normalized(span, "ai.input_messages", kwargs["input"])
             if "model" in kwargs:
-                span.set_data("ai.model_id", kwargs["model"])
+                set_data_normalized(span, "ai.model_id", kwargs["model"])
             try:
                 response = f(*args, **kwargs)
             except Exception as e:
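The embeddings wrapper accepts input either as a single string or as a list of strings; both shapes are recorded as a list under "ai.input_messages", and other shapes (e.g. pre-tokenized integer lists) are skipped. A condensed, hypothetical restatement of that shape logic (input_messages is not a function in the SDK):

def input_messages(value):
    # Mirror of the hunk's branching: wrap a bare string, keep a
    # non-empty list of strings, record nothing otherwise.
    if isinstance(value, str):
        return [value]
    if isinstance(value, list) and len(value) > 0 and isinstance(value[0], str):
        return value
    return None

assert input_messages("embed me") == ["embed me"]
assert input_messages(["a", "b"]) == ["a", "b"]
assert input_messages([[1, 2, 3]]) is None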
@@ -271,8 +296,8 @@ def new_embeddings_create(*args, **kwargs):
             if total_tokens == 0:
                 total_tokens = prompt_tokens

-            span.set_data(PROMPT_TOKENS_USED, prompt_tokens)
-            span.set_data(TOTAL_TOKENS_USED, total_tokens)
+            set_data_normalized(span, PROMPT_TOKENS_USED, prompt_tokens)
+            set_data_normalized(span, TOTAL_TOKENS_USED, total_tokens)

             return response

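With this final hunk, every span attribute the integration writes flows through the same normalization choke point. A stand-in span makes the end-to-end effect easy to verify; DummySpan is purely illustrative and not the SDK's Span class:

from sentry_sdk.integrations.openai import set_data_normalized  # assumes the patch is applied

class DummySpan:
    # Just enough surface to observe what set_data_normalized writes.
    def __init__(self):
        self.data = {}

    def set_data(self, key, value):
        self.data[key] = value

span = DummySpan()
set_data_normalized(span, "ai.model_id", "text-embedding-ada-002")
set_data_normalized(span, "ai.input_messages", ["embed me"])
assert span.data == {
    "ai.model_id": "text-embedding-ada-002",
    "ai.input_messages": ["embed me"],
}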