@@ -121,13 +121,13 @@ def _add_ai_data_to_span(
     with capture_internal_exceptions():
         if should_send_default_pii() and integration.include_prompts:
             complete_message = "".join(content_blocks)
-            span.set_data(
+            span.set_attribute(
                 SPANDATA.AI_RESPONSES,
                 [{"type": "text", "text": complete_message}],
             )
         total_tokens = input_tokens + output_tokens
         record_token_usage(span, input_tokens, output_tokens, total_tokens)
-        span.set_data(SPANDATA.AI_STREAMING, True)
+        span.set_attribute(SPANDATA.AI_STREAMING, True)
 
 
 def _sentry_patched_create_common(f, *args, **kwargs):
@@ -159,15 +159,17 @@ def _sentry_patched_create_common(f, *args, **kwargs):
     model = kwargs.get("model")
 
     with capture_internal_exceptions():
-        span.set_data(SPANDATA.AI_MODEL_ID, model)
-        span.set_data(SPANDATA.AI_STREAMING, False)
+        span.set_attribute(SPANDATA.AI_MODEL_ID, model)
+        span.set_attribute(SPANDATA.AI_STREAMING, False)
 
         if should_send_default_pii() and integration.include_prompts:
-            span.set_data(SPANDATA.AI_INPUT_MESSAGES, messages)
+            span.set_attribute(SPANDATA.AI_INPUT_MESSAGES, messages)
 
         if hasattr(result, "content"):
             if should_send_default_pii() and integration.include_prompts:
-                span.set_data(SPANDATA.AI_RESPONSES, _get_responses(result.content))
+                span.set_attribute(
+                    SPANDATA.AI_RESPONSES, _get_responses(result.content)
+                )
             _calculate_token_usage(result, span)
             span.__exit__(None, None, None)
 
@@ -215,7 +217,7 @@ async def new_iterator_async():
                 result._iterator = new_iterator()
 
         else:
-            span.set_data("unknown_response", True)
+            span.set_attribute("unknown_response", True)
             span.__exit__(None, None, None)
 
         return result
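For reference, the call pattern this change adopts looks roughly like the sketch below. It is an illustration only, not part of the commit: it assumes a sentry-sdk version whose Span exposes set_attribute (as introduced above) and the SPANDATA constants from sentry_sdk.consts; the op string and attribute values are placeholder examples.

    import sentry_sdk
    from sentry_sdk.consts import SPANDATA

    # Record AI metadata on a span via set_attribute (the replacement for set_data).
    with sentry_sdk.start_span(op="ai.messages.create.anthropic") as span:  # op is an example value
        span.set_attribute(SPANDATA.AI_MODEL_ID, "claude-3-opus")  # placeholder model name
        span.set_attribute(SPANDATA.AI_STREAMING, False)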