@@ -334,7 +334,7 @@ def _extract_llama_attributes(self, attributes, request_body):
             attributes, GEN_AI_REQUEST_TOP_P, request_body.get("top_p")
         )
         # request for meta llama models does not contain stop_sequences field
-
+
     def _extract_mistral_attributes(self, attributes, request_body):
         prompt = request_body.get("prompt")
         if prompt:
@@ -382,24 +382,30 @@ def _get_request_messages(self):
         if not messages:
             model_id = self._call_context.params.get(_MODEL_ID_KEY)
             if "amazon.titan" in model_id:
-                if input_text := decoded_body.get("inputText"):
-                    messages = [
-                        {"role": "user", "content": [{"text": input_text}]}
-                    ]
+                messages = self._get_messages_from_input_text(
+                    decoded_body, "inputText"
+                )
             elif "cohere.command-r" in model_id:
                 # chat_history can be converted to messages; for now, just use message
-                if input_text := decoded_body.get("message"):
-                    messages = [
-                        {"role": "user", "content": [{"text": input_text}]}
-                    ]
+                messages = self._get_messages_from_input_text(
+                    decoded_body, "message"
+                )
             elif "cohere.command" in model_id or "meta.llama" in model_id or "mistral.mistral" in model_id:
-                if input_text := decoded_body.get("prompt"):
-                    messages = [
-                        {"role": "user", "content": [{"text": input_text}]}
-                    ]
+                messages = self._get_messages_from_input_text(
+                    decoded_body, "prompt"
+                )
 
         return system_messages + messages
 
+    def _get_messages_from_input_text(
+        self, decoded_body: dict[str, Any], input_name: str
+    ):
+        if input_text := decoded_body.get(input_name):
+            return [
+                {"role": "user", "content": [{"text": input_text}]}
+            ]
+        return []
+
     def before_service_call(
         self, span: Span, instrumentor_context: _BotocoreInstrumentorContext
     ):
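For reference, the hunk above extracts the three repeated walrus-operator blocks into a single _get_messages_from_input_text helper. Below is a minimal standalone sketch of that helper's behavior, written at module level for illustration rather than as the extension method; the sample request bodies are illustrative, not taken from the PR.

from typing import Any


def _get_messages_from_input_text(decoded_body: dict[str, Any], input_name: str):
    # Mirrors the added helper: one user message when the field is present,
    # an empty list otherwise.
    if input_text := decoded_body.get(input_name):
        return [{"role": "user", "content": [{"text": input_text}]}]
    return []


# Hypothetical decoded bodies for the Titan and Llama request shapes:
assert _get_messages_from_input_text({"inputText": "hi"}, "inputText") == [
    {"role": "user", "content": [{"text": "hi"}]}
]
assert _get_messages_from_input_text({}, "prompt") == []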
@@ -827,7 +833,7 @@ def _handle_anthropic_claude_response(
                 token_usage_histogram.record(
                     output_tokens, output_attributes
                 )
-
+
     def _handle_cohere_command_r_response(
         self,
         span: Span,
@@ -843,13 +849,13 @@ def _handle_cohere_command_r_response(
             span.set_attribute(
                 GEN_AI_RESPONSE_FINISH_REASONS, [response_body["finish_reason"]]
             )
-
+
         event_logger = instrumentor_context.event_logger
         choice = _Choice.from_invoke_cohere_command_r(
             response_body, capture_content
         )
         event_logger.emit(choice.to_choice_event())
-
+
     def _handle_cohere_command_response(
         self,
         span: Span,
@@ -867,7 +873,7 @@ def _handle_cohere_command_response(
             span.set_attribute(
                 GEN_AI_RESPONSE_FINISH_REASONS, [generations["finish_reason"]]
             )
-
+
         event_logger = instrumentor_context.event_logger
         choice = _Choice.from_invoke_cohere_command(
             response_body, capture_content
@@ -913,7 +919,7 @@ def _handle_mistral_ai_response(
                 span.set_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, estimate_token_count(outputs["text"]))
             if "stop_reason" in outputs:
                 span.set_attribute(GEN_AI_RESPONSE_FINISH_REASONS, [outputs["stop_reason"]])
-
+
         event_logger = instrumentor_context.event_logger
         choice = _Choice.from_invoke_mistral_mistral(
             response_body, capture_content