
Commit 359d156

fix tests and changelog

1 parent 742cb59 commit 359d156

10 files changed: +21 −19 lines changed

CHANGELOG.md  +3 −2

@@ -11,6 +11,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## Unreleased
 
+- `opentelemetry-instrumentation-botocore` Add GenAI instrumentation for additional Bedrock models for InvokeModel API
+  ([#3419](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3419))
+
 ## Version 1.32.0/0.53b0 (2025-04-10)
 
 ### Added
@@ -21,8 +24,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   ([#3385](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3385))
 - `opentelemetry-instrumentation` Make auto instrumentation use the same dependency resolver as manual instrumentation does
   ([#3202](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3202))
-- `opentelemetry-instrumentation-botocore` Add GenAI instrumentation for additional Bedrock models for InvokeModel API
-  ([#3419](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3419))
 
 ### Fixed
 

instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/extensions/bedrock.py  +10 −9

@@ -105,7 +105,9 @@
 ]
 
 _MODEL_ID_KEY: str = "modelId"
-
+# estimate 6 chars per token for models that don't provide input/output token count in response body.
+# https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-prepare.html
+_CHARS_PER_TOKEN: int = 6
 
 class _BedrockRuntimeExtension(_AwsSdkExtension):
     """
@@ -291,7 +293,7 @@ def _extract_claude_attributes(self, attributes, request_body):
     def _extract_command_r_attributes(self, attributes, request_body):
         prompt = request_body.get("message")
         self._set_if_not_none(
-            attributes, GEN_AI_USAGE_INPUT_TOKENS, math.ceil(len(prompt) / 6)
+            attributes, GEN_AI_USAGE_INPUT_TOKENS, math.ceil(len(prompt) / _CHARS_PER_TOKEN)
         )
         self._set_if_not_none(
             attributes, GEN_AI_REQUEST_MAX_TOKENS, request_body.get("max_tokens")
@@ -309,7 +311,7 @@ def _extract_command_r_attributes(self, attributes, request_body):
     def _extract_command_attributes(self, attributes, request_body):
         prompt = request_body.get("prompt")
         self._set_if_not_none(
-            attributes, GEN_AI_USAGE_INPUT_TOKENS, math.ceil(len(prompt) / 6)
+            attributes, GEN_AI_USAGE_INPUT_TOKENS, math.ceil(len(prompt) / _CHARS_PER_TOKEN)
         )
         self._set_if_not_none(
             attributes, GEN_AI_REQUEST_MAX_TOKENS, request_body.get("max_tokens")
@@ -340,7 +342,7 @@ def _extract_mistral_attributes(self, attributes, request_body):
         prompt = request_body.get("prompt")
         if prompt:
             self._set_if_not_none(
-                attributes, GEN_AI_USAGE_INPUT_TOKENS, math.ceil(len(prompt) / 6)
+                attributes, GEN_AI_USAGE_INPUT_TOKENS, math.ceil(len(prompt) / _CHARS_PER_TOKEN)
             )
             self._set_if_not_none(
                 attributes, GEN_AI_REQUEST_MAX_TOKENS, request_body.get("max_tokens")
@@ -379,10 +381,10 @@ def _get_request_messages(self):
             system_messages = [{"role": "system", "content": content}]
 
         messages = decoded_body.get("messages", [])
+        # if no messages interface, convert to messages format from generic API
         if not messages:
             model_id = self._call_context.params.get(_MODEL_ID_KEY)
             if "amazon.titan" in model_id:
-                # transform old school amazon titan invokeModel api to messages
                 if input_text := decoded_body.get("inputText"):
                     messages = [
                         {"role": "user", "content": [{"text": input_text}]}
@@ -394,7 +396,6 @@ def _get_request_messages(self):
                         {"role": "user", "content": [{"text": input_text}]}
                     ]
             elif "cohere.command" in model_id or "meta.llama" in model_id or "mistral.mistral" in model_id:
-                # transform old school cohere command api to messages
                 if input_text := decoded_body.get("prompt"):
                     messages = [
                         {"role": "user", "content": [{"text": input_text}]}
@@ -839,7 +840,7 @@ def _handle_cohere_command_r_response(
     ):
         if "text" in response_body:
             span.set_attribute(
-                GEN_AI_USAGE_OUTPUT_TOKENS, math.ceil(len(response_body["text"]) / 6)
+                GEN_AI_USAGE_OUTPUT_TOKENS, math.ceil(len(response_body["text"]) / _CHARS_PER_TOKEN)
             )
         if "finish_reason" in response_body:
             span.set_attribute(
@@ -863,7 +864,7 @@ def _handle_cohere_command_response(
         generations = response_body["generations"][0]
         if "text" in generations:
             span.set_attribute(
-                GEN_AI_USAGE_OUTPUT_TOKENS, math.ceil(len(generations["text"]) / 6)
+                GEN_AI_USAGE_OUTPUT_TOKENS, math.ceil(len(generations["text"]) / _CHARS_PER_TOKEN)
             )
         if "finish_reason" in generations:
             span.set_attribute(
@@ -912,7 +913,7 @@ def _handle_mistral_ai_response(
         if "outputs" in response_body:
            outputs = response_body["outputs"][0]
            if "text" in outputs:
-               span.set_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, math.ceil(len(outputs["text"]) / 6))
+               span.set_attribute(GEN_AI_USAGE_OUTPUT_TOKENS, math.ceil(len(outputs["text"]) / _CHARS_PER_TOKEN))
            if "stop_reason" in outputs:
                span.set_attribute(GEN_AI_RESPONSE_FINISH_REASONS, [outputs["stop_reason"]])
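
For context on the bedrock.py change: when a Bedrock model's InvokeModel response body does not report token usage, the instrumentation approximates it as ceil(len(text) / _CHARS_PER_TOKEN) with 6 characters per token, and prompt-style request bodies are normalized into the messages shape. A minimal standalone sketch of both ideas follows; estimate_token_count and normalize_to_messages are illustrative helper names, not functions from the instrumentation itself.

import math

# Mirrors the constant added in bedrock.py: roughly 6 characters per token,
# per the AWS Bedrock model customization guidance linked in the diff.
_CHARS_PER_TOKEN: int = 6


def estimate_token_count(text: str) -> int:
    # Hypothetical helper: approximate token usage from character length
    # for models that omit token counts in the response body.
    return math.ceil(len(text) / _CHARS_PER_TOKEN)


def normalize_to_messages(decoded_body: dict, model_id: str) -> list:
    # Hypothetical helper: convert a prompt-style InvokeModel body into the
    # messages shape, roughly as _get_request_messages does above.
    messages = decoded_body.get("messages", [])
    if not messages:
        if "amazon.titan" in model_id:
            input_text = decoded_body.get("inputText")
        else:  # cohere.command, meta.llama, mistral.mistral style bodies
            input_text = decoded_body.get("prompt")
        if input_text:
            messages = [{"role": "user", "content": [{"text": input_text}]}]
    return messages


# Usage example
assert estimate_token_count("Hello, Bedrock!") == 3  # ceil(15 / 6)
assert normalize_to_messages({"prompt": "say hi"}, "cohere.command-light-text-v14") == [
    {"role": "user", "content": [{"text": "say hi"}]}
]

The estimate trades accuracy for coverage: GEN_AI_USAGE_INPUT_TOKENS and GEN_AI_USAGE_OUTPUT_TOKENS stay populated for models whose responses carry no real token counts.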

instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_no_content[cohere.command-r].yaml  +1 −1

@@ -29,7 +29,7 @@ interactions:
       authorization:
       - Bearer test_aws_authorization
     method: POST
-    uri: https://bedrock-runtime.us-west-2.amazonaws.com/model/cohere.command-r-v1%3A0/invoke
+    uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/cohere.command-r-v1%3A0/invoke
   response:
     body:
       string: |-

instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_no_content[cohere.command].yaml  +1 −1

@@ -29,7 +29,7 @@ interactions:
       authorization:
       - Bearer test_aws_authorization
     method: POST
-    uri: https://bedrock-runtime.us-west-2.amazonaws.com/model/cohere.command-light-text-v14/invoke
+    uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/cohere.command-light-text-v14/invoke
   response:
     body:
       string: |-

instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_no_content[meta.llama].yaml  +1 −1

@@ -26,7 +26,7 @@ interactions:
       authorization:
       - Bearer test_aws_authorization
     method: POST
-    uri: https://bedrock-runtime.us-west-2.amazonaws.com/model/meta.llama3-1-70b-instruct-v1%3A0/invoke
+    uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/meta.llama3-1-70b-instruct-v1%3A0/invoke
   response:
     body:
       string: |-

instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_no_content[mistral.mistral].yaml  +1 −1

@@ -29,7 +29,7 @@ interactions:
       authorization:
       - Bearer test_aws_authorization
     method: POST
-    uri: https://bedrock-runtime.us-west-2.amazonaws.com/model/mistral.mistral-7b-instruct-v0%3A2/invoke
+    uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/mistral.mistral-7b-instruct-v0%3A2/invoke
   response:
     body:
       string: |-

instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_content[cohere.command-r].yaml  +1 −1

@@ -29,7 +29,7 @@ interactions:
       authorization:
       - Bearer test_aws_authorization
     method: POST
-    uri: https://bedrock-runtime.us-west-2.amazonaws.com/model/cohere.command-r-v1%3A0/invoke
+    uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/cohere.command-r-v1%3A0/invoke
   response:
     body:
       string: |-

instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_content[cohere.command].yaml  +1 −1

@@ -29,7 +29,7 @@ interactions:
       authorization:
       - Bearer test_aws_authorization
     method: POST
-    uri: https://bedrock-runtime.us-west-2.amazonaws.com/model/cohere.command-light-text-v14/invoke
+    uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/cohere.command-light-text-v14/invoke
   response:
     body:
       string: |-

instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_content[meta.llama].yaml  +1 −1

@@ -26,7 +26,7 @@ interactions:
       authorization:
       - Bearer test_aws_authorization
     method: POST
-    uri: https://bedrock-runtime.us-west-2.amazonaws.com/model/meta.llama3-1-70b-instruct-v1%3A0/invoke
+    uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/meta.llama3-1-70b-instruct-v1%3A0/invoke
   response:
     body:
       string: |-

instrumentation/opentelemetry-instrumentation-botocore/tests/cassettes/test_invoke_model_with_content[mistral.mistral].yaml  +1 −1

@@ -29,7 +29,7 @@ interactions:
       authorization:
       - Bearer test_aws_authorization
     method: POST
-    uri: https://bedrock-runtime.us-west-2.amazonaws.com/model/mistral.mistral-7b-instruct-v0%3A2/invoke
+    uri: https://bedrock-runtime.us-east-1.amazonaws.com/model/mistral.mistral-7b-instruct-v0%3A2/invoke
   response:
     body:
       string: |-
