From 9d5b390ca06ab5836eb3804f04ab75642abc1c63 Mon Sep 17 00:00:00 2001 From: Michael Aaron Safyan Date: Wed, 5 Mar 2025 16:12:15 -0500 Subject: [PATCH 01/14] Remove bespoke request mocker. Replace with direct mocking of the underlying API. --- .../tests/common/base.py | 17 +- .../tests/common/requests_mocker.py | 238 ------------------ .../tests/generate_content/base.py | 133 ++++++++++ .../generate_content/nonstreaming_base.py | 22 +- .../tests/generate_content/streaming_base.py | 12 +- .../tests/generate_content/util.py | 77 ++++-- 6 files changed, 209 insertions(+), 290 deletions(-) delete mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/requests_mocker.py create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/base.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/base.py index 307dafda13..806907ff38 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/base.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/base.py @@ -19,7 +19,6 @@ from .instrumentation_context import InstrumentationContext from .otel_mocker import OTelMocker -from .requests_mocker import RequestsMocker class _FakeCredentials(google.auth.credentials.AnonymousCredentials): @@ -31,8 +30,6 @@ class TestCase(unittest.TestCase): def setUp(self): self._otel = OTelMocker() self._otel.install() - self._requests = RequestsMocker() - self._requests.install() self._instrumentation_context = None self._api_key = "test-api-key" self._project = "test-project" @@ -51,10 +48,6 @@ def client(self): self._client = self._create_client() return self._client - @property - def requests(self): - return self._requests - @property def otel(self): return self._otel @@ -62,6 +55,15 @@ def otel(self): def set_use_vertex(self, 
use_vertex): self._uses_vertex = use_vertex + def reset_client(self): + self._client = None + + def reset_instrumentation(self): + if self._instrumentation_context is None: + return + self._instrumentation_context.uninstall() + self._instrumentation_context = None + def _create_client(self): self._lazy_init() if self._uses_vertex: @@ -77,5 +79,4 @@ def _create_client(self): def tearDown(self): if self._instrumentation_context is not None: self._instrumentation_context.uninstall() - self._requests.uninstall() self._otel.uninstall() diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/requests_mocker.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/requests_mocker.py deleted file mode 100644 index 1838ad4e79..0000000000 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/requests_mocker.py +++ /dev/null @@ -1,238 +0,0 @@ -# Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This file defines a "RequestMocker" that facilities mocking the "requests" -# API. There are a few reasons that we use this approach to testing: -# -# 1. Security - although "vcrpy" provides a means of filtering data, -# it can be error-prone; use of this solution risks exposing API keys, -# auth tokens, etc. 
It can also inadvertently record fields that are -# visibility-restricted (such as fields that are returned and available -# when recording using privileged API keys where such fields would not -# ordinarily be returned to users with non-privileged API keys). -# -# 2. Reproducibility - although the tests may be reproducible once the -# recording is present, updating the recording often has external -# dependencies that may be difficult to reproduce. -# -# 3. Costs - there are both time costs and monetary costs to the external -# dependencies required for a record/replay solution. -# -# Because they APIs that need to be mocked are simple enough and well documented -# enough, it seems approachable to mock the requests library, instead. - -import copy -import functools -import http.client -import io -import json -from typing import Optional - -import requests -import requests.sessions - - -class RequestsCallArgs: - def __init__( - self, - session: requests.sessions.Session, - request: requests.PreparedRequest, - **kwargs, - ): - self._session = session - self._request = request - self._kwargs = kwargs - - @property - def session(self): - return self._session - - @property - def request(self): - return self._request - - @property - def kwargs(self): - return self._kwargs - - -class RequestsCall: - def __init__(self, args: RequestsCallArgs, response_generator): - self._args = args - self._response_generator = response_generator - - @property - def args(self): - return self._args - - @property - def response(self): - return self._response_generator(self._args) - - -def _return_error_status( - args: RequestsCallArgs, status_code: int, reason: Optional[str] = None -): - result = requests.Response() - result.url = args.request.url - result.status_code = status_code - result.reason = reason or http.client.responses.get(status_code) - result.request = args.request - return result - - -def _return_404(args: RequestsCallArgs): - return _return_error_status(args, 404, "Not 
Found") - - -def _to_response_generator(response): - if response is None: - raise ValueError("response must not be None") - if isinstance(response, int): - return lambda args: _return_error_status(args, response) - if isinstance(response, requests.Response): - - def generate_response_from_response(args): - new_response = copy.deepcopy(response) - new_response.request = args.request - new_response.url = args.request.url - return new_response - - return generate_response_from_response - if isinstance(response, dict): - - def generate_response_from_dict(args): - result = requests.Response() - result.status_code = 200 - result.headers["content-type"] = "application/json" - result.encoding = "utf-8" - result.raw = io.BytesIO(json.dumps(response).encode()) - return result - - return generate_response_from_dict - raise ValueError(f"Unsupported response type: {type(response)}") - - -def _to_stream_response_generator(response_generators): - if len(response_generators) == 1: - return response_generators[0] - - def combined_generator(args): - first_response = response_generators[0](args) - if first_response.status_code != 200: - return first_response - result = requests.Response() - result.status_code = 200 - result.headers["content-type"] = "application/json" - result.encoding = "utf-8" - result.headers["transfer-encoding"] = "chunked" - contents = [] - for generator in response_generators: - response = generator(args) - if response.status_code != 200: - continue - response_json = response.json() - response_json_str = json.dumps(response_json) - contents.append(f"data: {response_json_str}") - contents_str = "\r\n".join(contents) - full_contents = f"{contents_str}\r\n\r\n" - result.raw = io.BytesIO(full_contents.encode()) - return result - - return combined_generator - - -class RequestsMocker: - def __init__(self): - self._original_send = requests.sessions.Session.send - self._calls = [] - self._handlers = [] - - def install(self): - 
@functools.wraps(requests.sessions.Session.send) - def replacement_send( - s: requests.sessions.Session, - request: requests.PreparedRequest, - **kwargs, - ): - return self._do_send(s, request, **kwargs) - - requests.sessions.Session.send = replacement_send - - def uninstall(self): - requests.sessions.Session.send = self._original_send - - def reset(self): - self._calls = [] - self._handlers = [] - - def add_response(self, response, if_matches=None): - self._handlers.append((if_matches, _to_response_generator(response))) - - @property - def calls(self): - return self._calls - - def _do_send( - self, - session: requests.sessions.Session, - request: requests.PreparedRequest, - **kwargs, - ): - stream = kwargs.get("stream", False) - if not stream: - return self._do_send_non_streaming(session, request, **kwargs) - return self._do_send_streaming(session, request, **kwargs) - - def _do_send_streaming( - self, - session: requests.sessions.Session, - request: requests.PreparedRequest, - **kwargs, - ): - args = RequestsCallArgs(session, request, **kwargs) - response_generators = [] - for matcher, response_generator in self._handlers: - if matcher is None: - response_generators.append(response_generator) - elif matcher(args): - response_generators.append(response_generator) - if not response_generators: - response_generators.append(_return_404) - response_generator = _to_stream_response_generator(response_generators) - call = RequestsCall(args, response_generator) - result = call.response - self._calls.append(call) - return result - - def _do_send_non_streaming( - self, - session: requests.sessions.Session, - request: requests.PreparedRequest, - **kwargs, - ): - args = RequestsCallArgs(session, request, **kwargs) - response_generator = self._lookup_response_generator(args) - call = RequestsCall(args, response_generator) - result = call.response - self._calls.append(call) - return result - - def _lookup_response_generator(self, args: RequestsCallArgs): - for matcher, 
response_generator in self._handlers: - if matcher is None: - return response_generator - if matcher(args): - return response_generator - return _return_404 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/base.py new file mode 100644 index 0000000000..728d516ae7 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/base.py @@ -0,0 +1,133 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import unittest +import unittest.mock + +from google.genai.models import Models, AsyncModels +from ..common.base import TestCase as CommonTestCaseBase +from .util import convert_to_response, create_response + + +class TestCase(CommonTestCaseBase): + # The "setUp" function is defined by "unittest.TestCase" and thus + # this name must be used. Uncertain why pylint doesn't seem to + # recognize that this is a unit test class for which this is inherited. 
+ def setUp(self): # pylint: disable=invalid-name + super().setUp() + if self.__class__ == TestCase: + raise unittest.SkipTest("Skipping testcase base.") + self._generate_content_mock = None + self._generate_content_stream_mock = None + self._original_generate_content = Models.generate_content + self._original_generate_content_stream = Models.generate_content_stream + self._original_async_generate_content = AsyncModels.generate_content + self._original_async_generate_content_stream = ( + AsyncModels.generate_content_stream + ) + self._responses = [] + self._response_index = 0 + + @property + def mock_generate_content(self): + if self._generate_content_mock is None: + self._create_and_install_mocks() + return self._generate_content_mock + + @property + def mock_generate_content_stream(self): + if self._generate_content_stream_mock is None: + self._create_and_install_mocks() + return self._generate_content_stream_mock + + def configure_valid_response(self, **kwargs): + self._create_and_install_mocks() + response = create_response(**kwargs) + self._responses.append(response) + + def _create_and_install_mocks(self): + if self._generate_content_mock is not None: + return + self.reset_client() + self.reset_instrumentation() + self._generate_content_mock = self._create_nonstream_mock() + self._generate_content_stream_mock = self._create_stream_mock() + self._install_mocks() + + def _create_nonstream_mock(self): + mock = unittest.mock.MagicMock() + def _default_impl(*args, **kwargs): + if not self._responses: + return create_response(text="Some response") + index = self._response_index % len(self._responses) + result = self._responses[index] + self._response_index += 1 + return result + mock.side_effect = _default_impl + return mock + + def _create_stream_mock(self): + mock = unittest.mock.MagicMock() + def _default_impl(*args, **kwargs): + for response in self._responses: + yield response + mock.side_effect = _default_impl + return mock + + def _install_mocks(self): + 
output_wrapped = self._wrap_output(self._generate_content_mock) + output_wrapped_stream = self._wrap_output_stream(self._generate_content_stream_mock) + Models.generate_content = output_wrapped + Models.generate_content_stream = output_wrapped_stream + AsyncModels.generate_content = self._async_wrapper(output_wrapped) + AsyncModels.generate_content_stream = self._async_stream_wrapper(output_wrapped_stream) + + def _wrap_output(self, mock_generate_content): + def _wrapped(*args, **kwargs): + return convert_to_response(mock_generate_content(*args, **kwargs)) + return _wrapped + + def _wrap_output_stream(self, mock_generate_content_stream): + def _wrapped(*args, **kwargs): + for output in mock_generate_content_stream(*args, **kwargs): + yield convert_to_response(output) + return _wrapped + + def _async_wrapper(self, mock_generate_content): + async def _wrapped(*args, **kwargs): + return mock_generate_content(*args, **kwargs) + return _wrapped + + def _async_stream_wrapper(self, mock_generate_content_stream): + async def _wrapped(*args, **kwargs): + async def _internal_generator(): + for result in mock_generate_content_stream(*args, **kwargs): + yield result + return _internal_generator() + return _wrapped + + def tearDown(self): + super().tearDown() + if self._generate_content_mock is None: + assert Models.generate_content == self._original_generate_content + assert Models.generate_content_stream == self._original_generate_content_stream + assert AsyncModels.generate_content == self._original_async_generate_content + assert AsyncModels.generate_content_stream == self._original_async_generate_content_stream + Models.generate_content = self._original_generate_content + Models.generate_content_stream = self._original_generate_content_stream + AsyncModels.generate_content = self._original_async_generate_content + AsyncModels.generate_content_stream = ( + self._original_async_generate_content_stream + ) \ No newline at end of file diff --git 
a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py index 0bd8e8d9c3..9bd5df8157 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/nonstreaming_base.py @@ -16,8 +16,7 @@ import os import unittest -from ..common.base import TestCase -from .util import create_valid_response +from .base import TestCase class NonStreamingTestCase(TestCase): @@ -36,18 +35,15 @@ def generate_content(self, *args, **kwargs): def expected_function_name(self): raise NotImplementedError("Must implement 'expected_function_name'.") - def configure_valid_response(self, *args, **kwargs): - self.requests.add_response(create_valid_response(*args, **kwargs)) - def test_instrumentation_does_not_break_core_functionality(self): - self.configure_valid_response(response_text="Yep, it works!") + self.configure_valid_response(text="Yep, it works!") response = self.generate_content( model="gemini-2.0-flash", contents="Does this work?" ) self.assertEqual(response.text, "Yep, it works!") def test_generates_span(self): - self.configure_valid_response(response_text="Yep, it works!") + self.configure_valid_response(text="Yep, it works!") response = self.generate_content( model="gemini-2.0-flash", contents="Does this work?" ) @@ -55,7 +51,7 @@ def test_generates_span(self): self.otel.assert_has_span_named("generate_content gemini-2.0-flash") def test_model_reflected_into_span_name(self): - self.configure_valid_response(response_text="Yep, it works!") + self.configure_valid_response(text="Yep, it works!") response = self.generate_content( model="gemini-1.5-flash", contents="Does this work?" 
) @@ -63,7 +59,7 @@ def test_model_reflected_into_span_name(self): self.otel.assert_has_span_named("generate_content gemini-1.5-flash") def test_generated_span_has_minimal_genai_attributes(self): - self.configure_valid_response(response_text="Yep, it works!") + self.configure_valid_response(text="Yep, it works!") self.generate_content( model="gemini-2.0-flash", contents="Does this work?" ) @@ -75,7 +71,7 @@ def test_generated_span_has_minimal_genai_attributes(self): ) def test_generated_span_has_correct_function_name(self): - self.configure_valid_response(response_text="Yep, it works!") + self.configure_valid_response(text="Yep, it works!") self.generate_content( model="gemini-2.0-flash", contents="Does this work?" ) @@ -87,7 +83,7 @@ def test_generated_span_has_correct_function_name(self): def test_generated_span_has_vertex_ai_system_when_configured(self): self.set_use_vertex(True) - self.configure_valid_response(response_text="Yep, it works!") + self.configure_valid_response(text="Yep, it works!") self.generate_content( model="gemini-2.0-flash", contents="Does this work?" 
) @@ -170,7 +166,7 @@ def test_records_response_as_log(self): os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = ( "true" ) - self.configure_valid_response(response_text="Some response content") + self.configure_valid_response(text="Some response content") self.generate_content(model="gemini-2.0-flash", contents="Some input") self.otel.assert_has_event_named("gen_ai.choice") event_record = self.otel.get_event_named("gen_ai.choice") @@ -183,7 +179,7 @@ def test_does_not_record_response_as_log_if_disabled_by_env(self): os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = ( "false" ) - self.configure_valid_response(response_text="Some response content") + self.configure_valid_response(text="Some response content") self.generate_content(model="gemini-2.0-flash", contents="Some input") self.otel.assert_has_event_named("gen_ai.choice") event_record = self.otel.get_event_named("gen_ai.choice") diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/streaming_base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/streaming_base.py index 96c8c0ca1f..66985ce399 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/streaming_base.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/streaming_base.py @@ -14,9 +14,7 @@ import unittest -from ..common.base import TestCase -from .util import create_valid_response - +from .base import TestCase class StreamingTestCase(TestCase): # The "setUp" function is defined by "unittest.TestCase" and thus @@ -34,11 +32,9 @@ def generate_content(self, *args, **kwargs): def expected_function_name(self): raise NotImplementedError("Must implement 'expected_function_name'.") - def configure_valid_response(self, *args, **kwargs): - self.requests.add_response(create_valid_response(*args, **kwargs)) def 
test_instrumentation_does_not_break_core_functionality(self): - self.configure_valid_response(response_text="Yep, it works!") + self.configure_valid_response(text="Yep, it works!") responses = self.generate_content( model="gemini-2.0-flash", contents="Does this work?" ) @@ -47,8 +43,8 @@ def test_instrumentation_does_not_break_core_functionality(self): self.assertEqual(response.text, "Yep, it works!") def test_handles_multiple_ressponses(self): - self.configure_valid_response(response_text="First response") - self.configure_valid_response(response_text="Second response") + self.configure_valid_response(text="First response") + self.configure_valid_response(text="Second response") responses = self.generate_content( model="gemini-2.0-flash", contents="Does this work?" ) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/util.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/util.py index d658f932e8..6f56b5b5b5 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/util.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/util.py @@ -12,27 +12,58 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Optional, Union -def create_valid_response( - response_text="The model response", input_tokens=10, output_tokens=20 -): - return { - "modelVersion": "gemini-2.0-flash-test123", - "usageMetadata": { - "promptTokenCount": input_tokens, - "candidatesTokenCount": output_tokens, - "totalTokenCount": input_tokens + output_tokens, - }, - "candidates": [ - { - "content": { - "role": "model", - "parts": [ - { - "text": response_text, - } - ], - } - } - ], - } +import google.genai +import google.genai.types as genai_types + + +def create_response( + part: Optional[genai_types.Part] = None, + parts: Optional[list[genai_types.Part]] = None, + content: Optional[genai_types.Content] = None, + candidate: Optional[genai_types.Candidate] = None, + candidates: Optional[list[genai_types.Candidate]] = None, + text: Optional[str] = None, + input_tokens: Optional[int]=None, + output_tokens: Optional[int]=None, + model_version: Optional[str]=None, + usage_metadata: Optional[genai_types.GenerateContentResponseUsageMetadata]=None, + **kwargs) -> genai_types.GenerateContentResponse: + # Build up the "candidates" subfield + if text is None: + text = 'Some response text' + if part is None: + part = genai_types.Part(text=text) + if parts is None: + parts = [part] + if content is None: + content = genai_types.Content(parts=parts, role='model') + if candidate is None: + candidate = genai_types.Candidate(content=content) + if candidates is None: + candidates = [candidate] + + # Build up the "usage_metadata" subfield + if usage_metadata is None: + usage_metadata = genai_types.GenerateContentResponseUsageMetadata() + if input_tokens is not None: + usage_metadata.prompt_token_count = input_tokens + if output_tokens is not None: + usage_metadata.candidates_token_count = output_tokens + return genai_types.GenerateContentResponse( + candidates=candidates, + usage_metadata=usage_metadata, + model_version=model_version, + **kwargs) + + +def convert_to_response( + arg: Union[str, 
genai_types.GenerateContentResponse, dict]) -> genai_types.GenerateContentResponse: + if isinstance(arg, str): + return create_response(text=arg) + if isinstance(arg, genai_types.GenerateContentResponse): + return arg + if isinstance(arg, dict): + return create_response(**arg) + raise ValueError(f"Unsure how to convert {arg} of type {arg.__class__.__name__} to response.") From 82ea24ad7781e3a60516a60dbcd34d1d32c22f0a Mon Sep 17 00:00:00 2001 From: Michael Aaron Safyan Date: Wed, 5 Mar 2025 17:17:54 -0500 Subject: [PATCH 02/14] Refactor fake credentials to enable reuse. --- .../tests/common/auth.py | 20 +++++++++++++++++++ .../tests/common/base.py | 8 ++------ 2 files changed, 22 insertions(+), 6 deletions(-) create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py new file mode 100644 index 0000000000..b58b7284a2 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py @@ -0,0 +1,20 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import google.auth + +class FakeCredentials(google.auth.credentials.AnonymousCredentials): + + def refresh(self, request): + pass \ No newline at end of file diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/base.py index 806907ff38..0303a1e67d 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/base.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/base.py @@ -17,15 +17,11 @@ import google.genai +from .auth import FakeCredentials from .instrumentation_context import InstrumentationContext from .otel_mocker import OTelMocker -class _FakeCredentials(google.auth.credentials.AnonymousCredentials): - def refresh(self, request): - pass - - class TestCase(unittest.TestCase): def setUp(self): self._otel = OTelMocker() @@ -36,7 +32,7 @@ def setUp(self): self._location = "test-location" self._client = None self._uses_vertex = False - self._credentials = _FakeCredentials() + self._credentials = FakeCredentials() def _lazy_init(self): self._instrumentation_context = InstrumentationContext() From 3f621c21ef8a333d14d7daafd9ec9cae5764c3ac Mon Sep 17 00:00:00 2001 From: Michael Aaron Safyan Date: Wed, 5 Mar 2025 17:25:21 -0500 Subject: [PATCH 03/14] Add module to test end to end with a real client. 
--- .../tests/generate_content/test_e2e.py | 186 ++++++++++++++++++ 1 file changed, 186 insertions(+) create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py new file mode 100644 index 0000000000..e879c75e13 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py @@ -0,0 +1,186 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import google.genai +from google.genai import types as genai_types +import os +import vcr +from vcr.record_mode import RecordMode +import logging +import asyncio +import pytest + +from ..common.auth import FakeCredentials + +from opentelemetry.instrumentation.google_genai import ( + GoogleGenAiSdkInstrumentor, +) + + +@pytest.fixture +def instrumentor(): + return GoogleGenAiSdkInstrumentor() + + +@pytest.fixture(autouse=True) +def setup_instrumentation(instrumentor): + instrumentor.instrument() + yield + instrumentor.uninstrument() + + +@pytest.fixture(autouse=True, params=[True, False]) +def setup_content_recording(request): + os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = str(request.param) + + +@pytest.fixture +def vcr_record_mode(vcr): + return vcr.record_mode + + +@pytest.fixture +def in_replay_mode(vcr_record_mode): + return vcr_record_mode == RecordMode.NONE + + +@pytest.fixture +def gcloud_project(in_replay_mode): + if in_replay_mode: + return "test-project" + _, from_creds = google.auth.default() + return from_creds + + +@pytest.fixture +def gcloud_location(in_replay_mode): + if in_replay_mode: + return "test-location" + return os.getenv("GCLOUD_LOCATION") + + +@pytest.fixture +def gcloud_credentials(in_replay_mode): + if in_replay_mode: + return FakeCredentials() + creds, _ = google.auth.default() + return creds + + +@pytest.fixture(autouse=True) +def gcloud_api_key(in_replay_mode): + if in_replay_mode: + os.environ["GOOGLE_API_KEY"] = "test-api-key" + return "test-api-key" + return os.getenv("GOOGLE_API_KEY") + + +@pytest.fixture +def nonvertex_client_factory(gcloud_api_key): + def _factory(): + return google.genai.Client(api_key=gcloud_api_key) + return _factory + + +@pytest.fixture +def vertex_client_factory(in_replay_mode): + def _factory(): + return google.genai.Client( + vertexai=True, + project=gcloud_project, + location=gcloud_location, + credentials=gcloud_credentials) + return _factory + + +@pytest.fixture(params=[True, 
False]) +def use_vertex(request): + return request.param + + +@pytest.fixture +def client(vertex_client_factory, nonvertex_client_factory, use_vertex): + if use_vertex: + return vertex_client_factory() + return nonvertex_client_factory() + + +@pytest.fixture(params=[True, False]) +def is_async(request): + return request.param + + +@pytest.fixture(params=["gemini-1.0-flash", "gemini-2.0-flash"]) +def model(request): + return request.param + + +@pytest.fixture +def generate_content(client, is_async): + def _sync_impl(*args, **kwargs): + return client.models.generate_content(*args, **kwargs) + + def _async_impl(*args, **kwargs): + return asyncio.run(client.aio.models.generate_content(*args, **kwargs)) + + if is_async: + return _async_impl + return _sync_impl + + +@pytest.fixture +def generate_content_stream(client, is_async): + def _sync_impl(*args, **kwargs): + results = [] + for result in client.models.generate_content_stream(*args, **kwargs): + results.append(result) + return results + + def _async_impl(*args, **kwargs): + async def _gather_all(): + results = [] + async for result in await client.aio.models.generate_content_stream(*args, **kwargs): + results.append(result) + return results + return asyncio.run(_gather_all()) + + if is_async: + return _async_impl + return _sync_impl + + +@pytest.mark.vcr +def test_single_response(generate_content, model): + response = generate_content( + model=model, + contents="Create a poem about Open Telemetry.") + assert response is not None + assert response.text is not None + assert len(response.text) > 0 + + +@pytest.mark.vcr +def test_multiple_responses(generate_content_stream, model): + count = 0 + for response in generate_content_stream( + model=model, + contents="Create a poem about Open Telemetry.", + config=genai_types.GenerateContentConfig(candidate_count=2)): + assert response is not None + assert response.text is not None + assert len(response.text) > 0 + count += 1 + assert count == 2 + From 
4424465741470272f371d35c8db1ca95b520522d Mon Sep 17 00:00:00 2001 From: Michael Aaron Safyan Date: Thu, 6 Mar 2025 11:22:59 -0500 Subject: [PATCH 04/14] Add redaction and minimal OTel mocking/testing in the e2e test. --- .../tests/generate_content/test_e2e.py | 82 ++++++++++++++++++- 1 file changed, 79 insertions(+), 3 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py index e879c75e13..1e401fbba9 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py @@ -12,6 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +"""High level end-to-end test of the generate content mocking. + +The primary purpose of this test is to verify that the instrumentation +package does not break the underlying GenAI SDK that it instruments. + +This test suite also has some minimal validation of the instrumentation +outputs; however, validating the instrumentation output (other than +verifying that instrumentation does not break the GenAI SDK) is a +secondary goal of this test. 
Detailed testing of the instrumentation
+output is the purview of the other tests in this directory."""
 
 import google.genai
 from google.genai import types as genai_types
@@ -23,12 +33,69 @@
 import pytest
 
 from ..common.auth import FakeCredentials
+from ..common.otel_mocker import OTelMocker
 
 from opentelemetry.instrumentation.google_genai import (
     GoogleGenAiSdkInstrumentor,
 )
 
 
+def _should_redact_header(header_key):
+    if header_key.startswith('x-goog'):
+        return True
+    if header_key.startswith('sec-goog'):
+        return True
+    return False
+
+
+def _redact_headers(headers):
+    for header_key in headers:
+        if _should_redact_header(header_key.lower()):
+            del headers[header_key]
+
+
+def _before_record_request(request):
+    _redact_headers(request.headers)
+    return request
+
+
+def _before_record_response(response):
+    _redact_headers(response.headers)
+    return response
+
+
+@pytest.fixture(scope='module')
+def vcr_config():
+    return {
+        'filter_query_parameters': [
+            'key',
+            'apiKey',
+            'quotaUser',
+            'userProject',
+            'token',
+            'access_token',
+            'accessToken',
+            'refresh_token',
+            'refreshToken',
+            'authuser',
+            'bearer',
+            'bearer_token',
+            'bearerToken',
+            'userIp',
+        ],
+        'filter_post_data_parameters': [
+            'apikey',
+            'api_key',
+            'key'
+        ],
+        'filter_headers': [
+            'authorization',
+        ],
+        'before_record_request': _before_record_request,
+        'before_record_response': _before_record_response,
+    }
+
+
 @pytest.fixture
 def instrumentor():
     return GoogleGenAiSdkInstrumentor()
@@ -41,6 +108,14 @@ def setup_instrumentation(instrumentor):
     instrumentor.uninstrument()
 
 
+@pytest.fixture(autouse=True)
+def otel_mocker():
+    result = OTelMocker()
+    result.install()
+    yield result
+    result.uninstall()
+
+
 @pytest.fixture(autouse=True, params=[True, False])
 def setup_content_recording(request):
     os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = str(request.param)
 
 
 @pytest.fixture
@@ -162,17 +237,18 @@ async def _gather_all():
 
 
 @pytest.mark.vcr
-def test_single_response(generate_content, model):
+def test_single_response(generate_content, model, otel_mocker): response = generate_content( model=model, contents="Create a poem about Open Telemetry.") assert response is not None assert response.text is not None assert len(response.text) > 0 + otel_mocker.assert_has_span_named(f"generate_content {model}") @pytest.mark.vcr -def test_multiple_responses(generate_content_stream, model): +def test_multiple_responses(generate_content_stream, model, otel_mocker): count = 0 for response in generate_content_stream( model=model, @@ -183,4 +259,4 @@ def test_multiple_responses(generate_content_stream, model): assert len(response.text) > 0 count += 1 assert count == 2 - + otel_mocker.assert_has_span_named(f"generate_content {model}") From 14257890f2d60d84a16393f67ecf5fbc9ed6d87b Mon Sep 17 00:00:00 2001 From: Michael Aaron Safyan Date: Thu, 6 Mar 2025 11:27:32 -0500 Subject: [PATCH 05/14] Fix wording of the documentation. --- .../tests/generate_content/test_e2e.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py index 1e401fbba9..f00f8c6603 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""High level end-to-end test of the generate content mocking. +"""High level end-to-end test of the generate content instrumentation. The primary purpose of this test is to verify that the instrumentation package does not break the underlying GenAI SDK that it instruments. 
From 7b5605be6f4f0c57fbf0c0a09619a03c8ce6fc72 Mon Sep 17 00:00:00 2001 From: Michael Aaron Safyan Date: Thu, 6 Mar 2025 11:29:20 -0500 Subject: [PATCH 06/14] Remove vcr migration from TODOs. --- .../opentelemetry-instrumentation-google-genai/TODOS.md | 1 - 1 file changed, 1 deletion(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/TODOS.md b/instrumentation-genai/opentelemetry-instrumentation-google-genai/TODOS.md index 5bd8ec50fa..16a8299e2a 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/TODOS.md +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/TODOS.md @@ -13,7 +13,6 @@ Here are some TODO items required to achieve stability for this package: - Additional cleanup/improvement tasks such as: - Adoption of 'wrapt' instead of 'functools.wraps' - Bolstering test coverage - - Migrate tests to use VCR.py ## Future From 38be0b1c9bc7e20f5eebc568922aef8a4e6e48bd Mon Sep 17 00:00:00 2001 From: Michael Aaron Safyan Date: Thu, 6 Mar 2025 15:58:27 -0500 Subject: [PATCH 07/14] Improve redaction and test naming. --- .../tests/common/auth.py | 5 +- .../tests/generate_content/test_e2e.py | 140 ++++++++++++++++-- 2 files changed, 131 insertions(+), 14 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py index b58b7284a2..d6ef5b66d8 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import google.auth
+import google.auth.credentials
+
 
 class FakeCredentials(google.auth.credentials.AnonymousCredentials):
     def refresh(self, request):
-        pass
\ No newline at end of file
+        pass
diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py
index f00f8c6603..1e462b16f0 100644
--- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py
+++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py
@@ -23,6 +23,12 @@
 secondary goal of this test. Detailed testing of the instrumentation
 output is the purview of the other tests in this directory."""
 
+
+import subprocess
+import json
+import yaml
+import google.auth
+import google.auth.credentials
 import google.genai
 from google.genai import types as genai_types
 import os
@@ -45,22 +51,28 @@ def _should_redact_header(header_key):
         return True
     if header_key.startswith('sec-goog'):
         return True
+    if header_key in ['server', 'server-timing']:
+        return True
     return False
 
 
 def _redact_headers(headers):
+    to_redact = []
     for header_key in headers:
         if _should_redact_header(header_key.lower()):
-            del headers[header_key]
-
+            to_redact.append(header_key)
+    for header_key in to_redact:
+        headers[header_key] = ""
 
 def _before_record_request(request):
-    _redact_headers(request.headers)
+    if request.headers:
+        _redact_headers(request.headers)
     return request
 
 
 def _before_record_response(response):
-    _redact_headers(response.headers)
+    if hasattr(response, "headers") and response.headers:
+        _redact_headers(response.headers)
     return response
 
 
@@ -89,13 +101,91 @@ def vcr_config():
             'key'
         ],
         'filter_headers': [
+            'x-goog-api-key',
             'authorization',
+            'server',
+            'Server',
+            'Server-Timing',
+            'Date',
         ],
         'before_record_request': _before_record_request,
         'before_record_response': 
_before_record_response,
+        'ignore_hosts': [
+            'oauth2.googleapis.com',
+            'iam.googleapis.com',
+        ],
     }
 
 
+class _LiteralBlockScalar(str):
+    """Formats the string as a literal block scalar, preserving whitespace and
+    without interpreting escape characters"""
+
+
+def _literal_block_scalar_presenter(dumper, data):
+    """Represents a scalar string as a literal block, via '|' syntax"""
+    return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")
+
+
+@pytest.fixture(scope="module", autouse=True)
+def setup_yaml_pretty_formatting():
+    yaml.add_representer(_LiteralBlockScalar, _literal_block_scalar_presenter)
+
+
+def _process_string_value(string_value):
+    """Pretty-prints JSON or returns long strings as a LiteralBlockScalar"""
+    try:
+        json_data = json.loads(string_value)
+        return _LiteralBlockScalar(json.dumps(json_data, indent=2))
+    except (ValueError, TypeError):
+        if len(string_value) > 80:
+            return _LiteralBlockScalar(string_value)
+        return string_value
+
+
+def _convert_body_to_literal(data):
+    """Searches the data for body strings, attempting to pretty-print JSON"""
+    if isinstance(data, dict):
+        for key, value in data.items():
+            # Handle response body case (e.g., response.body.string)
+            if key == "body" and isinstance(value, dict) and "string" in value:
+                value["string"] = _process_string_value(value["string"])
+
+            # Handle request body case (e.g., request.body)
+            elif key == "body" and isinstance(value, str):
+                data[key] = _process_string_value(value)
+
+            else:
+                _convert_body_to_literal(value)
+
+    elif isinstance(data, list):
+        for idx, choice in enumerate(data):
+            data[idx] = _convert_body_to_literal(choice)
+
+    return data
+
+
+class _PrettyPrintJSONBody:
+    """This makes request and response body recordings more readable."""
+
+    @staticmethod
+    def serialize(cassette_dict):
+        cassette_dict = _convert_body_to_literal(cassette_dict)
+        return yaml.dump(
+            cassette_dict, default_flow_style=False, allow_unicode=True
+        )
+
+    @staticmethod
+    def 
deserialize(cassette_string): + return yaml.load(cassette_string, Loader=yaml.Loader) + + +@pytest.fixture(scope="module", autouse=True) +def setup_vcr(vcr): + vcr.register_serializer("yaml", _PrettyPrintJSONBody) + return vcr + + @pytest.fixture def instrumentor(): return GoogleGenAiSdkInstrumentor() @@ -116,9 +206,10 @@ def otel_mocker(): result.uninstall() -@pytest.fixture(autouse=True, params=[True, False]) +@pytest.fixture(autouse=True, params=["logcontent", "excludecontent"]) def setup_content_recording(request): - os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = str(request.param) + enabled = request.param == "logcontent" + os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = str(enabled) @pytest.fixture @@ -131,10 +222,29 @@ def in_replay_mode(vcr_record_mode): return vcr_record_mode == RecordMode.NONE +def _try_get_project_from_gcloud(): + try: + gcloud_call_result = subprocess.run("gcloud config get project", shell=True, capture_output=True) + except subprocess.CalledProcessError: + return None + gcloud_output = gcloud_call_result.stdout.decode() + return gcloud_output.strip() + + @pytest.fixture def gcloud_project(in_replay_mode): if in_replay_mode: return "test-project" + project_envs = ["GCLOUD_PROJECT", "GOOGLE_CLOUD_PROJECT"] + for project_env in project_envs: + project_env_val = os.getenv(project_env) + if project_env_val: + return project_env_val + from_gcloud = _try_get_project_from_gcloud() + if from_gcloud: + os.environ["GOOGLE_CLOUD_PROJECT"] = from_gcloud + os.environ["GCLOUD_PROJECT"] = from_gcloud + return from_gcloud _, from_creds = google.auth.default() return from_creds @@ -151,7 +261,7 @@ def gcloud_credentials(in_replay_mode): if in_replay_mode: return FakeCredentials() creds, _ = google.auth.default() - return creds + return google.auth.credentials.with_scopes_if_required(creds, ["https://www.googleapis.com/auth/cloud-platform"]) @pytest.fixture(autouse=True) @@ -165,12 +275,13 @@ def 
gcloud_api_key(in_replay_mode): @pytest.fixture def nonvertex_client_factory(gcloud_api_key): def _factory(): + print(f"Using API key: {gcloud_api_key}") return google.genai.Client(api_key=gcloud_api_key) return _factory @pytest.fixture -def vertex_client_factory(in_replay_mode): +def vertex_client_factory(in_replay_mode, gcloud_project, gcloud_location, gcloud_credentials): def _factory(): return google.genai.Client( vertexai=True, @@ -180,11 +291,16 @@ def _factory(): return _factory -@pytest.fixture(params=[True, False]) -def use_vertex(request): +@pytest.fixture(params=["vertexaiapi", "geminiapi"]) +def genai_sdk_backend(request): return request.param +@pytest.fixture +def use_vertex(genai_sdk_backend): + return genai_sdk_backend == "vertexaiapi" + + @pytest.fixture def client(vertex_client_factory, nonvertex_client_factory, use_vertex): if use_vertex: @@ -192,9 +308,9 @@ def client(vertex_client_factory, nonvertex_client_factory, use_vertex): return nonvertex_client_factory() -@pytest.fixture(params=[True, False]) +@pytest.fixture(params=["sync", "async"]) def is_async(request): - return request.param + return request.param == "async" @pytest.fixture(params=["gemini-1.0-flash", "gemini-2.0-flash"]) From 6a366d0c28bebcb2e4687f133a2a6cec649dde53 Mon Sep 17 00:00:00 2001 From: Michael Aaron Safyan Date: Fri, 7 Mar 2025 17:24:08 -0500 Subject: [PATCH 08/14] Minor tweaks in the code generation. Add casette files. 
--- .../tests/common/base.py | 2 +- ...mini-1.5-flash-002-vertexaiapi-async].yaml | 94 +++++++++++++ ...emini-1.5-flash-002-vertexaiapi-sync].yaml | 94 +++++++++++++ ...mini-1.5-flash-002-vertexaiapi-async].yaml | 94 +++++++++++++ ...emini-1.5-flash-002-vertexaiapi-sync].yaml | 94 +++++++++++++ ...mini-1.5-flash-002-vertexaiapi-async].yaml | 97 +++++++++++++ ...emini-1.5-flash-002-vertexaiapi-sync].yaml | 102 ++++++++++++++ ...mini-1.5-flash-002-vertexaiapi-async].yaml | 99 +++++++++++++ ...emini-1.5-flash-002-vertexaiapi-sync].yaml | 99 +++++++++++++ .../tests/generate_content/test_e2e.py | 133 ++++++++++++------ 10 files changed, 865 insertions(+), 43 deletions(-) create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml create mode 100644 
instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml create mode 100644 instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/base.py index 0303a1e67d..1624b47868 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/base.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/base.py @@ -70,7 +70,7 @@ def _create_client(self): location=self._location, credentials=self._credentials, ) - return google.genai.Client(api_key=self._api_key) + return google.genai.Client(vertexai=False, api_key=self._api_key) def tearDown(self): if self._instrumentation_context is not None: diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml new file mode 100644 index 0000000000..c251cc104b --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml @@ -0,0 +1,94 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '92' + Content-Type: + - application/json + user-agent: + - google-genai-sdk/1.0.0 gl-python/3.12.8 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "No more dark, inscrutable ways,\nTo trace a request through hazy days.\nOpen Telemetry, a beacon bright,\nIlluminates the path, both day and night.\n\nFrom metrics gathered, a clear display,\nOf latency's dance, and errors' sway.\nTraces unwind, a silken thread,\nShowing the journey, from start to head.\n\nLogs interweave, a richer hue,\nContextual clues, for me and you.\nNo vendor lock-in, a freedom's call,\nTo choose your tools, to stand up tall.\n\nExporters aplenty, a varied choice,\nTo send your data, amplify your voice.\nJaeger, Zipkin, Prometheus' might,\nAll integrate, a glorious sight.\n\nWith spans and attributes, a detailed scene,\nOf how your system works, both sleek and keen.\nPerformance bottlenecks, now laid bare,\nOpen Telemetry, beyond compare.\n\nSo embrace the light, let darkness flee,\nWith Open Telemetry, set your systems free.\nObserve, and learn, and optimize with grace,\nA brighter future, in this digital space.\n" + } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -0.3303731600443522 + } + ], + "usageMetadata": { + "promptTokenCount": 8, + "candidatesTokenCount": 240, + "totalTokenCount": 248, + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 8 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 240 + } + ] + }, + "modelVersion": "gemini-1.5-flash-002", + "createTime": 
"2025-03-07T22:19:18.083091Z", + "responseId": "5nDLZ5OJBdyY3NoPiZGx0Ag" + } + headers: + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=UTF-8 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml new file mode 100644 index 0000000000..3ae84308bf --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml @@ -0,0 +1,94 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '92' + Content-Type: + - application/json + user-agent: + - google-genai-sdk/1.0.0 gl-python/3.12.8 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "No more dark logs, a cryptic, hidden trace,\nOf failing systems, lost in time and space.\nOpenTelemetry, a beacon shining bright,\nIlluminating paths, both dark and light.\n\nFrom microservices, a sprawling, tangled mesh,\nTo monolithic beasts, put to the test,\nIt gathers traces, spans, and metrics too,\nA holistic view, for me and you.\n\nWith signals clear, from every single node,\nPerformance bottlenecks, instantly bestowed.\nDistributed tracing, paints a vivid scene,\nWhere latency lurks, and slowdowns intervene.\n\nExporters rise, to send the data forth,\nTo dashboards grand, of proven, measured worth.\nPrometheus, Grafana, Jaeger, fluent streams,\nVisualizing insights, fulfilling data dreams.\n\nFrom Jaeger's diagrams, a branching, flowing art,\nTo Grafana's charts, that play a vital part,\nThe mysteries unravel, hidden deep inside,\nWhere errors slumber, and slow responses hide.\n\nSo hail OpenTelemetry, a gift to all who code,\nA brighter future, on a well-lit road.\nNo more guesswork, no more fruitless chase,\nJust clear observability, in time and space.\n" + } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -0.45532724261283875 + } + ], + "usageMetadata": { + "promptTokenCount": 8, + "candidatesTokenCount": 256, + "totalTokenCount": 264, + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 8 + } + ], + "candidatesTokensDetails": [ 
+ { + "modality": "TEXT", + "tokenCount": 256 + } + ] + }, + "modelVersion": "gemini-1.5-flash-002", + "createTime": "2025-03-07T22:19:15.268428Z", + "responseId": "43DLZ4yxEM6F3NoPzaTkiQU" + } + headers: + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=UTF-8 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml new file mode 100644 index 0000000000..77e985bf28 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml @@ -0,0 +1,94 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '92' + Content-Type: + - application/json + user-agent: + - google-genai-sdk/1.0.0 gl-python/3.12.8 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "No more dark, mysterious traces,\nNo more guessing, in empty spaces.\nOpenTelemetry's light now shines,\nIlluminating all our designs.\n\nFrom microservices, small and fleet,\nTo monolithic beasts, hard to beat,\nIt weaves a net, both fine and strong,\nWhere metrics flow, where logs belong.\n\nTraces dance, a vibrant hue,\nShowing journeys, old and new.\nSpans unfold, a story told,\nOf requests handled, brave and bold.\n\nMetrics hum, a steady beat,\nLatency, errors, can't be beat.\nDistribution charts, a clear display,\nGuiding us along the way.\n\nLogs provide a detailed view,\nOf what happened, me and you.\nContext rich, with helpful clues,\nDebugging woes, it quickly subdues.\n\nWith exporters wise, a thoughtful choice,\nTo Prometheus, Jaeger, or Zipkin's voice,\nOur data flows, a precious stream,\nReal-time insights, a waking dream.\n\nSo hail to OpenTelemetry's might,\nBringing clarity to our darkest night.\nObservability's champion, bold and true,\nA brighter future, for me and you.\n" + } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -0.4071464086238575 + } + ], + "usageMetadata": { + "promptTokenCount": 8, + "candidatesTokenCount": 253, + "totalTokenCount": 261, + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 8 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 253 + } + ] + }, + "modelVersion": 
"gemini-1.5-flash-002", + "createTime": "2025-03-07T22:19:12.443989Z", + "responseId": "4HDLZ9WMG6SK698Pr5uZ2Qw" + } + headers: + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=UTF-8 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml new file mode 100644 index 0000000000..7d3d7a56b2 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_non_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml @@ -0,0 +1,94 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '92' + Content-Type: + - application/json + user-agent: + - google-genai-sdk/1.0.0 gl-python/3.12.8 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:generateContent + response: + body: + string: |- + { + "candidates": [ + { + "content": { + "role": "model", + "parts": [ + { + "text": "No more dark, mysterious traces,\nOf failing systems, hidden spaces.\nOpen Telemetry's light shines bright,\nGuiding us through the darkest night.\n\nFrom metrics gathered, finely spun,\nTo logs that tell of tasks undone,\nAnd traces linking every call,\nIt answers questions, standing tall.\n\nDistributed systems, complex and vast,\nTheir hidden flaws, no longer cast\nIn shadows deep, beyond our view,\nOpen Telemetry sees them through.\n\nWith spans and attributes, it weaves a tale,\nOf requests flowing, never frail.\nIt pinpoints bottlenecks, slow and grim,\nAnd helps us optimize, system trim.\n\nAcross languages, a common ground,\nWhere data's shared, and insights found.\nExporters whisper, collectors hum,\nA symphony of data, overcome.\n\nSo raise a glass, to this open source,\nA shining beacon, a powerful force.\nOpen Telemetry, a guiding star,\nRevealing secrets, near and far.\n" + } + ] + }, + "finishReason": "STOP", + "avgLogprobs": -0.3586180628193498 + } + ], + "usageMetadata": { + "promptTokenCount": 8, + "candidatesTokenCount": 211, + "totalTokenCount": 219, + "promptTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 8 + } + ], + "candidatesTokensDetails": [ + { + "modality": "TEXT", + "tokenCount": 211 + } + ] + }, + "modelVersion": "gemini-1.5-flash-002", + "createTime": "2025-03-07T22:19:09.936326Z", + "responseId": 
"3XDLZ4aTOZSpnvgPn-e0qQk" + } + headers: + Content-Encoding: + - gzip + Content-Type: + - application/json; charset=UTF-8 + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml new file mode 100644 index 0000000000..a946911c36 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-async].yaml @@ -0,0 +1,97 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '92' + Content-Type: + - application/json + user-agent: + - google-genai-sdk/1.0.0 gl-python/3.12.8 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse + response: + body: + string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"No\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\ + ,\"createTime\": \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" longer dark, the tracing's light,\\nOpen Telemetry, shining\ + \ bright\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\ + : \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \".\\nA beacon in the coding night,\\nRevealing paths, both\ + \ dark\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"\ + 2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" and bright.\\n\\nFrom microservice to sprawling beast,\\\ + nIts watchful eye, a silent priest.\\nObserving calls, both small and vast,\\\ + nPerformance\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\ + : \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" flaws, revealed at last.\\n\\nWith metrics gleaned and logs\ + \ 
aligned,\\nA clearer picture, you will find.\\nOf latency, and errors dire,\\\ + n\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:29.293930Z\"\ + ,\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"And bottlenecks\ + \ that set afire.\\n\\nIt spans the clouds, a network wide,\\nWhere data streams,\ + \ a surging tide.\\nCollecting traces, rich and deep,\\nWhile slumbering apps\ + \ their secrets keep.\\n\\nJaeger, Zip\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\ + ,\"createTime\": \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"kin, the tools it holds,\\nA tapestry of stories told.\\nOf\ + \ requests flowing, swift and free,\\nOr tangled knots, for all to see.\\\ + n\\nSo embrace the power, understand,\\nThe vital role, across the\"}]}}],\"\ + modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:29.293930Z\"\ + ,\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" land.\\nOpen\ + \ Telemetry, a guiding star,\\nTo navigate the digital afar.\\n\"}]},\"finishReason\"\ + : \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\"\ + : 212,\"totalTokenCount\": 220,\"promptTokensDetails\": [{\"modality\": \"\ + TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\": [{\"modality\": \"\ + TEXT\",\"tokenCount\": 212}]},\"modelVersion\": \"gemini-1.5-flash-002\",\"\ + createTime\": \"2025-03-07T22:19:29.293930Z\",\"responseId\": \"8XDLZ6r4Efa1-O4PwIHamQ4\"\ + }\r\n\r\n" + headers: + Content-Disposition: + - attachment + Content-Type: + - text/event-stream + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: 
+ - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml new file mode 100644 index 0000000000..647a76b80a --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[excludecontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml @@ -0,0 +1,102 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." + } + ], + "role": "user" + } + ] + } + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '92' + Content-Type: + - application/json + user-agent: + - google-genai-sdk/1.0.0 gl-python/3.12.8 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse + response: + body: + string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"The\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\ + ,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" black box whispers, secrets deep,\\nOf failing systems, promises\ + \ to keep.\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\ + : \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\ + }\r\n\r\ndata: {\"candidates\": 
[{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"\\nBut tracing's light, a guiding hand,\\nReveals the path\"\ + }]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\ + ,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \", across the\ + \ land.\\n\\nOpen Telemetry, a beacon bright,\\nIlluminating pathways, day\ + \ and night.\\nFrom spans and traces, stories told,\"}]}}],\"modelVersion\"\ + : \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\ + ,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"\\nOf requests\ + \ flowing, brave and bold.\\n\\nThe metrics rise, a vibrant chart,\\nDisplaying\ + \ latency, a work of art.\\nEach request'\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\ + ,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"s journey, clearly shown,\\nWhere bottlenecks slumber, seeds\ + \ are sown.\\n\\nWith logs appended, context clear,\\nThe root of problems,\ + \ drawing near.\\nObservability's embrace, so wide,\\nUnraveling mysteries,\"\ + }]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\ + ,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" deep inside.\\\ + n\\nFrom simple apps to complex weaves,\\nOpen Telemetry's power achieves,\\\ + nA unified vision, strong and true,\\nMonitoring systems, old and new.\\n\\\ + nNo vendor lock-in, free to roam,\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\ + ,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\ + }\r\n\r\ndata: 
{\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"\\nAcross the clouds, and find your home.\\nA standard rising,\ + \ strong and bold,\\nA future brighter, to behold.\\n\\nSo let the traces\ + \ flow and gleam,\\nOpen Telemetry, a vibrant dream.\\nOf healthy systems,\ + \ running free,\\nFor all to see, for all to be.\"}]}}],\"modelVersion\":\ + \ \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:26.378633Z\"\ + ,\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"\\n\"}]},\"\ + finishReason\": \"STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"\ + candidatesTokenCount\": 258,\"totalTokenCount\": 266,\"promptTokensDetails\"\ + : [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\"\ + : [{\"modality\": \"TEXT\",\"tokenCount\": 258}]},\"modelVersion\": \"gemini-1.5-flash-002\"\ + ,\"createTime\": \"2025-03-07T22:19:26.378633Z\",\"responseId\": \"7nDLZ4mOF_Hg-O4P7YfKqQ8\"\ + }\r\n\r\n" + headers: + Content-Disposition: + - attachment + Content-Type: + - text/event-stream + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml new file mode 100644 index 0000000000..9a068aae89 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-async].yaml @@ -0,0 +1,99 @@ +interactions: +- request: 
+ body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." + } + ], + "role": "user" + } + ] + } + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '92' + Content-Type: + - application/json + user-agent: + - google-genai-sdk/1.0.0 gl-python/3.12.8 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse + response: + body: + string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"No\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\ + ,\"createTime\": \"2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" more dark logs, a cryptic, silent scream,\\nNo more the hunt\ + \ for\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"\ + 2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" errors, a lost, fading dream.\\nOpen Telemetry, a beacon\ + \ in\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"\ + 2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" the night,\\nShining forth its data, clear and burning bright.\\\ + n\\nFrom traces spanning systems, a flowing, silver thread,\\nMetrics pulse\ + \ and measure,\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\ + : \"2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": 
\"model\",\"parts\"\ + : [{\"text\": \" insights finely spread.\\nLogs enriched with context, a story\ + \ they unfold,\\nOf requests and responses, both brave and bold.\\n\\nObservability's\ + \ promise\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\ + : \"2025-03-07T22:19:23.579184Z\",\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \", a future now at hand,\\nWith vendors interoperable, a collaborative\ + \ band.\\nNo longer vendor lock-in, a restrictive, iron cage,\\nBut freedom\ + \ of selection, turning a new page.\\n\\nFrom microservices humming,\"}]}}],\"\ + modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:23.579184Z\"\ + ,\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" a symphony\ + \ of calls,\\nTo monolithic giants, answering their thralls,\\nOpen Telemetry\ + \ watches, with keen and watchful eye,\\nDetecting the anomalies, before they\ + \ rise and fly.\\n\\nSo let the data flow freely, a\"}]}}],\"modelVersion\"\ + : \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:23.579184Z\"\ + ,\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \" river strong\ + \ and deep,\\nIts secrets it will whisper, while the systems sleep.\\nOpen\ + \ Telemetry's power, a force that we can wield,\\nTo build more stable systems,\ + \ in the digital field.\\n\"}]},\"finishReason\": \"STOP\"}],\"usageMetadata\"\ + : {\"promptTokenCount\": 8,\"candidatesTokenCount\": 238,\"totalTokenCount\"\ + : 246,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 8}],\"\ + candidatesTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 238}]},\"\ + modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:23.579184Z\"\ + 
,\"responseId\": \"63DLZ_CsI_Hg-O4P7YfKqQ8\"}\r\n\r\n" + headers: + Content-Disposition: + - attachment + Content-Type: + - text/event-stream + Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml new file mode 100644 index 0000000000..669f1af93b --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/cassettes/test_streaming[logcontent-gemini-1.5-flash-002-vertexaiapi-sync].yaml @@ -0,0 +1,99 @@ +interactions: +- request: + body: |- + { + "contents": [ + { + "parts": [ + { + "text": "Create a poem about Open Telemetry." 
+ } + ], + "role": "user" + } + ] + } + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '92' + Content-Type: + - application/json + user-agent: + - google-genai-sdk/1.0.0 gl-python/3.12.8 + x-goog-api-client: + - + x-goog-user-project: + - + method: POST + uri: https://test-location-aiplatform.googleapis.com/v1beta1/projects/test-project/locations/test-location/publishers/google/models/gemini-1.5-flash-002:streamGenerateContent?alt=sse + response: + body: + string: "data: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"No\"}]}}],\"usageMetadata\": {},\"modelVersion\": \"gemini-1.5-flash-002\"\ + ,\"createTime\": \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \" more dark, mysterious traces,\\nNo more guessing, in time\ + \ and spaces.\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\"\ + : \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"\\nOpen Telemetry's light shines bright,\\nIlluminating the\ + \ code'\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\":\ + \ \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"s dark night.\\n\\nFrom spans and metrics, a story told,\\\ + nOf requests flowing, both brave and bold.\\nTraces weaving, a tapestry grand,\"\ + }]}}],\"modelVersion\": \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:20.770456Z\"\ + ,\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \"\\nShowing\ + \ performance, across the land.\\n\\nLogs and metrics, a 
perfect blend,\\\ + nInformation's flow, without end.\\nObservability's promise\"}]}}],\"modelVersion\"\ + : \"gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:20.770456Z\"\ + ,\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"}\r\n\r\ndata: {\"candidates\"\ + : [{\"content\": {\"role\": \"model\",\"parts\": [{\"text\": \", clear and\ + \ true,\\nInsights revealed, for me and you.\\n\\nJaeger, Zipkin, a chorus\ + \ sings,\\nWith exporters ready, for all the things.\\nFrom simple apps to\ + \ systems vast,\\nOpen Telemetry'\"}]}}],\"modelVersion\": \"gemini-1.5-flash-002\"\ + ,\"createTime\": \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\ + }\r\n\r\ndata: {\"candidates\": [{\"content\": {\"role\": \"model\",\"parts\"\ + : [{\"text\": \"s power will last.\\n\\nNo vendor lock-in, a freedom sweet,\\\ + nOpen source glory, can't be beat.\\nSo let us embrace, this modern way,\\\ + nTo monitor systems, come what may.\\n\\nFrom\"}]}}],\"modelVersion\": \"\ + gemini-1.5-flash-002\",\"createTime\": \"2025-03-07T22:19:20.770456Z\",\"\ + responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"}\r\n\r\ndata: {\"candidates\": [{\"\ + content\": {\"role\": \"model\",\"parts\": [{\"text\": \" microservices, small\ + \ and slight,\\nTo monolithic giants, shining bright,\\nOpen Telemetry shows\ + \ the path,\\nTo understand, and fix the wrath,\\nOf latency demons, lurking\ + \ near,\\nBringing clarity, year after year.\\n\"}]},\"finishReason\": \"\ + STOP\"}],\"usageMetadata\": {\"promptTokenCount\": 8,\"candidatesTokenCount\"\ + : 242,\"totalTokenCount\": 250,\"promptTokensDetails\": [{\"modality\": \"\ + TEXT\",\"tokenCount\": 8}],\"candidatesTokensDetails\": [{\"modality\": \"\ + TEXT\",\"tokenCount\": 242}]},\"modelVersion\": \"gemini-1.5-flash-002\",\"\ + createTime\": \"2025-03-07T22:19:20.770456Z\",\"responseId\": \"6HDLZ5iDL86F3NoPzaTkiQU\"\ + }\r\n\r\n" + headers: + Content-Disposition: + - attachment + Content-Type: + - text/event-stream + 
Transfer-Encoding: + - chunked + Vary: + - Origin + - X-Origin + - Referer + X-Content-Type-Options: + - nosniff + X-Frame-Options: + - SAMEORIGIN + X-XSS-Protection: + - '0' + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py index 1e462b16f0..87fd8fd1a6 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py @@ -37,6 +37,7 @@ import logging import asyncio import pytest +import urllib.parse from ..common.auth import FakeCredentials from ..common.otel_mocker import OTelMocker @@ -46,6 +47,55 @@ ) +_FAKE_PROJECT = "test-project" +_FAKE_LOCATION = "test-location" +_FAKE_API_KEY = "test-api-key" +_DEFAULT_REAL_LOCATION = "us-central1" + +def _get_project_from_env(): + return os.getenv("GCLOUD_PROJECT") or os.getenv("GOOGLE_CLOUD_PROJECT") or "" + + +def _get_project_from_gcloud_cli(): + try: + gcloud_call_result = subprocess.run("gcloud config get project", shell=True, capture_output=True) + except subprocess.CalledProcessError: + return None + gcloud_output = gcloud_call_result.stdout.decode() + return gcloud_output.strip() + + +def _get_project_from_credentials(): + _, from_creds = google.auth.default() + return from_creds + + +def _get_real_project(): + from_env = _get_project_from_env() + if from_env: + return from_env + from_cli = _get_project_from_gcloud_cli() + if from_cli: + return from_cli + return _get_project_from_credentials() + + +def _get_location_from_env(): + return os.getenv("GCLOUD_LOCATION") or os.getenv("GOOGLE_CLOUD_LOCATION") or "" + + +def _get_real_location(): + return _get_location_from_env() or _DEFAULT_REAL_LOCATION + + +def _get_vertex_api_key_from_env(): + return 
os.getenv("GOOGLE_API_KEY") + + +def _get_gemini_api_key_from_env(): + return os.getenv("GEMINI_API_KEY") + + def _should_redact_header(header_key): if header_key.startswith('x-goog'): return True @@ -67,6 +117,15 @@ def _redact_headers(headers): def _before_record_request(request): if request.headers: _redact_headers(request.headers) + uri = request.uri + project = _get_project_from_env() + if project: + uri = uri.replace(f"projects/{project}", f"projects/{_FAKE_PROJECT}") + location = _get_real_location() + if location: + uri = uri.replace(f"locations/{location}", f"locations/{_FAKE_LOCATION}") + uri = uri.replace(f"//{location}-aiplatform.googleapis.com", f"//{_FAKE_LOCATION}-aiplatform.googleapis.com") + request.uri = uri return request @@ -222,38 +281,22 @@ def in_replay_mode(vcr_record_mode): return vcr_record_mode == RecordMode.NONE -def _try_get_project_from_gcloud(): - try: - gcloud_call_result = subprocess.run("gcloud config get project", shell=True, capture_output=True) - except subprocess.CalledProcessError: - return None - gcloud_output = gcloud_call_result.stdout.decode() - return gcloud_output.strip() - -@pytest.fixture +@pytest.fixture(autouse=True) def gcloud_project(in_replay_mode): if in_replay_mode: - return "test-project" - project_envs = ["GCLOUD_PROJECT", "GOOGLE_CLOUD_PROJECT"] - for project_env in project_envs: - project_env_val = os.getenv(project_env) - if project_env_val: - return project_env_val - from_gcloud = _try_get_project_from_gcloud() - if from_gcloud: - os.environ["GOOGLE_CLOUD_PROJECT"] = from_gcloud - os.environ["GCLOUD_PROJECT"] = from_gcloud - return from_gcloud - _, from_creds = google.auth.default() - return from_creds - + return _FAKE_PROJECT + result = _get_real_project() + for env_var in ["GCLOUD_PROJECT", "GOOGLE_CLOUD_PROJECT"]: + os.environ[env_var] = result + return result + @pytest.fixture def gcloud_location(in_replay_mode): if in_replay_mode: - return "test-location" - return os.getenv("GCLOUD_LOCATION") + return 
_FAKE_LOCATION + return _get_real_location() @pytest.fixture @@ -264,24 +307,29 @@ def gcloud_credentials(in_replay_mode): return google.auth.credentials.with_scopes_if_required(creds, ["https://www.googleapis.com/auth/cloud-platform"]) -@pytest.fixture(autouse=True) -def gcloud_api_key(in_replay_mode): +@pytest.fixture +def gemini_api_key(in_replay_mode): if in_replay_mode: - os.environ["GOOGLE_API_KEY"] = "test-api-key" - return "test-api-key" + return _FAKE_API_KEY + return os.getenv("GEMINI_API_KEY") + + +@pytest.fixture(autouse=True) +def gcloud_api_key(gemini_api_key): + if "GOOGLE_API_KEY" not in os.environ: + os.environ["GOOGLE_API_KEY"] = gemini_api_key return os.getenv("GOOGLE_API_KEY") @pytest.fixture -def nonvertex_client_factory(gcloud_api_key): +def nonvertex_client_factory(gemini_api_key): def _factory(): - print(f"Using API key: {gcloud_api_key}") - return google.genai.Client(api_key=gcloud_api_key) + return google.genai.Client(api_key=gemini_api_key) return _factory @pytest.fixture -def vertex_client_factory(in_replay_mode, gcloud_project, gcloud_location, gcloud_credentials): +def vertex_client_factory(gcloud_project, gcloud_location, gcloud_credentials): def _factory(): return google.genai.Client( vertexai=True, @@ -291,14 +339,16 @@ def _factory(): return _factory -@pytest.fixture(params=["vertexaiapi", "geminiapi"]) +@pytest.fixture(params=["vertexaiapi"]) def genai_sdk_backend(request): return request.param -@pytest.fixture +@pytest.fixture(autouse=True) def use_vertex(genai_sdk_backend): - return genai_sdk_backend == "vertexaiapi" + result = bool(genai_sdk_backend == "vertexaiapi") + os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "1" if result else "0" + return result @pytest.fixture @@ -313,7 +363,7 @@ def is_async(request): return request.param == "async" -@pytest.fixture(params=["gemini-1.0-flash", "gemini-2.0-flash"]) +@pytest.fixture(params=["gemini-1.5-flash-002"]) def model(request): return request.param @@ -353,7 +403,7 @@ async def 
_gather_all(): @pytest.mark.vcr -def test_single_response(generate_content, model, otel_mocker): +def test_non_streaming(generate_content, model, otel_mocker): response = generate_content( model=model, contents="Create a poem about Open Telemetry.") @@ -364,15 +414,14 @@ def test_single_response(generate_content, model, otel_mocker): @pytest.mark.vcr -def test_multiple_responses(generate_content_stream, model, otel_mocker): +def test_streaming(generate_content_stream, model, otel_mocker): count = 0 for response in generate_content_stream( model=model, - contents="Create a poem about Open Telemetry.", - config=genai_types.GenerateContentConfig(candidate_count=2)): + contents="Create a poem about Open Telemetry."): assert response is not None assert response.text is not None assert len(response.text) > 0 count += 1 - assert count == 2 + assert count > 0 otel_mocker.assert_has_span_named(f"generate_content {model}") From 1692b9eb7a5bc3c9a7ea665f75a5209d33dd25fd Mon Sep 17 00:00:00 2001 From: Michael Aaron Safyan Date: Fri, 7 Mar 2025 17:26:47 -0500 Subject: [PATCH 09/14] Reformat with ruff. 
--- .../tests/common/auth.py | 1 - .../tests/generate_content/base.py | 44 ++++-- .../tests/generate_content/streaming_base.py | 2 +- .../tests/generate_content/test_e2e.py | 148 ++++++++++-------- .../tests/generate_content/util.py | 28 ++-- 5 files changed, 133 insertions(+), 90 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py index d6ef5b66d8..88831a3e9a 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/common/auth.py @@ -16,6 +16,5 @@ class FakeCredentials(google.auth.credentials.AnonymousCredentials): - def refresh(self, request): pass diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/base.py index 728d516ae7..bb20c8c737 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/base.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/base.py @@ -12,11 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import asyncio import unittest import unittest.mock -from google.genai.models import Models, AsyncModels +from google.genai.models import AsyncModels, Models + from ..common.base import TestCase as CommonTestCaseBase from .util import convert_to_response, create_response @@ -45,7 +45,7 @@ def mock_generate_content(self): if self._generate_content_mock is None: self._create_and_install_mocks() return self._generate_content_mock - + @property def mock_generate_content_stream(self): if self._generate_content_stream_mock is None: @@ -68,6 +68,7 @@ def _create_and_install_mocks(self): def _create_nonstream_mock(self): mock = unittest.mock.MagicMock() + def _default_impl(*args, **kwargs): if not self._responses: return create_response(text="Some response") @@ -75,39 +76,49 @@ def _default_impl(*args, **kwargs): result = self._responses[index] self._response_index += 1 return result + mock.side_effect = _default_impl return mock def _create_stream_mock(self): mock = unittest.mock.MagicMock() + def _default_impl(*args, **kwargs): for response in self._responses: yield response + mock.side_effect = _default_impl return mock def _install_mocks(self): output_wrapped = self._wrap_output(self._generate_content_mock) - output_wrapped_stream = self._wrap_output_stream(self._generate_content_stream_mock) + output_wrapped_stream = self._wrap_output_stream( + self._generate_content_stream_mock + ) Models.generate_content = output_wrapped Models.generate_content_stream = output_wrapped_stream AsyncModels.generate_content = self._async_wrapper(output_wrapped) - AsyncModels.generate_content_stream = self._async_stream_wrapper(output_wrapped_stream) - + AsyncModels.generate_content_stream = self._async_stream_wrapper( + output_wrapped_stream + ) + def _wrap_output(self, mock_generate_content): def _wrapped(*args, **kwargs): return convert_to_response(mock_generate_content(*args, **kwargs)) + return _wrapped def _wrap_output_stream(self, mock_generate_content_stream): def 
_wrapped(*args, **kwargs): for output in mock_generate_content_stream(*args, **kwargs): - yield convert_to_response(output) + yield convert_to_response(output) + return _wrapped def _async_wrapper(self, mock_generate_content): async def _wrapped(*args, **kwargs): return mock_generate_content(*args, **kwargs) + return _wrapped def _async_stream_wrapper(self, mock_generate_content_stream): @@ -115,19 +126,30 @@ async def _wrapped(*args, **kwargs): async def _internal_generator(): for result in mock_generate_content_stream(*args, **kwargs): yield result + return _internal_generator() + return _wrapped def tearDown(self): super().tearDown() if self._generate_content_mock is None: assert Models.generate_content == self._original_generate_content - assert Models.generate_content_stream == self._original_generate_content_stream - assert AsyncModels.generate_content == self._original_async_generate_content - assert AsyncModels.generate_content_stream == self._original_async_generate_content_stream + assert ( + Models.generate_content_stream + == self._original_generate_content_stream + ) + assert ( + AsyncModels.generate_content + == self._original_async_generate_content + ) + assert ( + AsyncModels.generate_content_stream + == self._original_async_generate_content_stream + ) Models.generate_content = self._original_generate_content Models.generate_content_stream = self._original_generate_content_stream AsyncModels.generate_content = self._original_async_generate_content AsyncModels.generate_content_stream = ( self._original_async_generate_content_stream - ) \ No newline at end of file + ) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/streaming_base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/streaming_base.py index 66985ce399..e5bceb7c79 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/streaming_base.py +++ 
b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/streaming_base.py @@ -16,6 +16,7 @@ from .base import TestCase + class StreamingTestCase(TestCase): # The "setUp" function is defined by "unittest.TestCase" and thus # this name must be used. Uncertain why pylint doesn't seem to @@ -32,7 +33,6 @@ def generate_content(self, *args, **kwargs): def expected_function_name(self): raise NotImplementedError("Must implement 'expected_function_name'.") - def test_instrumentation_does_not_break_core_functionality(self): self.configure_valid_response(text="Yep, it works!") responses = self.generate_content( diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py index 87fd8fd1a6..835af2de22 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py @@ -23,42 +23,42 @@ secondary goal of this test. 
Detailed testing of the instrumentation output is the purview of the other tests in this directory.""" - -import subprocess +import asyncio import json -import yaml +import os +import subprocess + import google.auth import google.auth.credentials import google.genai -from google.genai import types as genai_types -import os -import vcr -from vcr.record_mode import RecordMode -import logging -import asyncio import pytest -import urllib.parse - -from ..common.auth import FakeCredentials -from ..common.otel_mocker import OTelMocker +import yaml +from vcr.record_mode import RecordMode from opentelemetry.instrumentation.google_genai import ( GoogleGenAiSdkInstrumentor, ) +from ..common.auth import FakeCredentials +from ..common.otel_mocker import OTelMocker _FAKE_PROJECT = "test-project" _FAKE_LOCATION = "test-location" _FAKE_API_KEY = "test-api-key" _DEFAULT_REAL_LOCATION = "us-central1" + def _get_project_from_env(): - return os.getenv("GCLOUD_PROJECT") or os.getenv("GOOGLE_CLOUD_PROJECT") or "" + return ( + os.getenv("GCLOUD_PROJECT") or os.getenv("GOOGLE_CLOUD_PROJECT") or "" + ) def _get_project_from_gcloud_cli(): try: - gcloud_call_result = subprocess.run("gcloud config get project", shell=True, capture_output=True) + gcloud_call_result = subprocess.run( + "gcloud config get project", shell=True, capture_output=True + ) except subprocess.CalledProcessError: return None gcloud_output = gcloud_call_result.stdout.decode() @@ -81,7 +81,11 @@ def _get_real_project(): def _get_location_from_env(): - return os.getenv("GCLOUD_LOCATION") or os.getenv("GOOGLE_CLOUD_LOCATION") or "" + return ( + os.getenv("GCLOUD_LOCATION") + or os.getenv("GOOGLE_CLOUD_LOCATION") + or "" + ) def _get_real_location(): @@ -97,11 +101,11 @@ def _get_gemini_api_key_from_env(): def _should_redact_header(header_key): - if header_key.startswith('x-goog'): + if header_key.startswith("x-goog"): return True - if header_key.startswith('sec-goog'): + if header_key.startswith("sec-goog"): return True - if 
header_key in ['server', 'server-timing']: + if header_key in ["server", "server-timing"]: return True return False @@ -114,64 +118,65 @@ def _redact_headers(headers): for header_key in to_redact: headers[header_key] = "" + def _before_record_request(request): if request.headers: - _redact_headers(request.headers) + _redact_headers(request.headers) uri = request.uri project = _get_project_from_env() if project: uri = uri.replace(f"projects/{project}", f"projects/{_FAKE_PROJECT}") location = _get_real_location() if location: - uri = uri.replace(f"locations/{location}", f"locations/{_FAKE_LOCATION}") - uri = uri.replace(f"//{location}-aiplatform.googleapis.com", f"//{_FAKE_LOCATION}-aiplatform.googleapis.com") + uri = uri.replace( + f"locations/{location}", f"locations/{_FAKE_LOCATION}" + ) + uri = uri.replace( + f"//{location}-aiplatform.googleapis.com", + f"//{_FAKE_LOCATION}-aiplatform.googleapis.com", + ) request.uri = uri return request def _before_record_response(response): if hasattr(response, "headers") and response.headers: - _redact_headers(response.headers) + _redact_headers(response.headers) return response -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def vcr_config(): return { - 'filter_query_parameters': [ - 'key', - 'apiKey', - 'quotaUser', - 'userProject', - 'token', - 'access_token', - 'accessToken', - 'refesh_token', - 'refreshToken', - 'authuser', - 'bearer', - 'bearer_token', - 'bearerToken', - 'userIp', - ], - 'filter_post_data_parameters': [ - 'apikey', - 'api_key', - 'key' + "filter_query_parameters": [ + "key", + "apiKey", + "quotaUser", + "userProject", + "token", + "access_token", + "accessToken", + "refesh_token", + "refreshToken", + "authuser", + "bearer", + "bearer_token", + "bearerToken", + "userIp", ], - 'filter_headers': [ - 'x-goog-api-key', - 'authorization', - 'server', - 'Server' - 'Server-Timing', - 'Date', + "filter_post_data_parameters": ["apikey", "api_key", "key"], + "filter_headers": [ + "x-goog-api-key", 
+ "authorization", + "server", + "Server" "Server-Timing", + "Date", ], - 'before_record_request': _before_record_request, - 'before_record_response': _before_record_response, - 'ignore_hosts': [ - 'oauth2.googleapis.com', - 'iam.googleapis.com', + "before_record_request": _before_record_request, + "before_record_response": _before_record_response, + "ignore_hosts": [ + "oauth2.googleapis.com", + "iam.googleapis.com", ], } @@ -268,7 +273,9 @@ def otel_mocker(): @pytest.fixture(autouse=True, params=["logcontent", "excludecontent"]) def setup_content_recording(request): enabled = request.param == "logcontent" - os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = str(enabled) + os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = str( + enabled + ) @pytest.fixture @@ -281,7 +288,6 @@ def in_replay_mode(vcr_record_mode): return vcr_record_mode == RecordMode.NONE - @pytest.fixture(autouse=True) def gcloud_project(in_replay_mode): if in_replay_mode: @@ -304,7 +310,9 @@ def gcloud_credentials(in_replay_mode): if in_replay_mode: return FakeCredentials() creds, _ = google.auth.default() - return google.auth.credentials.with_scopes_if_required(creds, ["https://www.googleapis.com/auth/cloud-platform"]) + return google.auth.credentials.with_scopes_if_required( + creds, ["https://www.googleapis.com/auth/cloud-platform"] + ) @pytest.fixture @@ -317,7 +325,7 @@ def gemini_api_key(in_replay_mode): @pytest.fixture(autouse=True) def gcloud_api_key(gemini_api_key): if "GOOGLE_API_KEY" not in os.environ: - os.environ["GOOGLE_API_KEY"] = gemini_api_key + os.environ["GOOGLE_API_KEY"] = gemini_api_key return os.getenv("GOOGLE_API_KEY") @@ -325,6 +333,7 @@ def gcloud_api_key(gemini_api_key): def nonvertex_client_factory(gemini_api_key): def _factory(): return google.genai.Client(api_key=gemini_api_key) + return _factory @@ -335,7 +344,9 @@ def _factory(): vertexai=True, project=gcloud_project, location=gcloud_location, - credentials=gcloud_credentials) + 
credentials=gcloud_credentials, + ) + return _factory @@ -392,9 +403,14 @@ def _sync_impl(*args, **kwargs): def _async_impl(*args, **kwargs): async def _gather_all(): results = [] - async for result in await client.aio.models.generate_content_stream(*args, **kwargs): + async for ( + result + ) in await client.aio.models.generate_content_stream( + *args, **kwargs + ): results.append(result) return results + return asyncio.run(_gather_all()) if is_async: @@ -405,8 +421,8 @@ async def _gather_all(): @pytest.mark.vcr def test_non_streaming(generate_content, model, otel_mocker): response = generate_content( - model=model, - contents="Create a poem about Open Telemetry.") + model=model, contents="Create a poem about Open Telemetry." + ) assert response is not None assert response.text is not None assert len(response.text) > 0 @@ -417,8 +433,8 @@ def test_non_streaming(generate_content, model, otel_mocker): def test_streaming(generate_content_stream, model, otel_mocker): count = 0 for response in generate_content_stream( - model=model, - contents="Create a poem about Open Telemetry."): + model=model, contents="Create a poem about Open Telemetry." 
+ ): assert response is not None assert response.text is not None assert len(response.text) > 0 diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/util.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/util.py index 6f56b5b5b5..2bbd5bdd53 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/util.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/util.py @@ -14,7 +14,6 @@ from typing import Optional, Union -import google.genai import google.genai.types as genai_types @@ -25,20 +24,23 @@ def create_response( candidate: Optional[genai_types.Candidate] = None, candidates: Optional[list[genai_types.Candidate]] = None, text: Optional[str] = None, - input_tokens: Optional[int]=None, - output_tokens: Optional[int]=None, - model_version: Optional[str]=None, - usage_metadata: Optional[genai_types.GenerateContentResponseUsageMetadata]=None, - **kwargs) -> genai_types.GenerateContentResponse: + input_tokens: Optional[int] = None, + output_tokens: Optional[int] = None, + model_version: Optional[str] = None, + usage_metadata: Optional[ + genai_types.GenerateContentResponseUsageMetadata + ] = None, + **kwargs, +) -> genai_types.GenerateContentResponse: # Build up the "candidates" subfield if text is None: - text = 'Some response text' + text = "Some response text" if part is None: part = genai_types.Part(text=text) if parts is None: parts = [part] if content is None: - content = genai_types.Content(parts=parts, role='model') + content = genai_types.Content(parts=parts, role="model") if candidate is None: candidate = genai_types.Candidate(content=content) if candidates is None: @@ -55,15 +57,19 @@ def create_response( candidates=candidates, usage_metadata=usage_metadata, model_version=model_version, - **kwargs) + **kwargs, + ) def convert_to_response( - arg: Union[str, 
genai_types.GenerateContentResponse, dict]) -> genai_types.GenerateContentResponse: + arg: Union[str, genai_types.GenerateContentResponse, dict], +) -> genai_types.GenerateContentResponse: if isinstance(arg, str): return create_response(text=arg) if isinstance(arg, genai_types.GenerateContentResponse): return arg if isinstance(arg, dict): return create_response(**arg) - raise ValueError(f"Unsure how to convert {arg} of type {arg.__class__.__name__} to response.") + raise ValueError( + f"Unsure how to convert {arg} of type {arg.__class__.__name__} to response." + ) From e7eb450eef38e6f0bdf1273c55b0983594e06ec8 Mon Sep 17 00:00:00 2001 From: Michael Aaron Safyan Date: Fri, 14 Mar 2025 11:50:10 -0400 Subject: [PATCH 10/14] Fix lint and gzip issue. --- .../tests/generate_content/base.py | 75 +++++----- .../tests/generate_content/test_e2e.py | 132 +++++++++++------- 2 files changed, 125 insertions(+), 82 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/base.py index bb20c8c737..4eed570b3a 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/base.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/base.py @@ -21,6 +21,44 @@ from .util import convert_to_response, create_response +# Helper used in "_install_mocks" below. +def _wrap_output(mock_generate_content): + def _wrapped(*args, **kwargs): + return convert_to_response(mock_generate_content(*args, **kwargs)) + + return _wrapped + + +# Helper used in "_install_mocks" below. +def _wrap_output_stream(mock_generate_content_stream): + def _wrapped(*args, **kwargs): + for output in mock_generate_content_stream(*args, **kwargs): + yield convert_to_response(output) + + return _wrapped + + +# Helper used in "_install_mocks" below. 
+def _async_wrapper(mock_generate_content): + async def _wrapped(*args, **kwargs): + return mock_generate_content(*args, **kwargs) + + return _wrapped + + +# Helper used in "_install_mocks" below. +def _async_stream_wrapper(mock_generate_content_stream): + async def _wrapped(*args, **kwargs): + async def _internal_generator(): + for result in mock_generate_content_stream(*args, **kwargs): + yield result + + return _internal_generator() + + return _wrapped + + + class TestCase(CommonTestCaseBase): # The "setUp" function is defined by "unittest.TestCase" and thus # this name must be used. Uncertain why pylint doesn't seem to @@ -91,46 +129,17 @@ def _default_impl(*args, **kwargs): return mock def _install_mocks(self): - output_wrapped = self._wrap_output(self._generate_content_mock) - output_wrapped_stream = self._wrap_output_stream( + output_wrapped = _wrap_output(self._generate_content_mock) + output_wrapped_stream = _wrap_output_stream( self._generate_content_stream_mock ) Models.generate_content = output_wrapped Models.generate_content_stream = output_wrapped_stream - AsyncModels.generate_content = self._async_wrapper(output_wrapped) - AsyncModels.generate_content_stream = self._async_stream_wrapper( + AsyncModels.generate_content = _async_wrapper(output_wrapped) + AsyncModels.generate_content_stream = _async_stream_wrapper( output_wrapped_stream ) - def _wrap_output(self, mock_generate_content): - def _wrapped(*args, **kwargs): - return convert_to_response(mock_generate_content(*args, **kwargs)) - - return _wrapped - - def _wrap_output_stream(self, mock_generate_content_stream): - def _wrapped(*args, **kwargs): - for output in mock_generate_content_stream(*args, **kwargs): - yield convert_to_response(output) - - return _wrapped - - def _async_wrapper(self, mock_generate_content): - async def _wrapped(*args, **kwargs): - return mock_generate_content(*args, **kwargs) - - return _wrapped - - def _async_stream_wrapper(self, mock_generate_content_stream): - async def 
_wrapped(*args, **kwargs): - async def _internal_generator(): - for result in mock_generate_content_stream(*args, **kwargs): - yield result - - return _internal_generator() - - return _wrapped - def tearDown(self): super().tearDown() if self._generate_content_mock is None: diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py index 835af2de22..69d7824067 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py @@ -28,6 +28,7 @@ import os import subprocess +import gzip import google.auth import google.auth.credentials import google.genai @@ -57,7 +58,7 @@ def _get_project_from_env(): def _get_project_from_gcloud_cli(): try: gcloud_call_result = subprocess.run( - "gcloud config get project", shell=True, capture_output=True + "gcloud config get project", shell=True, capture_output=True, check=True ) except subprocess.CalledProcessError: return None @@ -145,8 +146,8 @@ def _before_record_response(response): return response -@pytest.fixture(scope="module") -def vcr_config(): +@pytest.fixture(name="vcr_config", scope="module") +def fixture_vcr_config(): return { "filter_query_parameters": [ "key", @@ -169,7 +170,8 @@ def vcr_config(): "x-goog-api-key", "authorization", "server", - "Server" "Server-Timing", + "Server", + "Server-Timing", "Date", ], "before_record_request": _before_record_request, @@ -191,8 +193,8 @@ def _literal_block_scalar_presenter(dumper, data): return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|") -@pytest.fixture(scope="module", autouse=True) -def setup_yaml_pretty_formattinmg(): +@pytest.fixture(name="internal_setup_yaml_pretty_formatting", scope="module", autouse=True) +def 
fixture_setup_yaml_pretty_formatting(): yaml.add_representer(_LiteralBlockScalar, _literal_block_scalar_presenter) @@ -229,6 +231,35 @@ def _convert_body_to_literal(data): return data +# Helper for enforcing GZIP compression where it was originally. +def _ensure_gzip_single_response(data: bytes): + try: + # Attempt to decompress, first, to avoid double compression. + gzip.decompress(data) + return data + except gzip.BadGzipFile: + # It must not have been compressed in the first place. + return gzip.compress(data) + + +# VCRPy automatically decompresses responses before saving them, but it may forget to +# re-encode them when the data is loaded. This can create issues with decompression. +# This is why we re-encode on load; to accurately replay what was originally sent. +# +# https://vcrpy.readthedocs.io/en/latest/advanced.html#decode-compressed-response +def _ensure_casette_gzip(loaded_casette): + for interaction in loaded_casette["interactions"]: + response = interaction["response"] + headers = response["headers"] + if "content-encoding" not in headers and "Content-Encoding" not in headers: + continue + if "content-encoding" in headers and "gzip" not in headers["content-encoding"]: + continue + if "Content-Encoding" in headers and "gzip" not in headers["Content-Encoding"]: + continue + response["body"]["string"] = _ensure_gzip_single_response(response["body"]["string"].encode()) + + class _PrettyPrintJSONBody: """This makes request and response body recordings more readable.""" @@ -241,55 +272,58 @@ def serialize(cassette_dict): @staticmethod def deserialize(cassette_string): - return yaml.load(cassette_string, Loader=yaml.Loader) + result = yaml.load(cassette_string, Loader=yaml.Loader) + _ensure_casette_gzip(result) + return result -@pytest.fixture(scope="module", autouse=True) +@pytest.fixture(name="fully_initialized_vcr", scope="module", autouse=True) def setup_vcr(vcr): vcr.register_serializer("yaml", _PrettyPrintJSONBody) + vcr.serializer = "yaml" return vcr 
-@pytest.fixture -def instrumentor(): +@pytest.fixture(name="instrumentor") +def fixture_instrumentor(): return GoogleGenAiSdkInstrumentor() -@pytest.fixture(autouse=True) -def setup_instrumentation(instrumentor): +@pytest.fixture(name="internal_instrumentation_setup", autouse=True) +def fixture_setup_instrumentation(instrumentor): instrumentor.instrument() yield instrumentor.uninstrument() -@pytest.fixture(autouse=True) -def otel_mocker(): +@pytest.fixture(name="otel_mocker", autouse=True) +def fixture_otel_mocker(): result = OTelMocker() result.install() yield result result.uninstall() -@pytest.fixture(autouse=True, params=["logcontent", "excludecontent"]) -def setup_content_recording(request): +@pytest.fixture(name="setup_content_recording", autouse=True, params=["logcontent", "excludecontent"]) +def fixture_setup_content_recording(request): enabled = request.param == "logcontent" os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = str( enabled ) -@pytest.fixture -def vcr_record_mode(vcr): +@pytest.fixture(name="vcr_record_mode") +def fixture_vcr_record_mode(vcr): return vcr.record_mode -@pytest.fixture -def in_replay_mode(vcr_record_mode): +@pytest.fixture(name="in_replay_mode") +def fixture_in_replay_mode(vcr_record_mode): return vcr_record_mode == RecordMode.NONE -@pytest.fixture(autouse=True) -def gcloud_project(in_replay_mode): +@pytest.fixture(name="gcloud_project", autouse=True) +def fixture_gcloud_project(in_replay_mode): if in_replay_mode: return _FAKE_PROJECT result = _get_real_project() @@ -298,15 +332,15 @@ def gcloud_project(in_replay_mode): return result -@pytest.fixture -def gcloud_location(in_replay_mode): +@pytest.fixture(name="gcloud_location") +def fixture_gcloud_location(in_replay_mode): if in_replay_mode: return _FAKE_LOCATION return _get_real_location() -@pytest.fixture -def gcloud_credentials(in_replay_mode): +@pytest.fixture(name="gcloud_credentials") +def fixture_gcloud_credentials(in_replay_mode): if in_replay_mode: 
return FakeCredentials() creds, _ = google.auth.default() @@ -315,30 +349,30 @@ def gcloud_credentials(in_replay_mode): ) -@pytest.fixture -def gemini_api_key(in_replay_mode): +@pytest.fixture(name="gemini_api_key") +def fixture_gemini_api_key(in_replay_mode): if in_replay_mode: return _FAKE_API_KEY return os.getenv("GEMINI_API_KEY") -@pytest.fixture(autouse=True) -def gcloud_api_key(gemini_api_key): +@pytest.fixture(name="gcloud_api_key", autouse=True) +def fixture_gcloud_api_key(gemini_api_key): if "GOOGLE_API_KEY" not in os.environ: os.environ["GOOGLE_API_KEY"] = gemini_api_key return os.getenv("GOOGLE_API_KEY") -@pytest.fixture -def nonvertex_client_factory(gemini_api_key): +@pytest.fixture(name="nonvertex_client_factory") +def fixture_nonvertex_client_factory(gemini_api_key): def _factory(): - return google.genai.Client(api_key=gemini_api_key) + return google.genai.Client(api_key=gemini_api_key, vertexai=False) return _factory -@pytest.fixture -def vertex_client_factory(gcloud_project, gcloud_location, gcloud_credentials): +@pytest.fixture(name="vertex_client_factory") +def fixture_vertex_client_factory(gcloud_project, gcloud_location, gcloud_credentials): def _factory(): return google.genai.Client( vertexai=True, @@ -350,37 +384,37 @@ def _factory(): return _factory -@pytest.fixture(params=["vertexaiapi"]) -def genai_sdk_backend(request): +@pytest.fixture(name="genai_sdk_backend", params=["vertexaiapi"]) +def fixture_genai_sdk_backend(request): return request.param -@pytest.fixture(autouse=True) -def use_vertex(genai_sdk_backend): +@pytest.fixture(name="use_vertex", autouse=True) +def fixture_use_vertex(genai_sdk_backend): result = bool(genai_sdk_backend == "vertexaiapi") os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "1" if result else "0" return result -@pytest.fixture -def client(vertex_client_factory, nonvertex_client_factory, use_vertex): +@pytest.fixture(name="client") +def fixture_client(vertex_client_factory, nonvertex_client_factory, use_vertex): if 
use_vertex: return vertex_client_factory() return nonvertex_client_factory() -@pytest.fixture(params=["sync", "async"]) -def is_async(request): +@pytest.fixture(name="is_async", params=["sync", "async"]) +def fixture_is_async(request): return request.param == "async" -@pytest.fixture(params=["gemini-1.5-flash-002"]) -def model(request): +@pytest.fixture(name="model", params=["gemini-1.5-flash-002"]) +def fixture_model(request): return request.param -@pytest.fixture -def generate_content(client, is_async): +@pytest.fixture(name="generate_content") +def fixture_generate_content(client, is_async): def _sync_impl(*args, **kwargs): return client.models.generate_content(*args, **kwargs) @@ -392,8 +426,8 @@ def _async_impl(*args, **kwargs): return _sync_impl -@pytest.fixture -def generate_content_stream(client, is_async): +@pytest.fixture(name="generate_content_stream") +def fixture_generate_content_stream(client, is_async): def _sync_impl(*args, **kwargs): results = [] for result in client.models.generate_content_stream(*args, **kwargs): From 0ddc6444806f8ebe47f6215b7638ef4c4f227500 Mon Sep 17 00:00:00 2001 From: Michael Aaron Safyan Date: Fri, 14 Mar 2025 11:50:39 -0400 Subject: [PATCH 11/14] Reformat with ruff. 
--- .../tests/generate_content/base.py | 1 - .../tests/generate_content/test_e2e.py | 44 ++++++++++++++----- 2 files changed, 34 insertions(+), 11 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/base.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/base.py index 4eed570b3a..59f08a5e44 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/base.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/base.py @@ -58,7 +58,6 @@ async def _internal_generator(): return _wrapped - class TestCase(CommonTestCaseBase): # The "setUp" function is defined by "unittest.TestCase" and thus # this name must be used. Uncertain why pylint doesn't seem to diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py index 69d7824067..1afe81c6bb 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py @@ -24,11 +24,11 @@ output is the purview of the other tests in this directory.""" import asyncio +import gzip import json import os import subprocess -import gzip import google.auth import google.auth.credentials import google.genai @@ -58,7 +58,10 @@ def _get_project_from_env(): def _get_project_from_gcloud_cli(): try: gcloud_call_result = subprocess.run( - "gcloud config get project", shell=True, capture_output=True, check=True + "gcloud config get project", + shell=True, + capture_output=True, + check=True, ) except subprocess.CalledProcessError: return None @@ -193,7 +196,9 @@ def _literal_block_scalar_presenter(dumper, data): return dumper.represent_scalar("tag:yaml.org,2002:str", 
data, style="|") -@pytest.fixture(name="internal_setup_yaml_pretty_formatting", scope="module", autouse=True) +@pytest.fixture( + name="internal_setup_yaml_pretty_formatting", scope="module", autouse=True +) def fixture_setup_yaml_pretty_formatting(): yaml.add_representer(_LiteralBlockScalar, _literal_block_scalar_presenter) @@ -251,13 +256,24 @@ def _ensure_casette_gzip(loaded_casette): for interaction in loaded_casette["interactions"]: response = interaction["response"] headers = response["headers"] - if "content-encoding" not in headers and "Content-Encoding" not in headers: + if ( + "content-encoding" not in headers + and "Content-Encoding" not in headers + ): continue - if "content-encoding" in headers and "gzip" not in headers["content-encoding"]: + if ( + "content-encoding" in headers + and "gzip" not in headers["content-encoding"] + ): continue - if "Content-Encoding" in headers and "gzip" not in headers["Content-Encoding"]: + if ( + "Content-Encoding" in headers + and "gzip" not in headers["Content-Encoding"] + ): continue - response["body"]["string"] = _ensure_gzip_single_response(response["body"]["string"].encode()) + response["body"]["string"] = _ensure_gzip_single_response( + response["body"]["string"].encode() + ) class _PrettyPrintJSONBody: @@ -304,7 +320,11 @@ def fixture_otel_mocker(): result.uninstall() -@pytest.fixture(name="setup_content_recording", autouse=True, params=["logcontent", "excludecontent"]) +@pytest.fixture( + name="setup_content_recording", + autouse=True, + params=["logcontent", "excludecontent"], +) def fixture_setup_content_recording(request): enabled = request.param == "logcontent" os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = str( @@ -372,7 +392,9 @@ def _factory(): @pytest.fixture(name="vertex_client_factory") -def fixture_vertex_client_factory(gcloud_project, gcloud_location, gcloud_credentials): +def fixture_vertex_client_factory( + gcloud_project, gcloud_location, gcloud_credentials +): def _factory(): 
return google.genai.Client( vertexai=True, @@ -397,7 +419,9 @@ def fixture_use_vertex(genai_sdk_backend): @pytest.fixture(name="client") -def fixture_client(vertex_client_factory, nonvertex_client_factory, use_vertex): +def fixture_client( + vertex_client_factory, nonvertex_client_factory, use_vertex +): if use_vertex: return vertex_client_factory() return nonvertex_client_factory() From 6e35583e73602a03a852609b2087ac117f6347e9 Mon Sep 17 00:00:00 2001 From: Michael Aaron Safyan Date: Fri, 14 Mar 2025 12:04:46 -0400 Subject: [PATCH 12/14] Prevent fix for Python 3.9 from breaking tests in other versions. --- .../tests/generate_content/test_e2e.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py index 1afe81c6bb..9fc574be47 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py @@ -28,6 +28,7 @@ import json import os import subprocess +import sys import google.auth import google.auth.credentials @@ -276,6 +277,11 @@ def _ensure_casette_gzip(loaded_casette): ) +def _maybe_ensure_casette_gzip(result): + if sys.version_info[0] == 3 and sys.version_info[1] == 9: + _ensure_casette_gzip(result) + + class _PrettyPrintJSONBody: """This makes request and response body recordings more readable.""" @@ -289,7 +295,7 @@ def serialize(cassette_dict): @staticmethod def deserialize(cassette_string): result = yaml.load(cassette_string, Loader=yaml.Loader) - _ensure_casette_gzip(result) + _maybe_ensure_casette_gzip(result) return result From 2793544881d4ce24e545445782835ff82a8d1cca Mon Sep 17 00:00:00 2001 From: Michael Aaron Safyan Date: Fri, 14 Mar 2025 13:55:43 -0400 Subject: [PATCH 13/14] Record 
update in changelog. --- .../opentelemetry-instrumentation-google-genai/CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/CHANGELOG.md b/instrumentation-genai/opentelemetry-instrumentation-google-genai/CHANGELOG.md index 7f1fe2d761..d367d992d2 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/CHANGELOG.md +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +- Restructure tests to keep in line with repository conventions ([#3344](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3344)) + ## Version 0.1b0 (2025-03-05) - Add support for async and streaming. From c7ed0bb985863dbba80aebbfb6efd00ff35cb3e1 Mon Sep 17 00:00:00 2001 From: Michael Safyan Date: Wed, 19 Mar 2025 11:55:34 -0500 Subject: [PATCH 14/14] Don't double iterate when redacting by changing the value. Co-authored-by: Aaron Abbott --- .../tests/generate_content/test_e2e.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py index 9fc574be47..afe4dbfe6b 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py +++ b/instrumentation-genai/opentelemetry-instrumentation-google-genai/tests/generate_content/test_e2e.py @@ -116,12 +116,9 @@ def _should_redact_header(header_key): def _redact_headers(headers): - to_redact = [] for header_key in headers: if _should_redact_header(header_key.lower()): - to_redact.append(header_key) - for header_key in to_redact: - headers[header_key] = "" + headers[header_key] = "" def _before_record_request(request):