Minor Image Analysis SDK update for beta.2 release #34213

Merged: 4 commits, Feb 8, 2024

8 changes: 2 additions & 6 deletions sdk/vision/azure-ai-vision-imageanalysis/CHANGELOG.md
@@ -1,14 +1,10 @@
# Release History

## 1.0.0b2 (Unreleased)

### Features Added
## 1.0.0b2 (2024-02-09)

### Breaking Changes

### Bugs Fixed

### Other Changes
- In the previous version, you would call the `analyze` method on the `ImageAnalysisClient` to analyze an image from a publicly accessible URL, or from a memory buffer. To better align with other Azure client libraries, this was changed in this release. Call the new dedicated `analyze_from_url` method to analyze an image from a URL. Keep calling the `analyze` method to analyze an image from a memory buffer.

## 1.0.0b1 (2024-01-09)

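To make the rename in this changelog entry concrete, here is a minimal before/after sketch. The endpoint and key are placeholders, and the client construction assumes the standard endpoint/credential pattern from the package README; only the `analyze_from_url` and `analyze` signatures come directly from this PR.

```python
# Migration sketch for 1.0.0b2 (endpoint and key are placeholders).
from azure.ai.vision.imageanalysis import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential

client = ImageAnalysisClient(
    endpoint="https://<your-resource>.cognitiveservices.azure.com/",
    credential=AzureKeyCredential("<your-key>"),
)

# 1.0.0b1: client.analyze(image_url=...) analyzed a publicly accessible URL.
# 1.0.0b2: use the dedicated analyze_from_url method for URLs instead.
result = client.analyze_from_url(
    image_url="https://aka.ms/azsdk/image-analysis/sample.jpg",
    visual_features=[VisualFeatures.CAPTION],
)

# Analyzing an image from a memory buffer still goes through analyze.
with open("sample.jpg", "rb") as f:
    result = client.analyze(
        image_data=f.read(),
        visual_features=[VisualFeatures.CAPTION],
    )
```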
4 changes: 2 additions & 2 deletions sdk/vision/azure-ai-vision-imageanalysis/README.md
@@ -168,7 +168,7 @@ This example is similar to the above, except it calls the `analyze` method and p

```python
# Get a caption for the image. This will be a synchronous (blocking) call.
result = client.analyze(
result = client.analyze_from_url(
image_url="https://aka.ms/azsdk/image-analysis/sample.jpg",
visual_features=[VisualFeatures.CAPTION],
gender_neutral_caption=True, # Optional (default is False)
@@ -224,7 +224,7 @@ This example is similar to the above, except it calls the `analyze` method and p

```python
# Extract text (OCR) from an image stream. This will be a synchronous (blocking) call.
result = client.analyze(
result = client.analyze_from_url(
image_url="https://aka.ms/azsdk/image-analysis/sample.jpg",
visual_features=[VisualFeatures.READ]
)
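For context on the READ snippet above, a hedged sketch of consuming the result. The attribute names (`read`, `blocks`, `lines`, `text`) are assumed from the 1.0.0b1 object model and should be verified against the published samples.

```python
# Sketch only: run the READ (OCR) call shown in the README above and print the lines.
# Attribute names (result.read.blocks[*].lines[*].text) are assumed, not taken from this PR.
from azure.ai.vision.imageanalysis import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential

client = ImageAnalysisClient(
    endpoint="https://<your-resource>.cognitiveservices.azure.com/",
    credential=AzureKeyCredential("<your-key>"),
)
result = client.analyze_from_url(
    image_url="https://aka.ms/azsdk/image-analysis/sample.jpg",
    visual_features=[VisualFeatures.READ],
)

if result.read is not None:
    for block in result.read.blocks:
        for line in block.lines:
            print(f"Text line: '{line.text}'")
```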
@@ -16,7 +16,7 @@
import re
import copy
import typing
import email
import email.utils
from datetime import datetime, date, time, timedelta, timezone
from json import JSONEncoder
import isodate
@@ -462,7 +462,13 @@ def _get_rest_field(


def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typing.Any:
return _deserialize(rf._type, value) if (rf and rf._is_model) else _serialize(value, rf._format if rf else None)
if not rf:
return _serialize(value, None)
if rf._is_multipart_file_input:
return value
if rf._is_model:
return _deserialize(rf._type, value)
return _serialize(value, rf._format)


class Model(_MyMutableMapping):
@@ -559,15 +565,22 @@ def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.
for k, v in self.items():
if exclude_readonly and k in readonly_props: # pyright: ignore[reportUnboundVariable]
continue
result[k] = Model._as_dict_value(v, exclude_readonly=exclude_readonly)
is_multipart_file_input = False
try:
is_multipart_file_input = next(
rf for rf in self._attr_to_rest_field.values() if rf._rest_name == k
)._is_multipart_file_input
except StopIteration:
pass
result[k] = v if is_multipart_file_input else Model._as_dict_value(v, exclude_readonly=exclude_readonly)
return result

@staticmethod
def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any:
if v is None or isinstance(v, _Null):
return None
if isinstance(v, (list, tuple, set)):
return [Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v]
return type(v)(Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v)
if isinstance(v, dict):
return {dk: Model._as_dict_value(dv, exclude_readonly=exclude_readonly) for dk, dv in v.items()}
return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v
@@ -762,6 +775,7 @@ def __init__(
visibility: typing.Optional[typing.List[str]] = None,
default: typing.Any = _UNSET,
format: typing.Optional[str] = None,
is_multipart_file_input: bool = False,
):
self._type = type
self._rest_name_input = name
@@ -771,6 +785,7 @@
self._is_model = False
self._default = default
self._format = format
self._is_multipart_file_input = is_multipart_file_input

@property
def _rest_name(self) -> str:
@@ -816,8 +831,16 @@ def rest_field(
visibility: typing.Optional[typing.List[str]] = None,
default: typing.Any = _UNSET,
format: typing.Optional[str] = None,
is_multipart_file_input: bool = False,
) -> typing.Any:
return _RestField(name=name, type=type, visibility=visibility, default=default, format=format)
return _RestField(
name=name,
type=type,
visibility=visibility,
default=default,
format=format,
is_multipart_file_input=is_multipart_file_input,
)


def rest_discriminator(
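One detail worth calling out in the generated core above: `_as_dict_value` now rebuilds sequences with `type(v)(...)` instead of a plain list comprehension, so tuples and sets keep their original container type, and the new `is_multipart_file_input` flag lets file payloads pass through `_create_value` and `as_dict` unserialized. A standalone illustration of the container-type point (not the SDK code itself):

```python
# Standalone illustration of the container-type change; not the SDK's _as_dict_value.
def as_dict_value_old(v):
    if isinstance(v, (list, tuple, set)):
        return [as_dict_value_old(x) for x in v]  # always collapses to a list
    return v

def as_dict_value_new(v):
    if isinstance(v, (list, tuple, set)):
        # Rebuild the converted elements in the same container type as the input.
        return type(v)(as_dict_value_new(x) for x in v)
    return v

print(type(as_dict_value_old((1, 2))))  # <class 'list'>
print(type(as_dict_value_new((1, 2))))  # <class 'tuple'>
```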
@@ -170,9 +170,6 @@ def _analyze_from_image_data(
If however you would like to make sure analysis results do not change over time, set this
value to a specific model version. Default value is None.
:paramtype model_version: str
:keyword content_type: The format of the HTTP payload. Default value is
"application/octet-stream".
:paramtype content_type: str
:return: ImageAnalysisResult. The ImageAnalysisResult is compatible with MutableMapping
:rtype: ~azure.ai.vision.imageanalysis.models.ImageAnalysisResult
:raises ~azure.core.exceptions.HttpResponseError:
@@ -488,8 +485,6 @@ def _analyze_from_url(
If however you would like to make sure analysis results do not change over time, set this
value to a specific model version. Default value is None.
:paramtype model_version: str
:keyword content_type: The format of the HTTP payload. Default value is None.
:paramtype content_type: str
:return: ImageAnalysisResult. The ImageAnalysisResult is compatible with MutableMapping
:rtype: ~azure.ai.vision.imageanalysis.models.ImageAnalysisResult
:raises ~azure.core.exceptions.HttpResponseError:
@@ -6,16 +6,8 @@

Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize

Note 1: the analyze overloads here should have been implemented in the `_patch.py` file in the `_operations` folder
instead of here. That would have worked fine, except there is an issue with the generated Python
ref-docs. The overloads do not show up. See this GitHub issue: https://github.com/Azure/autorest.python/issues/1315.
To overcome this, the overloads are defined here. Consider moving them to the right place once the
above issue is fixed.

Note 2: Don't bother documenting the two overload methods below. The doc tool (sphinx) will not pick them up. Instead,
document the 3rd method.
"""
from typing import List, overload, Any, Optional, Union
from typing import List, Any, Optional, Union
from azure.core.tracing.decorator import distributed_trace
from . import models as _models
from ._operations._operations import ImageAnalysisClientOperationsMixin
@@ -35,41 +27,73 @@ class ImageAnalysisClient(ImageAnalysisClientGenerated):
:paramtype api_version: str
"""

@overload
def analyze(
@distributed_trace
def analyze_from_url(
self,
*,
image_url: str,
visual_features: List[_models.VisualFeatures],
language: Optional[str] = None,
gender_neutral_caption: Optional[bool] = None,
smart_crops_aspect_ratios: Optional[List[float]] = None,
model_version: Optional[str] = None,
**kwargs: Any
) -> _models.ImageAnalysisResult:
...

@overload
def analyze(
self,
*,
image_data: bytes,
visual_features: List[_models.VisualFeatures],
language: Optional[str] = None,
gender_neutral_caption: Optional[bool] = None,
smart_crops_aspect_ratios: Optional[List[float]] = None,
model_version: Optional[str] = None,
**kwargs: Any
) -> _models.ImageAnalysisResult:
...
"""Performs a single Image Analysis operation.

:param image_url: The publicly accessible URL of the image to analyze.
:type image_url: str
:param visual_features: A list of visual features to analyze. Required. Seven visual features
are supported: Caption, DenseCaptions, Read (OCR), Tags, Objects, SmartCrops, and People. At
least one visual feature must be specified.
:type visual_features: list[~azure.ai.vision.imageanalysis.models.VisualFeatures]
:keyword language: The desired language for result generation (a two-letter language code).
Defaults to 'en' (English). See https://aka.ms/cv-languages for a list of supported languages.
:paramtype language: str
:keyword gender_neutral_caption: Boolean flag for enabling gender-neutral captioning for
Caption and Dense Captions features. Defaults to 'false'.
Captions may contain gender terms (for example: 'man', 'woman', or 'boy', 'girl').
If you set this to 'true', those will be replaced with gender-neutral terms (for example:
'person' or 'child').
:paramtype gender_neutral_caption: bool
:keyword smart_crops_aspect_ratios: A list of aspect ratios to use for smart cropping.
Defaults to one crop region with an aspect ratio the service sees fit between
0.5 and 2.0 (inclusive). Aspect ratios are calculated by dividing the target crop
width in pixels by the height in pixels. When set, supported values are
between 0.75 and 1.8 (inclusive).
:paramtype smart_crops_aspect_ratios: list[float]
:keyword model_version: The version of cloud AI-model used for analysis. Defaults to 'latest',
for the latest AI model with recent improvements.
The format is the following: 'latest' or 'YYYY-MM-DD' or 'YYYY-MM-DD-preview',
where 'YYYY', 'MM', 'DD' are the year, month and day associated with the model.
If you would like to make sure analysis results do not change over time, set this
value to a specific model version.
:paramtype model_version: str
:return: ImageAnalysisResult. The ImageAnalysisResult is compatible with MutableMapping
:rtype: ~azure.ai.vision.imageanalysis.models.ImageAnalysisResult
:raises: ~azure.core.exceptions.HttpResponseError
"""

visual_features_impl: List[Union[str, _models.VisualFeatures]] = list(visual_features)

return ImageAnalysisClientOperationsMixin._analyze_from_url( # pylint: disable=protected-access
self,
image_content=_models._models.ImageUrl(url=image_url), # pylint: disable=protected-access
visual_features=visual_features_impl,
language=language,
gender_neutral_caption=gender_neutral_caption,
smart_crops_aspect_ratios=smart_crops_aspect_ratios,
model_version=model_version,
**kwargs
)


@distributed_trace
def analyze(
self,
*,
image_data: bytes,
visual_features: List[_models.VisualFeatures],
image_data: Optional[bytes] = None,
image_url: Optional[str] = None,
*,
language: Optional[str] = None,
gender_neutral_caption: Optional[bool] = None,
smart_crops_aspect_ratios: Optional[List[float]] = None,
@@ -78,14 +102,12 @@ def analyze(
) -> _models.ImageAnalysisResult:
"""Performs a single Image Analysis operation.

:keyword image_url: The publicly accessible URL of the image to analyze.
:paramtype image_url: str
:keyword image_data: A buffer containing the whole image to be analyzed.
:paramtype image_data: bytes
:keyword visual_features: A list of visual features to analyze. Required. Seven visual features
:param image_data: A buffer containing the whole image to be analyzed.
:type image_data: bytes
:param visual_features: A list of visual features to analyze. Required. Seven visual features
are supported: Caption, DenseCaptions, Read (OCR), Tags, Objects, SmartCrops, and People. At
least one visual feature must be specified.
:paramtype visual_features: list[~azure.ai.vision.imageanalysis.models.VisualFeatures]
:type visual_features: list[~azure.ai.vision.imageanalysis.models.VisualFeatures]
:keyword language: The desired language for result generation (a two-letter language code).
Defaults to 'en' (English). See https://aka.ms/cv-languages for a list of supported languages.
:paramtype language: str
@@ -115,31 +137,16 @@

visual_features_impl: List[Union[str, _models.VisualFeatures]] = list(visual_features)

if image_url is not None:
return ImageAnalysisClientOperationsMixin._analyze_from_url( # pylint: disable=protected-access
self,
image_content=_models._models.ImageUrl(url=image_url), # pylint: disable=protected-access
visual_features=visual_features_impl,
language=language,
gender_neutral_caption=gender_neutral_caption,
smart_crops_aspect_ratios=smart_crops_aspect_ratios,
model_version=model_version,
**kwargs
)

if image_data is not None:
return ImageAnalysisClientOperationsMixin._analyze_from_image_data( # pylint: disable=protected-access
self,
image_content=image_data,
visual_features=visual_features_impl,
language=language,
gender_neutral_caption=gender_neutral_caption,
smart_crops_aspect_ratios=smart_crops_aspect_ratios,
model_version=model_version,
**kwargs
)

raise ValueError("Either image_data or image_url must be specified.")
return ImageAnalysisClientOperationsMixin._analyze_from_image_data( # pylint: disable=protected-access
self,
image_content=image_data,
visual_features=visual_features_impl,
language=language,
gender_neutral_caption=gender_neutral_caption,
smart_crops_aspect_ratios=smart_crops_aspect_ratios,
model_version=model_version,
**kwargs
)


__all__: List[str] = [
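Putting the reshaped public surface together, a short sketch that exercises the optional keywords documented in the new `analyze_from_url` docstring. The endpoint and key are placeholders, and the enum member names (for example `SMART_CROPS`) are assumed to follow the usual upper-snake convention rather than taken from this diff.

```python
# Sketch of the 1.0.0b2 public surface; placeholders and assumed enum names noted above.
from azure.ai.vision.imageanalysis import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential

client = ImageAnalysisClient(
    endpoint="https://<your-resource>.cognitiveservices.azure.com/",
    credential=AzureKeyCredential("<your-key>"),
)

result = client.analyze_from_url(
    image_url="https://aka.ms/azsdk/image-analysis/sample.jpg",
    visual_features=[VisualFeatures.CAPTION, VisualFeatures.SMART_CROPS],  # SMART_CROPS name assumed
    language="en",                          # two-letter language code, defaults to 'en'
    gender_neutral_caption=True,            # replaces gendered caption terms
    smart_crops_aspect_ratios=[0.9, 1.33],  # supported range is 0.75 to 1.8 when set
    model_version="latest",                 # or a dated version such as 'YYYY-MM-DD'
)
```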
@@ -89,9 +89,6 @@ async def _analyze_from_image_data(
If however you would like to make sure analysis results do not change over time, set this
value to a specific model version. Default value is None.
:paramtype model_version: str
:keyword content_type: The format of the HTTP payload. Default value is
"application/octet-stream".
:paramtype content_type: str
:return: ImageAnalysisResult. The ImageAnalysisResult is compatible with MutableMapping
:rtype: ~azure.ai.vision.imageanalysis.models.ImageAnalysisResult
:raises ~azure.core.exceptions.HttpResponseError:
@@ -407,8 +404,6 @@ async def _analyze_from_url(
If however you would like to make sure analysis results do not change over time, set this
value to a specific model version. Default value is None.
:paramtype model_version: str
:keyword content_type: The format of the HTTP payload. Default value is None.
:paramtype content_type: str
:return: ImageAnalysisResult. The ImageAnalysisResult is compatible with MutableMapping
:rtype: ~azure.ai.vision.imageanalysis.models.ImageAnalysisResult
:raises ~azure.core.exceptions.HttpResponseError: