Skip to content

Commit 0f4f0e5

Browse files
feat!: migrate to use microgen (#52)
* feat!: migrate to use microgen * chore: update code * chore: lint
1 parent 9f24bc2 commit 0f4f0e5

17 files changed

+130
-145
lines changed

vision/snippets/crop_hints/crop_hints.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,6 @@
2626
import io
2727

2828
from google.cloud import vision
29-
from google.cloud.vision import types
3029
from PIL import Image, ImageDraw
3130
# [END vision_crop_hints_tutorial_imports]
3231

@@ -39,10 +38,10 @@ def get_crop_hint(path):
3938
with io.open(path, 'rb') as image_file:
4039
content = image_file.read()
4140

42-
image = types.Image(content=content)
41+
image = vision.Image(content=content)
4342

44-
crop_hints_params = types.CropHintsParams(aspect_ratios=[1.77])
45-
image_context = types.ImageContext(crop_hints_params=crop_hints_params)
43+
crop_hints_params = vision.CropHintsParams(aspect_ratios=[1.77])
44+
image_context = vision.ImageContext(crop_hints_params=crop_hints_params)
4645

4746
response = client.crop_hints(image=image, image_context=image_context)
4847
hints = response.crop_hints_annotation.crop_hints

vision/snippets/detect/beta_snippets.py

Lines changed: 25 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ def localize_objects(path):
4848

4949
with open(path, 'rb') as image_file:
5050
content = image_file.read()
51-
image = vision.types.Image(content=content)
51+
image = vision.Image(content=content)
5252

5353
objects = client.object_localization(
5454
image=image).localized_object_annotations
@@ -72,7 +72,7 @@ def localize_objects_uri(uri):
7272
from google.cloud import vision_v1p3beta1 as vision
7373
client = vision.ImageAnnotatorClient()
7474

75-
image = vision.types.Image()
75+
image = vision.Image()
7676
image.source.image_uri = uri
7777

7878
objects = client.object_localization(
@@ -100,12 +100,12 @@ def detect_handwritten_ocr(path):
100100
with io.open(path, 'rb') as image_file:
101101
content = image_file.read()
102102

103-
image = vision.types.Image(content=content)
103+
image = vision.Image(content=content)
104104

105105
# Language hint codes for handwritten OCR:
106106
# en-t-i0-handwrit, mul-Latn-t-i0-handwrit
107107
# Note: Use only one language hint code per request for handwritten OCR.
108-
image_context = vision.types.ImageContext(
108+
image_context = vision.ImageContext(
109109
language_hints=['en-t-i0-handwrit'])
110110

111111
response = client.document_text_detection(image=image,
@@ -149,13 +149,13 @@ def detect_handwritten_ocr_uri(uri):
149149
"""
150150
from google.cloud import vision_v1p3beta1 as vision
151151
client = vision.ImageAnnotatorClient()
152-
image = vision.types.Image()
152+
image = vision.Image()
153153
image.source.image_uri = uri
154154

155155
# Language hint codes for handwritten OCR:
156156
# en-t-i0-handwrit, mul-Latn-t-i0-handwrit
157157
# Note: Use only one language hint code per request for handwritten OCR.
158-
image_context = vision.types.ImageContext(
158+
image_context = vision.ImageContext(
159159
language_hints=['en-t-i0-handwrit'])
160160

161161
response = client.document_text_detection(image=image,
@@ -207,16 +207,16 @@ def detect_batch_annotate_files(path):
207207

208208
# Other supported mime_types: 'image/tiff' or 'image/gif'
209209
mime_type = 'application/pdf'
210-
input_config = vision.types.InputConfig(
210+
input_config = vision.InputConfig(
211211
content=content, mime_type=mime_type)
212212

213-
feature = vision.types.Feature(
214-
type=vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION)
213+
feature = vision.Feature(
214+
type_=vision.Feature.Type.DOCUMENT_TEXT_DETECTION)
215215
# Annotate the first two pages and the last one (max 5 pages)
216216
# First page starts at 1, and not 0. Last page is -1.
217217
pages = [1, 2, -1]
218218

219-
request = vision.types.AnnotateFileRequest(
219+
request = vision.AnnotateFileRequest(
220220
input_config=input_config,
221221
features=[feature],
222222
pages=pages)
@@ -255,16 +255,16 @@ def detect_batch_annotate_files_uri(gcs_uri):
255255

256256
# Other supported mime_types: 'image/tiff' or 'image/gif'
257257
mime_type = 'application/pdf'
258-
input_config = vision.types.InputConfig(
259-
gcs_source=vision.types.GcsSource(uri=gcs_uri), mime_type=mime_type)
258+
input_config = vision.InputConfig(
259+
gcs_source=vision.GcsSource(uri=gcs_uri), mime_type=mime_type)
260260

261-
feature = vision.types.Feature(
262-
type=vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION)
261+
feature = vision.Feature(
262+
type_=vision.Feature.Type.DOCUMENT_TEXT_DETECTION)
263263
# Annotate the first two pages and the last one (max 5 pages)
264264
# First page starts at 1, and not 0. Last page is -1.
265265
pages = [1, 2, -1]
266266

267-
request = vision.types.AnnotateFileRequest(
267+
request = vision.AnnotateFileRequest(
268268
input_config=input_config,
269269
features=[feature],
270270
pages=pages)
@@ -299,24 +299,24 @@ def async_batch_annotate_images_uri(input_image_uri, output_uri):
299299
import re
300300

301301
from google.cloud import storage
302-
from google.protobuf import json_format
302+
303303
from google.cloud import vision_v1p4beta1 as vision
304304
client = vision.ImageAnnotatorClient()
305305

306306
# Construct the request for the image(s) to be annotated:
307-
image_source = vision.types.ImageSource(image_uri=input_image_uri)
308-
image = vision.types.Image(source=image_source)
307+
image_source = vision.ImageSource(image_uri=input_image_uri)
308+
image = vision.Image(source=image_source)
309309
features = [
310-
vision.types.Feature(type=vision.enums.Feature.Type.LABEL_DETECTION),
311-
vision.types.Feature(type=vision.enums.Feature.Type.TEXT_DETECTION),
312-
vision.types.Feature(type=vision.enums.Feature.Type.IMAGE_PROPERTIES),
310+
vision.Feature(type_=vision.Feature.Type.LABEL_DETECTION),
311+
vision.Feature(type_=vision.Feature.Type.TEXT_DETECTION),
312+
vision.Feature(type_=vision.Feature.Type.IMAGE_PROPERTIES),
313313
]
314314
requests = [
315-
vision.types.AnnotateImageRequest(image=image, features=features),
315+
vision.AnnotateImageRequest(image=image, features=features),
316316
]
317317

318-
gcs_destination = vision.types.GcsDestination(uri=output_uri)
319-
output_config = vision.types.OutputConfig(
318+
gcs_destination = vision.GcsDestination(uri=output_uri)
319+
output_config = vision.OutputConfig(
320320
gcs_destination=gcs_destination, batch_size=2)
321321

322322
operation = client.async_batch_annotate_images(
@@ -347,8 +347,7 @@ def async_batch_annotate_images_uri(input_image_uri, output_uri):
347347
output = blob_list[0]
348348

349349
json_string = output.download_as_string()
350-
response = json_format.Parse(json_string,
351-
vision.types.BatchAnnotateImagesResponse())
350+
response = vision.BatchAnnotateImagesResponse.from_json(json_string)
352351

353352
# Prints the actual response for the first annotate image request.
354353
print(u'The annotation response for the first request: {}'.format(

0 commit comments

Comments
 (0)