@@ -48,7 +48,7 @@ def localize_objects(path):
 
     with open(path, 'rb') as image_file:
         content = image_file.read()
-    image = vision.types.Image(content=content)
+    image = vision.Image(content=content)
 
     objects = client.object_localization(
         image=image).localized_object_annotations
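For context, a minimal runnable sketch of how the migrated snippet reads end to end, assuming google-cloud-vision 2.x (where vision.Image replaces vision.types.Image); the local path is a placeholder:

# Minimal sketch, assuming google-cloud-vision 2.x; the path is a placeholder.
from google.cloud import vision_v1p3beta1 as vision

client = vision.ImageAnnotatorClient()

with open('resources/puppies.jpg', 'rb') as image_file:  # placeholder path
    content = image_file.read()
image = vision.Image(content=content)

objects = client.object_localization(
    image=image).localized_object_annotations

for obj in objects:
    print('{} (confidence: {})'.format(obj.name, obj.score))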
@@ -72,7 +72,7 @@ def localize_objects_uri(uri):
     from google.cloud import vision_v1p3beta1 as vision
     client = vision.ImageAnnotatorClient()
 
-    image = vision.types.Image()
+    image = vision.Image()
     image.source.image_uri = uri
 
     objects = client.object_localization(
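The URI variant differs only in how the Image is built; a short sketch, with the gs:// URI as a placeholder:

# Sketch of the URI form, assuming google-cloud-vision 2.x; the URI is a placeholder.
from google.cloud import vision_v1p3beta1 as vision

client = vision.ImageAnnotatorClient()

image = vision.Image()
image.source.image_uri = 'gs://my-bucket/puppies.jpg'  # placeholder URI

objects = client.object_localization(
    image=image).localized_object_annotations
print('Number of objects found: {}'.format(len(objects)))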
@@ -100,12 +100,12 @@ def detect_handwritten_ocr(path):
     with io.open(path, 'rb') as image_file:
         content = image_file.read()
 
-    image = vision.types.Image(content=content)
+    image = vision.Image(content=content)
 
     # Language hint codes for handwritten OCR:
     # en-t-i0-handwrit, mul-Latn-t-i0-handwrit
     # Note: Use only one language hint code per request for handwritten OCR.
-    image_context = vision.types.ImageContext(
+    image_context = vision.ImageContext(
         language_hints=['en-t-i0-handwrit'])
 
     response = client.document_text_detection(image=image,
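A sketch of the migrated handwritten-OCR call, assuming google-cloud-vision 2.x; the local path is a placeholder and the ImageContext carries the single language hint:

# Sketch of the migrated handwritten-OCR request; the path is a placeholder.
import io

from google.cloud import vision_v1p3beta1 as vision

client = vision.ImageAnnotatorClient()

with io.open('resources/handwritten.jpg', 'rb') as image_file:  # placeholder
    content = image_file.read()
image = vision.Image(content=content)

# Use only one language hint code per request for handwritten OCR.
image_context = vision.ImageContext(
    language_hints=['en-t-i0-handwrit'])

response = client.document_text_detection(image=image,
                                          image_context=image_context)
print('Full Text: {}'.format(response.full_text_annotation.text))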
@@ -149,13 +149,13 @@ def detect_handwritten_ocr_uri(uri):
     """
     from google.cloud import vision_v1p3beta1 as vision
     client = vision.ImageAnnotatorClient()
-    image = vision.types.Image()
+    image = vision.Image()
     image.source.image_uri = uri
 
     # Language hint codes for handwritten OCR:
     # en-t-i0-handwrit, mul-Latn-t-i0-handwrit
     # Note: Use only one language hint code per request for handwritten OCR.
-    image_context = vision.types.ImageContext(
+    image_context = vision.ImageContext(
         language_hints=['en-t-i0-handwrit'])
 
     response = client.document_text_detection(image=image,
@@ -207,16 +207,16 @@ def detect_batch_annotate_files(path):
 
     # Other supported mime_types: 'image/tiff' or 'image/gif'
     mime_type = 'application/pdf'
-    input_config = vision.types.InputConfig(
+    input_config = vision.InputConfig(
         content=content, mime_type=mime_type)
 
-    feature = vision.types.Feature(
-        type=vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION)
+    feature = vision.Feature(
+        type_=vision.Feature.Type.DOCUMENT_TEXT_DETECTION)
     # Annotate the first two pages and the last one (max 5 pages)
     # First page starts at 1, and not 0. Last page is -1.
     pages = [1, 2, -1]
 
-    request = vision.types.AnnotateFileRequest(
+    request = vision.AnnotateFileRequest(
         input_config=input_config,
         features=[feature],
         pages=pages)
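In the 2.x surface the enum moves onto the message class and the field is spelled type_. A sketch of the migrated local-PDF batch request; the vision_v1p4beta1 import is an assumption (this hunk does not show the function's own import), and the PDF path is a placeholder:

# Sketch of a synchronous batch file request after the migration.
from google.cloud import vision_v1p4beta1 as vision  # assumed surface

client = vision.ImageAnnotatorClient()

with open('resources/kafka.pdf', 'rb') as pdf_file:  # placeholder path
    content = pdf_file.read()

input_config = vision.InputConfig(
    content=content, mime_type='application/pdf')
feature = vision.Feature(
    type_=vision.Feature.Type.DOCUMENT_TEXT_DETECTION)

# First page starts at 1, not 0; -1 selects the last page (max 5 pages).
request = vision.AnnotateFileRequest(
    input_config=input_config,
    features=[feature],
    pages=[1, 2, -1])

response = client.batch_annotate_files(requests=[request])
for image_response in response.responses[0].responses:
    print(image_response.full_text_annotation.text)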
@@ -255,16 +255,16 @@ def detect_batch_annotate_files_uri(gcs_uri):
 
     # Other supported mime_types: 'image/tiff' or 'image/gif'
     mime_type = 'application/pdf'
-    input_config = vision.types.InputConfig(
-        gcs_source=vision.types.GcsSource(uri=gcs_uri), mime_type=mime_type)
+    input_config = vision.InputConfig(
+        gcs_source=vision.GcsSource(uri=gcs_uri), mime_type=mime_type)
 
-    feature = vision.types.Feature(
-        type=vision.enums.Feature.Type.DOCUMENT_TEXT_DETECTION)
+    feature = vision.Feature(
+        type_=vision.Feature.Type.DOCUMENT_TEXT_DETECTION)
     # Annotate the first two pages and the last one (max 5 pages)
     # First page starts at 1, and not 0. Last page is -1.
     pages = [1, 2, -1]
 
-    request = vision.types.AnnotateFileRequest(
+    request = vision.AnnotateFileRequest(
         input_config=input_config,
         features=[feature],
         pages=pages)
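The Cloud Storage variant changes only the input configuration; a short sketch, where the import and the gs:// URI are assumptions/placeholders:

# Sketch: the same request, but reading the PDF from Cloud Storage.
from google.cloud import vision_v1p4beta1 as vision  # assumed surface

input_config = vision.InputConfig(
    gcs_source=vision.GcsSource(uri='gs://my-bucket/kafka.pdf'),  # placeholder
    mime_type='application/pdf')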
@@ -299,24 +299,24 @@ def async_batch_annotate_images_uri(input_image_uri, output_uri):
     import re
 
     from google.cloud import storage
-    from google.protobuf import json_format
+
     from google.cloud import vision_v1p4beta1 as vision
     client = vision.ImageAnnotatorClient()
 
     # Construct the request for the image(s) to be annotated:
-    image_source = vision.types.ImageSource(image_uri=input_image_uri)
-    image = vision.types.Image(source=image_source)
+    image_source = vision.ImageSource(image_uri=input_image_uri)
+    image = vision.Image(source=image_source)
     features = [
-        vision.types.Feature(type=vision.enums.Feature.Type.LABEL_DETECTION),
-        vision.types.Feature(type=vision.enums.Feature.Type.TEXT_DETECTION),
-        vision.types.Feature(type=vision.enums.Feature.Type.IMAGE_PROPERTIES),
+        vision.Feature(type_=vision.Feature.Type.LABEL_DETECTION),
+        vision.Feature(type_=vision.Feature.Type.TEXT_DETECTION),
+        vision.Feature(type_=vision.Feature.Type.IMAGE_PROPERTIES),
     ]
     requests = [
-        vision.types.AnnotateImageRequest(image=image, features=features),
+        vision.AnnotateImageRequest(image=image, features=features),
     ]
 
-    gcs_destination = vision.types.GcsDestination(uri=output_uri)
-    output_config = vision.types.OutputConfig(
+    gcs_destination = vision.GcsDestination(uri=output_uri)
+    output_config = vision.OutputConfig(
         gcs_destination=gcs_destination, batch_size=2)
 
     operation = client.async_batch_annotate_images(
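A sketch of the migrated request construction for the asynchronous image batch; the input and output URIs are placeholders, and the requests/output_config keywords on async_batch_annotate_images are an assumption (the call's continuation is not shown in this hunk):

# Sketch of the migrated async image-annotation request; URIs are placeholders.
from google.cloud import vision_v1p4beta1 as vision

client = vision.ImageAnnotatorClient()

image = vision.Image(
    source=vision.ImageSource(image_uri='gs://my-bucket/puppies.jpg'))
features = [
    vision.Feature(type_=vision.Feature.Type.LABEL_DETECTION),
    vision.Feature(type_=vision.Feature.Type.TEXT_DETECTION),
    vision.Feature(type_=vision.Feature.Type.IMAGE_PROPERTIES),
]
requests = [vision.AnnotateImageRequest(image=image, features=features)]

output_config = vision.OutputConfig(
    gcs_destination=vision.GcsDestination(uri='gs://my-bucket/output/'),
    batch_size=2)

operation = client.async_batch_annotate_images(
    requests=requests, output_config=output_config)
operation.result(timeout=300)  # wait until the results are written to GCS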
@@ -347,8 +347,7 @@ def async_batch_annotate_images_uri(input_image_uri, output_uri):
     output = blob_list[0]
 
     json_string = output.download_as_string()
-    response = json_format.Parse(json_string,
-                                 vision.types.BatchAnnotateImagesResponse())
+    response = vision.BatchAnnotateImagesResponse.from_json(json_string)
 
     # Prints the actual response for the first annotate image request.
     print(u'The annotation response for the first request: {}'.format(
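A sketch of reading one output shard back and parsing it with the proto-plus from_json helper that replaces json_format.Parse; the bucket name and prefix are placeholders:

# Sketch: download one output shard and parse it with from_json.
from google.cloud import storage
from google.cloud import vision_v1p4beta1 as vision

storage_client = storage.Client()
bucket = storage_client.get_bucket('my-bucket')         # placeholder bucket
blob_list = list(bucket.list_blobs(prefix='output/'))   # placeholder prefix

json_string = blob_list[0].download_as_string()
response = vision.BatchAnnotateImagesResponse.from_json(json_string)

# Each shard holds up to batch_size AnnotateImageResponse messages.
print(response.responses[0].label_annotations)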