
Commit 91f7428

dizcology authored and danoscarmike committed
update samples to v1 [(#1221)](#1221)
* update samples to v1
* replace while loop with operation.result(timeout)
* addressing review comments
* flake
* flake
1 parent 7d9a733 commit 91f7428
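
The pattern change repeated across every updated sample below is dropping the manual `operation.done()` polling loop in favor of the long-running operation's blocking `result(timeout)` call on the v1 client. A minimal sketch of the new pattern, assuming the pinned v1 library is installed; the GCS URI is a placeholder, not something taken from this commit:

```python
from google.cloud import videointelligence

# The v1 client replaces the videointelligence_v1beta2 module used before this commit.
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.LABEL_DETECTION]

# annotate_video returns a long-running operation; features is now passed as a keyword argument.
operation = video_client.annotate_video(
    'gs://your-bucket/your-video.mp4',  # placeholder URI
    features=features)

# Instead of polling operation.done() in a while loop with time.sleep(),
# block until the backend finishes or the timeout (in seconds) expires.
result = operation.result(timeout=90)

# A single video was processed, so the first annotation result is used.
print(len(result.annotation_results[0].segment_label_annotations))
```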

3 files changed: +50 −75 lines changed


videointelligence/samples/analyze/analyze.py

Lines changed: 47 additions & 74 deletions
@@ -30,37 +30,26 @@
 import argparse
 import base64
 import io
-import sys
-import time
 
-from google.cloud import videointelligence_v1beta2
-from google.cloud.videointelligence_v1beta2 import enums
-from google.cloud.videointelligence_v1beta2 import types
+from google.cloud import videointelligence
 
 
 def analyze_explicit_content(path):
     """ Detects explicit content from the GCS path to a video. """
-    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
-    features = [enums.Feature.EXPLICIT_CONTENT_DETECTION]
+    video_client = videointelligence.VideoIntelligenceServiceClient()
+    features = [videointelligence.enums.Feature.EXPLICIT_CONTENT_DETECTION]
 
-    operation = video_client.annotate_video(path, features)
+    operation = video_client.annotate_video(path, features=features)
     print('\nProcessing video for explicit content annotations:')
 
-    while not operation.done():
-        sys.stdout.write('.')
-        sys.stdout.flush()
-        time.sleep(15)
-
+    result = operation.result(timeout=90)
     print('\nFinished processing.')
 
-    # first result is retrieved because a single video was processed
-    explicit_annotation = (operation.result().annotation_results[0].
-                           explicit_annotation)
-
     likely_string = ("Unknown", "Very unlikely", "Unlikely", "Possible",
                      "Likely", "Very likely")
 
-    for frame in explicit_annotation.frames:
+    # first result is retrieved because a single video was processed
+    for frame in result.annotation_results[0].explicit_annotation.frames:
         frame_time = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
         print('Time: {}s'.format(frame_time))
         print('\tpornography: {}'.format(
@@ -69,28 +58,24 @@ def analyze_explicit_content(path):
 
 def analyze_faces(path):
     """ Detects faces given a GCS path. """
-    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
-    features = [enums.Feature.FACE_DETECTION]
+    video_client = videointelligence.VideoIntelligenceServiceClient()
+    features = [videointelligence.enums.Feature.FACE_DETECTION]
 
-    config = types.FaceDetectionConfig(include_bounding_boxes=True)
-    context = types.VideoContext(face_detection_config=config)
+    config = videointelligence.types.FaceDetectionConfig(
+        include_bounding_boxes=True)
+    context = videointelligence.types.VideoContext(
+        face_detection_config=config)
 
     operation = video_client.annotate_video(
-        path, features, video_context=context)
+        path, features=features, video_context=context)
     print('\nProcessing video for face annotations:')
 
-    while not operation.done():
-        sys.stdout.write('.')
-        sys.stdout.flush()
-        time.sleep(15)
-
+    result = operation.result(timeout=600)
     print('\nFinished processing.')
 
     # first result is retrieved because a single video was processed
-    face_annotations = (operation.result().annotation_results[0].
-                        face_annotations)
-
-    for face_id, face in enumerate(face_annotations):
+    faces = result.annotation_results[0].face_annotations
+    for face_id, face in enumerate(faces):
         print('Face {}'.format(face_id))
         print('Thumbnail size: {}'.format(len(face.thumbnail)))
 
@@ -119,29 +104,25 @@ def analyze_faces(path):
 
 def analyze_labels(path):
     """ Detects labels given a GCS path. """
-    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
-    features = [enums.Feature.LABEL_DETECTION]
+    video_client = videointelligence.VideoIntelligenceServiceClient()
+    features = [videointelligence.enums.Feature.LABEL_DETECTION]
 
-    config = types.LabelDetectionConfig(
-        label_detection_mode=enums.LabelDetectionMode.SHOT_AND_FRAME_MODE)
-    context = types.VideoContext(label_detection_config=config)
+    mode = videointelligence.enums.LabelDetectionMode.SHOT_AND_FRAME_MODE
+    config = videointelligence.types.LabelDetectionConfig(
+        label_detection_mode=mode)
+    context = videointelligence.types.VideoContext(
+        label_detection_config=config)
 
     operation = video_client.annotate_video(
-        path, features, video_context=context)
+        path, features=features, video_context=context)
     print('\nProcessing video for label annotations:')
 
-    while not operation.done():
-        sys.stdout.write('.')
-        sys.stdout.flush()
-        time.sleep(15)
-
+    result = operation.result(timeout=90)
     print('\nFinished processing.')
 
-    # first result is retrieved because a single video was processed
-    results = operation.result().annotation_results[0]
-
     # Process video/segment level label annotations
-    for i, segment_label in enumerate(results.segment_label_annotations):
+    segment_labels = result.annotation_results[0].segment_label_annotations
+    for i, segment_label in enumerate(segment_labels):
         print('Video label description: {}'.format(
             segment_label.entity.description))
         for category_entity in segment_label.category_entities:
@@ -160,7 +141,8 @@ def analyze_labels(path):
         print('\n')
 
     # Process shot level label annotations
-    for i, shot_label in enumerate(results.shot_label_annotations):
+    shot_labels = result.annotation_results[0].shot_label_annotations
+    for i, shot_label in enumerate(shot_labels):
         print('Shot label description: {}'.format(
             shot_label.entity.description))
         for category_entity in shot_label.category_entities:
@@ -179,7 +161,8 @@ def analyze_labels(path):
         print('\n')
 
     # Process frame level label annotations
-    for i, frame_label in enumerate(results.frame_label_annotations):
+    frame_labels = result.annotation_results[0].frame_label_annotations
+    for i, frame_label in enumerate(frame_labels):
         print('Frame label description: {}'.format(
             frame_label.entity.description))
         for category_entity in frame_label.category_entities:
@@ -198,28 +181,22 @@ def analyze_labels(path):
 
 def analyze_labels_file(path):
     """ Detects labels given a file path. """
-    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
-    features = [enums.Feature.LABEL_DETECTION]
+    video_client = videointelligence.VideoIntelligenceServiceClient()
+    features = [videointelligence.enums.Feature.LABEL_DETECTION]
 
     with io.open(path, "rb") as movie:
         content_base64 = base64.b64encode(movie.read())
 
     operation = video_client.annotate_video(
-        '', features, input_content=content_base64)
+        '', features=features, input_content=content_base64)
     print('\nProcessing video for label annotations:')
 
-    while not operation.done():
-        sys.stdout.write('.')
-        sys.stdout.flush()
-        time.sleep(15)
-
+    result = operation.result(timeout=90)
     print('\nFinished processing.')
 
-    # first result is retrieved because a single video was processed
-    results = operation.result().annotation_results[0]
-
     # Process video/segment level label annotations
-    for i, segment_label in enumerate(results.segment_label_annotations):
+    segment_labels = result.annotation_results[0].segment_label_annotations
+    for i, segment_label in enumerate(segment_labels):
         print('Video label description: {}'.format(
             segment_label.entity.description))
         for category_entity in segment_label.category_entities:
@@ -238,7 +215,8 @@ def analyze_labels_file(path):
         print('\n')
 
     # Process shot level label annotations
-    for i, shot_label in enumerate(results.shot_label_annotations):
+    shot_labels = result.annotation_results[0].shot_label_annotations
+    for i, shot_label in enumerate(shot_labels):
         print('Shot label description: {}'.format(
             shot_label.entity.description))
         for category_entity in shot_label.category_entities:
@@ -257,7 +235,8 @@ def analyze_labels_file(path):
         print('\n')
 
     # Process frame level label annotations
-    for i, frame_label in enumerate(results.frame_label_annotations):
+    frame_labels = result.annotation_results[0].frame_label_annotations
+    for i, frame_label in enumerate(frame_labels):
         print('Frame label description: {}'.format(
             frame_label.entity.description))
         for category_entity in frame_label.category_entities:
@@ -275,22 +254,16 @@ def analyze_labels_file(path):
 
 def analyze_shots(path):
     """ Detects camera shot changes. """
-    video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
-    features = [enums.Feature.SHOT_CHANGE_DETECTION]
-    operation = video_client.annotate_video(path, features)
+    video_client = videointelligence.VideoIntelligenceServiceClient()
+    features = [videointelligence.enums.Feature.SHOT_CHANGE_DETECTION]
+    operation = video_client.annotate_video(path, features=features)
     print('\nProcessing video for shot change annotations:')
 
-    while not operation.done():
-        sys.stdout.write('.')
-        sys.stdout.flush()
-        time.sleep(15)
-
+    result = operation.result(timeout=90)
     print('\nFinished processing.')
 
     # first result is retrieved because a single video was processed
-    shots = operation.result().annotation_results[0].shot_annotations
-
-    for i, shot in enumerate(shots):
+    for i, shot in enumerate(result.annotation_results[0].shot_annotations):
         start_time = (shot.start_time_offset.seconds +
                       shot.start_time_offset.nanos / 1e9)
         end_time = (shot.end_time_offset.seconds +
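
For context, a hypothetical way to exercise one of the updated functions from Python; the module name matches the sample file above, but the GCS URI is a placeholder and not something referenced in this commit:

```python
# Hypothetical usage sketch of the updated sample module.
import analyze

# Substitute a video in a bucket your credentials can read.
analyze.analyze_shots('gs://your-bucket/your-video.mp4')
```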

videointelligence/samples/analyze/analyze_test.py

Lines changed: 2 additions & 0 deletions
@@ -15,7 +15,9 @@
 # limitations under the License.
 
 import os
+
 import pytest
+
 import analyze
 
 
Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-google-cloud-videointelligence==0.28.0
+google-cloud-videointelligence==1.0.0
