30
30
import argparse
31
31
import base64
32
32
import io
33
- import sys
34
- import time
35
33
36
- from google .cloud import videointelligence_v1beta2
37
- from google .cloud .videointelligence_v1beta2 import enums
38
- from google .cloud .videointelligence_v1beta2 import types
34
+ from google .cloud import videointelligence
39
35
40
36
41
37
def analyze_explicit_content(path):
    """Detects explicit content from the GCS path to a video.

    Args:
        path: GCS URI of the video (e.g. 'gs://bucket/video.mp4').

    Prints the pornography likelihood for each analyzed frame.
    """
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.EXPLICIT_CONTENT_DETECTION]

    operation = video_client.annotate_video(path, features=features)
    print('\nProcessing video for explicit content annotations:')

    # annotate_video returns a long-running operation; block until done.
    result = operation.result(timeout=90)
    print('\nFinished processing.')

    # Tuple index matches the Likelihood enum values (0..5).
    likely_string = ("Unknown", "Very unlikely", "Unlikely", "Possible",
                     "Likely", "Very likely")

    # first result is retrieved because a single video was processed
    explicit_annotation = result.annotation_results[0].explicit_annotation
    for frame in explicit_annotation.frames:
        # time_offset is a Duration proto; fold into fractional seconds.
        frame_time = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
        print('Time: {}s'.format(frame_time))
        print('\tpornography: {}'.format(
            likely_string[frame.pornography_likelihood]))
@@ -69,28 +58,24 @@ def analyze_explicit_content(path):
69
58
70
59
def analyze_faces(path):
    """Detects faces given a GCS path.

    Args:
        path: GCS URI of the video (e.g. 'gs://bucket/video.mp4').

    Prints thumbnail size, segments, and the first-frame bounding box
    for every detected face.
    """
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.FACE_DETECTION]

    # Request per-frame bounding boxes in addition to face segments.
    config = videointelligence.types.FaceDetectionConfig(
        include_bounding_boxes=True)
    context = videointelligence.types.VideoContext(
        face_detection_config=config)

    operation = video_client.annotate_video(
        path, features=features, video_context=context)
    print('\nProcessing video for face annotations:')

    # Face detection is comparatively slow; use a generous timeout.
    result = operation.result(timeout=600)
    print('\nFinished processing.')

    # first result is retrieved because a single video was processed
    faces = result.annotation_results[0].face_annotations
    for face_id, face in enumerate(faces):
        print('Face {}'.format(face_id))
        print('Thumbnail size: {}'.format(len(face.thumbnail)))

        # NOTE(review): the segment/frame printing below is reconstructed
        # from the canonical upstream sample — verify against the original.
        for segment_id, segment in enumerate(face.segments):
            start_time = (segment.segment.start_time_offset.seconds +
                          segment.segment.start_time_offset.nanos / 1e9)
            end_time = (segment.segment.end_time_offset.seconds +
                        segment.segment.end_time_offset.nanos / 1e9)
            positions = '{}s to {}s'.format(start_time, end_time)
            print('\tSegment {}: {}'.format(segment_id, positions))

        # There are typically many frames for each face;
        # here we print information on only the first frame.
        frame = face.frames[0]
        time_offset = (frame.time_offset.seconds +
                       frame.time_offset.nanos / 1e9)
        box = frame.normalized_bounding_boxes[0]
        print('First frame time offset: {}s'.format(time_offset))
        print('First frame normalized bounding box:')
        print('\tleft  : {}'.format(box.left))
        print('\ttop   : {}'.format(box.top))
        print('\tright : {}'.format(box.right))
        print('\tbottom: {}'.format(box.bottom))
        print('\n')
96
81
@@ -119,29 +104,25 @@ def analyze_faces(path):
119
104
120
105
def analyze_labels(path):
    """Detects labels given a GCS path.

    Args:
        path: GCS URI of the video (e.g. 'gs://bucket/video.mp4').

    Prints label annotations at video/segment, shot, and frame level.
    """
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.LABEL_DETECTION]

    # Ask for labels at both shot and frame granularity.
    mode = videointelligence.enums.LabelDetectionMode.SHOT_AND_FRAME_MODE
    config = videointelligence.types.LabelDetectionConfig(
        label_detection_mode=mode)
    context = videointelligence.types.VideoContext(
        label_detection_config=config)

    operation = video_client.annotate_video(
        path, features=features, video_context=context)
    print('\nProcessing video for label annotations:')

    result = operation.result(timeout=90)
    print('\nFinished processing.')

    # first result is retrieved because a single video was processed
    annotation_result = result.annotation_results[0]

    # NOTE(review): the segment/confidence printing inside each loop is
    # reconstructed from the canonical upstream sample — verify upstream.

    # Process video/segment level label annotations
    for i, segment_label in enumerate(
            annotation_result.segment_label_annotations):
        print('Video label description: {}'.format(
            segment_label.entity.description))
        for category_entity in segment_label.category_entities:
            print('\tLabel category description: {}'.format(
                category_entity.description))
        for segment_index, segment in enumerate(segment_label.segments):
            start_time = (segment.segment.start_time_offset.seconds +
                          segment.segment.start_time_offset.nanos / 1e9)
            end_time = (segment.segment.end_time_offset.seconds +
                        segment.segment.end_time_offset.nanos / 1e9)
            positions = '{}s to {}s'.format(start_time, end_time)
            print('\tSegment {}: {}'.format(segment_index, positions))
            print('\tConfidence: {}'.format(segment.confidence))
        print('\n')

    # Process shot level label annotations
    for i, shot_label in enumerate(annotation_result.shot_label_annotations):
        print('Shot label description: {}'.format(
            shot_label.entity.description))
        for category_entity in shot_label.category_entities:
            print('\tLabel category description: {}'.format(
                category_entity.description))
        for shot_index, shot in enumerate(shot_label.segments):
            start_time = (shot.segment.start_time_offset.seconds +
                          shot.segment.start_time_offset.nanos / 1e9)
            end_time = (shot.segment.end_time_offset.seconds +
                        shot.segment.end_time_offset.nanos / 1e9)
            positions = '{}s to {}s'.format(start_time, end_time)
            print('\tSegment {}: {}'.format(shot_index, positions))
            print('\tConfidence: {}'.format(shot.confidence))
        print('\n')

    # Process frame level label annotations
    for i, frame_label in enumerate(annotation_result.frame_label_annotations):
        print('Frame label description: {}'.format(
            frame_label.entity.description))
        for category_entity in frame_label.category_entities:
            print('\tLabel category description: {}'.format(
                category_entity.description))

        # Each frame_label_annotation has many frames;
        # here we print information only about the first frame.
        frame = frame_label.frames[0]
        time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
        print('\tFirst frame time offset: {}s'.format(time_offset))
        print('\tFirst frame confidence: {}'.format(frame.confidence))
        print('\n')
@@ -198,28 +181,22 @@ def analyze_labels(path):
198
181
199
182
def analyze_labels_file(path):
    """Detects labels given a local file path.

    Args:
        path: local filesystem path of the video to analyze.

    Prints label annotations at video/segment, shot, and frame level.
    """
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.LABEL_DETECTION]

    with io.open(path, 'rb') as movie:
        input_content = movie.read()

    # BUGFIX: the gRPC client's `input_content` is a raw-bytes proto field.
    # base64-encoding the payload (a REST/v1beta2 requirement) corrupts it,
    # so the raw file bytes are passed directly instead.
    operation = video_client.annotate_video(
        features=features, input_content=input_content)
    print('\nProcessing video for label annotations:')

    result = operation.result(timeout=90)
    print('\nFinished processing.')

    # first result is retrieved because a single video was processed
    annotation_result = result.annotation_results[0]

    # NOTE(review): the segment/confidence printing inside each loop is
    # reconstructed from the canonical upstream sample — verify upstream.

    # Process video/segment level label annotations
    for i, segment_label in enumerate(
            annotation_result.segment_label_annotations):
        print('Video label description: {}'.format(
            segment_label.entity.description))
        for category_entity in segment_label.category_entities:
            print('\tLabel category description: {}'.format(
                category_entity.description))
        for segment_index, segment in enumerate(segment_label.segments):
            start_time = (segment.segment.start_time_offset.seconds +
                          segment.segment.start_time_offset.nanos / 1e9)
            end_time = (segment.segment.end_time_offset.seconds +
                        segment.segment.end_time_offset.nanos / 1e9)
            positions = '{}s to {}s'.format(start_time, end_time)
            print('\tSegment {}: {}'.format(segment_index, positions))
            print('\tConfidence: {}'.format(segment.confidence))
        print('\n')

    # Process shot level label annotations
    for i, shot_label in enumerate(annotation_result.shot_label_annotations):
        print('Shot label description: {}'.format(
            shot_label.entity.description))
        for category_entity in shot_label.category_entities:
            print('\tLabel category description: {}'.format(
                category_entity.description))
        for shot_index, shot in enumerate(shot_label.segments):
            start_time = (shot.segment.start_time_offset.seconds +
                          shot.segment.start_time_offset.nanos / 1e9)
            end_time = (shot.segment.end_time_offset.seconds +
                        shot.segment.end_time_offset.nanos / 1e9)
            positions = '{}s to {}s'.format(start_time, end_time)
            print('\tSegment {}: {}'.format(shot_index, positions))
            print('\tConfidence: {}'.format(shot.confidence))
        print('\n')

    # Process frame level label annotations
    for i, frame_label in enumerate(annotation_result.frame_label_annotations):
        print('Frame label description: {}'.format(
            frame_label.entity.description))
        for category_entity in frame_label.category_entities:
            print('\tLabel category description: {}'.format(
                category_entity.description))

        # Each frame_label_annotation has many frames;
        # here we print information only about the first frame.
        frame = frame_label.frames[0]
        time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
        print('\tFirst frame time offset: {}s'.format(time_offset))
        print('\tFirst frame confidence: {}'.format(frame.confidence))
        print('\n')
@@ -275,22 +254,16 @@ def analyze_labels_file(path):
275
254
276
255
def analyze_shots (path ):
277
256
""" Detects camera shot changes. """
278
- video_client = videointelligence_v1beta2 .VideoIntelligenceServiceClient ()
279
- features = [enums .Feature .SHOT_CHANGE_DETECTION ]
280
- operation = video_client .annotate_video (path , features )
257
+ video_client = videointelligence .VideoIntelligenceServiceClient ()
258
+ features = [videointelligence . enums .Feature .SHOT_CHANGE_DETECTION ]
259
+ operation = video_client .annotate_video (path , features = features )
281
260
print ('\n Processing video for shot change annotations:' )
282
261
283
- while not operation .done ():
284
- sys .stdout .write ('.' )
285
- sys .stdout .flush ()
286
- time .sleep (15 )
287
-
262
+ result = operation .result (timeout = 90 )
288
263
print ('\n Finished processing.' )
289
264
290
265
# first result is retrieved because a single video was processed
291
- shots = operation .result ().annotation_results [0 ].shot_annotations
292
-
293
- for i , shot in enumerate (shots ):
266
+ for i , shot in enumerate (result .annotation_results [0 ].shot_annotations ):
294
267
start_time = (shot .start_time_offset .seconds +
295
268
shot .start_time_offset .nanos / 1e9 )
296
269
end_time = (shot .end_time_offset .seconds +
0 commit comments