@@ -14,12 +14,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""This application demonstrates face detection, label detection,
+"""This application demonstrates label detection,
 explicit content detection, and shot change detection using the Google Cloud API.
 
 Usage Examples:
 
-    python analyze.py faces gs://demomaker/google_gmail.mp4
     python analyze.py labels gs://cloud-ml-sandbox/video/chicago.mp4
     python analyze.py labels_file resources/cat.mp4
    python analyze.py shots gs://demomaker/gbikes_dinosaur.mp4
@@ -55,52 +54,6 @@ def analyze_explicit_content(path):
             likely_string[frame.pornography_likelihood]))
 
 
-def analyze_faces(path):
-    """Detects faces given a GCS path."""
-    video_client = videointelligence.VideoIntelligenceServiceClient()
-    features = [videointelligence.enums.Feature.FACE_DETECTION]
-
-    config = videointelligence.types.FaceDetectionConfig(
-        include_bounding_boxes=True)
-    context = videointelligence.types.VideoContext(
-        face_detection_config=config)
-
-    operation = video_client.annotate_video(
-        path, features=features, video_context=context)
-    print('\nProcessing video for face annotations:')
-
-    result = operation.result(timeout=600)
-    print('\nFinished processing.')
-
-    # first result is retrieved because a single video was processed
-    faces = result.annotation_results[0].face_annotations
-    for face_id, face in enumerate(faces):
-        print('Face {}'.format(face_id))
-        print('Thumbnail size: {}'.format(len(face.thumbnail)))
-
-        for segment_id, segment in enumerate(face.segments):
-            start_time = (segment.segment.start_time_offset.seconds +
-                          segment.segment.start_time_offset.nanos / 1e9)
-            end_time = (segment.segment.end_time_offset.seconds +
-                        segment.segment.end_time_offset.nanos / 1e9)
-            positions = '{}s to {}s'.format(start_time, end_time)
-            print('\tSegment {}: {}'.format(segment_id, positions))
-
-        # There are typically many frames for each face,
-        # here we print information on only the first frame.
-        frame = face.frames[0]
-        time_offset = (frame.time_offset.seconds +
-                       frame.time_offset.nanos / 1e9)
-        box = frame.normalized_bounding_boxes[0]
-        print('First frame time offset: {}s'.format(time_offset))
-        print('First frame normalized bounding box:')
-        print('\tleft: {}'.format(box.left))
-        print('\ttop: {}'.format(box.top))
-        print('\tright: {}'.format(box.right))
-        print('\tbottom: {}'.format(box.bottom))
-        print('\n')
-
-
 def analyze_labels(path):
     """Detects labels given a GCS path."""
     video_client = videointelligence.VideoIntelligenceServiceClient()
@@ -275,9 +228,6 @@ def analyze_shots(path):
         description=__doc__,
         formatter_class=argparse.RawDescriptionHelpFormatter)
     subparsers = parser.add_subparsers(dest='command')
-    analyze_faces_parser = subparsers.add_parser(
-        'faces', help=analyze_faces.__doc__)
-    analyze_faces_parser.add_argument('path')
     analyze_labels_parser = subparsers.add_parser(
         'labels', help=analyze_labels.__doc__)
     analyze_labels_parser.add_argument('path')
@@ -293,8 +243,6 @@ def analyze_shots(path):
 
     args = parser.parse_args()
 
-    if args.command == 'faces':
-        analyze_faces(args.path)
     if args.command == 'labels':
         analyze_labels(args.path)
     if args.command == 'labels_file':