#!/usr/bin/env python
- # Copyright 2017 Google Inc. All Rights Reserved.
+ # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

    python beta_snippets.py streaming-annotation-storage resources/cat.mp4 \
        gs://mybucket/myfolder
+
+     python beta_snippets.py streaming-automl-classification resources/cat.mp4 \
+         $PROJECT_ID $MODEL_ID
"""

import argparse
@@ -629,6 +632,79 @@ def stream_generator():
# [END video_streaming_annotation_to_storage_beta]


+ def streaming_automl_classification(path, project_id, model_id):
+     # [START video_streaming_automl_classification_beta]
+     import io
+
+     from google.cloud import videointelligence_v1p3beta1 as videointelligence
+     from google.cloud.videointelligence_v1p3beta1 import enums
+
+     # path = 'path_to_file'
+     # project_id = 'gcp_project_id'
+     # model_id = 'automl_classification_model_id'
+
+     client = videointelligence.StreamingVideoIntelligenceServiceClient()
+
+     model_path = 'projects/{}/locations/us-central1/models/{}'.format(
+         project_id, model_id)
+
+     # Here we use classification as an example.
+     automl_config = (videointelligence.types
+                      .StreamingAutomlClassificationConfig(
+                          model_name=model_path))
+
+     video_config = videointelligence.types.StreamingVideoConfig(
+         feature=enums.StreamingFeature.STREAMING_AUTOML_CLASSIFICATION,
+         automl_classification_config=automl_config)
+
+     # config_request should be the first in the stream of requests.
+     config_request = videointelligence.types.StreamingAnnotateVideoRequest(
+         video_config=video_config)
+
+     # Set the chunk size to 5MB (recommended less than 10MB).
+     chunk_size = 5 * 1024 * 1024
+
+     # Load file content.
+     # Note: Input videos must have supported video codecs. See
+     # https://cloud.google.com/video-intelligence/docs/streaming/streaming#supported_video_codecs
+     # for more details.
+     stream = []
+     with io.open(path, 'rb') as video_file:
+         while True:
+             data = video_file.read(chunk_size)
+             if not data:
+                 break
+             stream.append(data)
+
+     def stream_generator():
+         yield config_request
+         for chunk in stream:
+             yield videointelligence.types.StreamingAnnotateVideoRequest(
+                 input_content=chunk)
+
+     requests = stream_generator()
+
+     # streaming_annotate_video returns a generator.
+     # The default timeout is about 300 seconds.
+     # To process longer videos it should be set to
+     # larger than the length (in seconds) of the stream.
+     responses = client.streaming_annotate_video(requests, timeout=600)
+
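+     # Responses are yielded incrementally by the generator as the service
+     # processes each streamed chunk, so results can be handled as they arrive.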
+     for response in responses:
+         # Check for errors.
+         if response.error.message:
+             print(response.error.message)
+             break
+
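+         # Each frame's time_offset is measured from the start of the video;
+         # only whole seconds are printed below.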
+         for label in response.annotation_results.label_annotations:
+             for frame in label.frames:
+                 print("At {:3d}s segment, {:5.1%} {}".format(
+                     frame.time_offset.seconds,
+                     frame.confidence,
+                     label.entity.entity_id))
+     # [END video_streaming_automl_classification_beta]
+
+
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=__doc__,
@@ -678,6 +754,13 @@ def stream_generator():
    video_streaming_annotation_to_storage_parser.add_argument('path')
    video_streaming_annotation_to_storage_parser.add_argument('output_uri')

+     video_streaming_automl_classification_parser = subparsers.add_parser(
+         'streaming-automl-classification',
+         help=streaming_automl_classification.__doc__)
+     video_streaming_automl_classification_parser.add_argument('path')
+     video_streaming_automl_classification_parser.add_argument('project_id')
+     video_streaming_automl_classification_parser.add_argument('model_id')
+
    args = parser.parse_args()

    if args.command == 'transcription':
@@ -700,3 +783,6 @@ def stream_generator():
        detect_explicit_content_streaming(args.path)
    elif args.command == 'streaming-annotation-storage':
        annotation_to_storage_streaming(args.path, args.output_uri)
+     elif args.command == 'streaming-automl-classification':
+         streaming_automl_classification(
+             args.path, args.project_id, args.model_id)
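As a quick usage sketch (the project ID and model ID below are placeholders, not values taken from this change), the new function can also be called directly from Python rather than through the CLI:

    from beta_snippets import streaming_automl_classification

    # Hypothetical IDs; substitute a real GCP project and a deployed
    # AutoML Video classification model.
    streaming_automl_classification(
        'resources/cat.mp4', 'my-project-id', '1234567890123456789')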