
Commit 126916f

bingatgoogle authored and leahecole committed
samples: add streaming_automl_action_recognition sample (#215)
Add code samples for streaming action recognition inference.
1 parent c073b65 commit 126916f

File tree

videointelligence/samples/analyze/beta_snippets.py
videointelligence/samples/analyze/beta_snippets_test.py

2 files changed: +97 -0 lines changed
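Before the diff itself, a minimal sketch of the streaming request ordering the new sample relies on, distilled from the code added below. The client, config, and request types are the ones used in the diff; the helper name request_stream and its defaults are illustrative only, not part of the commit.

# Illustrative distillation of the new sample's request ordering (not part of
# the commit): the config request must be the first message on the stream,
# followed by the raw video bytes in chunks smaller than 10MB.
from google.cloud import videointelligence_v1p3beta1 as videointelligence


def request_stream(video_path, model_path, chunk_size=5 * 1024 * 1024):
    """Yield a StreamingVideoConfig request, then the video content in chunks."""
    config = videointelligence.StreamingVideoConfig(
        feature=videointelligence.StreamingFeature.STREAMING_AUTOML_ACTION_RECOGNITION,
        automl_action_recognition_config=videointelligence.StreamingAutomlActionRecognitionConfig(
            model_name=model_path  # "projects/<project>/locations/us-central1/models/<model>"
        ),
    )
    yield videointelligence.StreamingAnnotateVideoRequest(video_config=config)
    with open(video_path, "rb") as video_file:
        # Read until EOF, emitting one request per chunk.
        for chunk in iter(lambda: video_file.read(chunk_size), b""):
            yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk)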

videointelligence/samples/analyze/beta_snippets.py

Lines changed: 87 additions & 0 deletions
@@ -40,6 +40,9 @@
 
 python beta_snippets.py streaming-automl-object-tracking resources/cat.mp4 \
 $PROJECT_ID $MODEL_ID
+
+python beta_snippets.py streaming-automl-action-recognition \
+resources/cat.mp4 $PROJECT_ID $MODEL_ID
 """
 
 import argparse
@@ -743,6 +746,81 @@ def stream_generator():
     # [END video_streaming_automl_object_tracking_beta]
 
 
+def streaming_automl_action_recognition(path, project_id, model_id):
+    # [START video_streaming_automl_action_recognition_beta]
+    import io
+
+    from google.cloud import videointelligence_v1p3beta1 as videointelligence
+
+    # path = 'path_to_file'
+    # project_id = 'project_id'
+    # model_id = 'automl_action_recognition_model_id'
+
+    client = videointelligence.StreamingVideoIntelligenceServiceClient()
+
+    model_path = "projects/{}/locations/us-central1/models/{}".format(
+        project_id, model_id
+    )
+
+    automl_config = videointelligence.StreamingAutomlActionRecognitionConfig(
+        model_name=model_path
+    )
+
+    video_config = videointelligence.StreamingVideoConfig(
+        feature=videointelligence.StreamingFeature.STREAMING_AUTOML_ACTION_RECOGNITION,
+        automl_action_recognition_config=automl_config,
+    )
+
+    # config_request must be the first request in the stream.
+    config_request = videointelligence.StreamingAnnotateVideoRequest(
+        video_config=video_config
+    )
+
+    # Set the chunk size to 5MB (recommended to be less than 10MB).
+    chunk_size = 5 * 1024 * 1024
+
+    def stream_generator():
+        yield config_request
+        # Load file content.
+        # Note: Input videos must use supported video codecs. See
+        # https://cloud.google.com/video-intelligence/docs/streaming/streaming#supported_video_codecs
+        # for more details.
+        with io.open(path, "rb") as video_file:
+            while True:
+                data = video_file.read(chunk_size)
+                if not data:
+                    break
+                yield videointelligence.StreamingAnnotateVideoRequest(
+                    input_content=data
+                )
+
+    requests = stream_generator()
+
+    # streaming_annotate_video returns a generator.
+    # The default timeout is about 300 seconds.
+    # To process longer videos, set the timeout to a value
+    # larger than the length (in seconds) of the video.
+    responses = client.streaming_annotate_video(requests, timeout=900)
+
+    # Each response corresponds to about one second of video.
+    for response in responses:
+        # Check for errors.
+        if response.error.message:
+            print(response.error.message)
+            break
+
+        for label in response.annotation_results.label_annotations:
+            for frame in label.frames:
+                print(
+                    "At {:3d}s segment, {:5.1%} {}".format(
+                        frame.time_offset.seconds,
+                        frame.confidence,
+                        label.entity.entity_id,
+                    )
+                )
+    # [END video_streaming_automl_action_recognition_beta]
+
+
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
         description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
@@ -804,6 +882,13 @@ def stream_generator():
     video_streaming_automl_object_tracking_parser.add_argument("project_id")
     video_streaming_automl_object_tracking_parser.add_argument("model_id")
 
+    video_streaming_automl_action_recognition_parser = subparsers.add_parser(
+        "streaming-automl-action-recognition", help=streaming_automl_action_recognition.__doc__
+    )
+    video_streaming_automl_action_recognition_parser.add_argument("path")
+    video_streaming_automl_action_recognition_parser.add_argument("project_id")
+    video_streaming_automl_action_recognition_parser.add_argument("model_id")
+
     args = parser.parse_args()
 
     if args.command == "transcription":
@@ -826,3 +911,5 @@ def stream_generator():
         streaming_automl_classification(args.path, args.project_id, args.model_id)
     elif args.command == "streaming-automl-object-tracking":
         streaming_automl_object_tracking(args.path, args.project_id, args.model_id)
+    elif args.command == "streaming-automl-action-recognition":
+        streaming_automl_action_recognition(args.path, args.project_id, args.model_id)
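For reference, a minimal usage sketch of calling the new sample function directly rather than through the CLI. The path, project, and model values below are placeholders, not values from the commit.

# Minimal usage sketch (placeholder values; assumes beta_snippets is importable
# and the environment is authenticated against the Video Intelligence API).
import beta_snippets

video_path = "resources/cat.mp4"                 # local test video from the samples
project_id = "your-gcp-project-id"               # placeholder
model_id = "automl_action_recognition_model_id"  # placeholder AutoML model ID

# Streams the file to the v1p3beta1 API and prints per-frame label predictions.
beta_snippets.streaming_automl_action_recognition(video_path, project_id, model_id)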

videointelligence/samples/analyze/beta_snippets_test.py

Lines changed: 10 additions & 0 deletions
@@ -161,3 +161,13 @@ def test_streaming_automl_object_tracking(capsys, video_path):
     beta_snippets.streaming_automl_object_tracking(video_path, project_id, model_id)
     out, _ = capsys.readouterr()
     assert "Track Id" in out
+
+
+# Flaky Gateway
+@pytest.mark.flaky(max_runs=3, min_passes=1)
+def test_streaming_automl_action_recognition(capsys, video_path):
+    project_id = os.environ["GOOGLE_CLOUD_PROJECT"]
+    model_id = "2509833202491719680"
+    beta_snippets.streaming_automl_action_recognition(video_path, project_id, model_id)
+    out, _ = capsys.readouterr()
+    assert "segment" in out
