Skip to content

update videointelligence streaming samples #2215

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
37 changes: 25 additions & 12 deletions video/cloud-client/analyze/beta_snippets.py
Original file line number Diff line number Diff line change
Expand Up @@ -339,17 +339,23 @@ def stream_generator():
print(response.error.message)
break

# Get the time offset of the response.
frame = response.annotation_results.label_annotations[0].frames[0]
time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
print('{}s:'.format(time_offset))
label_annotations = response.annotation_results.label_annotations

# label_annotations could be empty
if not label_annotations:
continue

for annotation in label_annotations:
            # Each annotation has one frame, which has a time offset.
frame = annotation.frames[0]
time_offset = frame.time_offset.seconds + \
frame.time_offset.nanos / 1e9

for annotation in response.annotation_results.label_annotations:
description = annotation.entity.description
# Every annotation has only one frame
confidence = annotation.frames[0].confidence
# description is in Unicode
print(u'\t{} (confidence: {})'.format(description, confidence))
print(u'{}s: {} (confidence: {})'.format(
time_offset, description, confidence))
# [END video_streaming_label_detection_beta]


Expand Down Expand Up @@ -463,19 +469,26 @@ def stream_generator():
print(response.error.message)
break

# Get the time offset of the response.
frame = response.annotation_results.object_annotations[0].frames[0]
time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
print('{}s:'.format(time_offset))
object_annotations = response.annotation_results.object_annotations

# object_annotations could be empty
if not object_annotations:
continue

for annotation in object_annotations:
            # Each annotation has one frame, which has a time offset.
frame = annotation.frames[0]
time_offset = frame.time_offset.seconds + \
frame.time_offset.nanos / 1e9

for annotation in response.annotation_results.object_annotations:
description = annotation.entity.description
confidence = annotation.confidence

# track_id tracks the same object in the video.
track_id = annotation.track_id

# description is in Unicode
print('{}s'.format(time_offset))
print(u'\tEntity description: {}'.format(description))
print('\tTrack Id: {}'.format(track_id))
if annotation.entity.entity_id:
Expand Down