From f0a181b0415300764f7a02bdc891a2ead4d2aebc Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Thu, 5 Apr 2018 13:22:54 -0700
Subject: [PATCH 1/4] remove face detection samples

---
 video/cloud-client/analyze/beta_snippets.py   | 121 +-----------------
 .../analyze/beta_snippets_test.py             |  23 +---
 2 files changed, 4 insertions(+), 140 deletions(-)

diff --git a/video/cloud-client/analyze/beta_snippets.py b/video/cloud-client/analyze/beta_snippets.py
index 12ad4197138..d2b491a8e47 100644
--- a/video/cloud-client/analyze/beta_snippets.py
+++ b/video/cloud-client/analyze/beta_snippets.py
@@ -18,12 +18,6 @@
 and speech transcription using the Google Cloud API.
 
 Usage Examples:
-    python beta_snippets.py boxes \
-    gs://python-docs-samples-tests/video/googlework_short.mp4
-
-    python beta_snippets.py \
-    emotions gs://python-docs-samples-tests/video/googlework_short.mp4
-
     python beta_snippets.py \
     transcription gs://python-docs-samples-tests/video/googlework_short.mp4
 """
@@ -33,108 +27,6 @@
 from google.cloud import videointelligence_v1p1beta1 as videointelligence
 
 
-# [START video_face_bounding_boxes]
-def face_bounding_boxes(gcs_uri):
-    """ Detects faces' bounding boxes. """
-    video_client = videointelligence.VideoIntelligenceServiceClient()
-    features = [videointelligence.enums.Feature.FACE_DETECTION]
-
-    config = videointelligence.types.FaceConfig(
-        include_bounding_boxes=True)
-    context = videointelligence.types.VideoContext(
-        face_detection_config=config)
-
-    operation = video_client.annotate_video(
-        gcs_uri, features=features, video_context=context)
-    print('\nProcessing video for face annotations:')
-
-    result = operation.result(timeout=900)
-    print('\nFinished processing.')
-
-    # There is only one result because a single video was processed.
-    faces = result.annotation_results[0].face_detection_annotations
-    for i, face in enumerate(faces):
-        print('Face {}'.format(i))
-
-        # Each face_detection_annotation has only one segment.
-        segment = face.segments[0]
-        start_time = (segment.segment.start_time_offset.seconds +
-                      segment.segment.start_time_offset.nanos / 1e9)
-        end_time = (segment.segment.end_time_offset.seconds +
-                    segment.segment.end_time_offset.nanos / 1e9)
-        positions = '{}s to {}s'.format(start_time, end_time)
-        print('\tSegment: {}\n'.format(positions))
-
-        # Each detected face may appear in many frames of the video.
-        # Here we process only the first frame.
-        frame = face.frames[0]
-
-        time_offset = (frame.time_offset.seconds +
-                       frame.time_offset.nanos / 1e9)
-        box = frame.attributes[0].normalized_bounding_box
-
-        print('First frame time offset: {}s\n'.format(time_offset))
-
-        print('First frame normalized bounding box:')
-        print('\tleft  : {}'.format(box.left))
-        print('\ttop   : {}'.format(box.top))
-        print('\tright : {}'.format(box.right))
-        print('\tbottom: {}'.format(box.bottom))
-        print('\n')
-# [END video_face_bounding_boxes]
-
-
-# [START video_face_emotions]
-def face_emotions(gcs_uri):
-    """ Analyze faces' emotions over frames. """
-    video_client = videointelligence.VideoIntelligenceServiceClient()
-    features = [videointelligence.enums.Feature.FACE_DETECTION]
-
-    config = videointelligence.types.FaceConfig(
-        include_emotions=True)
-    context = videointelligence.types.VideoContext(
-        face_detection_config=config)
-
-    operation = video_client.annotate_video(
-        gcs_uri, features=features, video_context=context)
-    print('\nProcessing video for face annotations:')
-
-    result = operation.result(timeout=600)
-    print('\nFinished processing.')
-
-    # There is only one result because a single video was processed.
-    faces = result.annotation_results[0].face_detection_annotations
-    for i, face in enumerate(faces):
-        for j, frame in enumerate(face.frames):
-            time_offset = (frame.time_offset.seconds +
-                           frame.time_offset.nanos / 1e9)
-            emotions = frame.attributes[0].emotions
-
-            print('Face {}, frame {}, time_offset {}\n'.format(
-                i, j, time_offset))
-
-            # from videointelligence.enums
-            emotion_labels = (
-                'EMOTION_UNSPECIFIED', 'AMUSEMENT', 'ANGER',
-                'CONCENTRATION', 'CONTENTMENT', 'DESIRE',
-                'DISAPPOINTMENT', 'DISGUST', 'ELATION',
-                'EMBARRASSMENT', 'INTEREST', 'PRIDE', 'SADNESS',
-                'SURPRISE')
-
-            for emotion in emotions:
-                emotion_index = emotion.emotion
-                emotion_label = emotion_labels[emotion_index]
-                emotion_score = emotion.score
-
-                print('emotion: {} (confidence score: {})'.format(
-                    emotion_label, emotion_score))
-
-            print('\n')
-
-        print('\n')
-# [END video_face_emotions]
-
-
 # [START video_speech_transcription]
 def speech_transcription(input_uri):
     """Transcribe speech from a video stored on GCS."""
@@ -181,13 +73,6 @@ def speech_transcription(input_uri):
         description=__doc__,
         formatter_class=argparse.RawDescriptionHelpFormatter)
     subparsers = parser.add_subparsers(dest='command')
-    analyze_faces_parser = subparsers.add_parser(
-        'boxes', help=face_bounding_boxes.__doc__)
-    analyze_faces_parser.add_argument('gcs_uri')
-
-    analyze_emotions_parser = subparsers.add_parser(
-        'emotions', help=face_emotions.__doc__)
-    analyze_emotions_parser.add_argument('gcs_uri')
 
     speech_transcription_parser = subparsers.add_parser(
         'transcription', help=speech_transcription.__doc__)
@@ -195,9 +80,5 @@ def speech_transcription(input_uri):
 
     args = parser.parse_args()
 
-    if args.command == 'boxes':
-        face_bounding_boxes(args.gcs_uri)
-    elif args.command == 'emotions':
-        face_emotions(args.gcs_uri)
-    elif args.command == 'transcription':
+    if args.command == 'transcription':
         speech_transcription(args.gcs_uri)
diff --git a/video/cloud-client/analyze/beta_snippets_test.py b/video/cloud-client/analyze/beta_snippets_test.py
index 6d27c2fd00e..679095aece6 100644
--- a/video/cloud-client/analyze/beta_snippets_test.py
+++ b/video/cloud-client/analyze/beta_snippets_test.py
@@ -16,34 +16,17 @@
 
 import os
 
-import pytest
-
 import beta_snippets
+import pytest
 
 
 BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
-FACES_SHORT_FILE_PATH = 'video/googlework_short.mp4'
-
-
-@pytest.mark.slow
-def test_face_bounding_boxes(capsys):
-    beta_snippets.face_bounding_boxes(
-        'gs://{}/{}'.format(BUCKET, FACES_SHORT_FILE_PATH))
-    out, _ = capsys.readouterr()
-    assert 'top   :' in out
-
-
-@pytest.mark.slow
-def test_face_emotions(capsys):
-    beta_snippets.face_emotions(
-        'gs://{}/{}'.format(BUCKET, FACES_SHORT_FILE_PATH))
-    out, _ = capsys.readouterr()
-    assert 'CONCENTRATION' in out
+FILE_PATH = 'video/googlework_short.mp4'
 
 
 @pytest.mark.slow
 def test_speech_transcription(capsys):
     beta_snippets.speech_transcription(
-        'gs://{}/{}'.format(BUCKET, FACES_SHORT_FILE_PATH))
+        'gs://{}/{}'.format(BUCKET, FILE_PATH))
     out, _ = capsys.readouterr()
     assert 'cultural' in out

From aca3336fbb64411ac5c797c7777ca57c0cdde6fc Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Fri, 6 Apr 2018 13:32:01 -0700
Subject: [PATCH 2/4] update docstring

---
 video/cloud-client/analyze/beta_snippets.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/video/cloud-client/analyze/beta_snippets.py b/video/cloud-client/analyze/beta_snippets.py
index d2b491a8e47..1024ac0b734 100644
--- a/video/cloud-client/analyze/beta_snippets.py
+++ b/video/cloud-client/analyze/beta_snippets.py
@@ -14,8 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""This application demonstrates face detection, face emotions
-and speech transcription using the Google Cloud API.
+"""This application demonstrates speech transcription using the Google Cloud API.
 
 Usage Examples:
     python beta_snippets.py \

From 5f39a0fc8aa9f9786627458bbca140d7390660cf Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Fri, 6 Apr 2018 14:18:15 -0700
Subject: [PATCH 3/4] linter

---
 video/cloud-client/analyze/beta_snippets.py      | 3 ++-
 video/cloud-client/analyze/beta_snippets_test.py | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/video/cloud-client/analyze/beta_snippets.py b/video/cloud-client/analyze/beta_snippets.py
index 1024ac0b734..ada4f9b4eb4 100644
--- a/video/cloud-client/analyze/beta_snippets.py
+++ b/video/cloud-client/analyze/beta_snippets.py
@@ -14,7 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""This application demonstrates speech transcription using the Google Cloud API.
+"""This application demonstrates speech transcription using the
+Google Cloud API.
 
 Usage Examples:
     python beta_snippets.py \
diff --git a/video/cloud-client/analyze/beta_snippets_test.py b/video/cloud-client/analyze/beta_snippets_test.py
index 679095aece6..586d8ecd4da 100644
--- a/video/cloud-client/analyze/beta_snippets_test.py
+++ b/video/cloud-client/analyze/beta_snippets_test.py
@@ -17,8 +17,8 @@
 import os
 
 import beta_snippets
-import pytest
 
+import pytest
 
 BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
 FILE_PATH = 'video/googlework_short.mp4'

From b6f5c036f5e9e4a2281503a35776255966a1e7d7 Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Fri, 6 Apr 2018 14:23:35 -0700
Subject: [PATCH 4/4] linter

---
 video/cloud-client/analyze/beta_snippets_test.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/video/cloud-client/analyze/beta_snippets_test.py b/video/cloud-client/analyze/beta_snippets_test.py
index 586d8ecd4da..e86f4f8c0f5 100644
--- a/video/cloud-client/analyze/beta_snippets_test.py
+++ b/video/cloud-client/analyze/beta_snippets_test.py
@@ -16,10 +16,10 @@
 
 import os
 
-import beta_snippets
-
 import pytest
 
+import beta_snippets
+
 BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
 FILE_PATH = 'video/googlework_short.mp4'
 
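
Note on the surviving sample: after this series, beta_snippets.py exposes only the transcription command. The diffs show speech_transcription's signature as hunk context but never its body, so the following is a minimal sketch of the call pattern such a function would use with the videointelligence_v1p1beta1 client imported above. The helper name run_transcription, the 180-second timeout, and the 'en-US' language code are illustrative assumptions, not the repository's exact code.

# Minimal sketch of a speech transcription request against the
# videointelligence_v1p1beta1 client used throughout this series.
# The function name, timeout, and language code are assumptions.
from google.cloud import videointelligence_v1p1beta1 as videointelligence


def run_transcription(input_uri):
    """Request speech transcription for a GCS-hosted video and print it."""
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.SPEECH_TRANSCRIPTION]

    # Transcription settings travel in the video context.
    config = videointelligence.types.SpeechTranscriptionConfig(
        language_code='en-US')
    video_context = videointelligence.types.VideoContext(
        speech_transcription_config=config)

    # annotate_video returns a long-running operation; block until done.
    operation = video_client.annotate_video(
        input_uri, features=features, video_context=video_context)
    result = operation.result(timeout=180)

    # There is only one annotation result because one video was processed.
    transcription = result.annotation_results[0].speech_transcriptions[0]
    alternative = transcription.alternatives[0]
    print('Transcript: {}'.format(alternative.transcript))
    print('Confidence: {}'.format(alternative.confidence))


if __name__ == '__main__':
    # Example invocation with the GCS URI the tests in this series use.
    run_transcription(
        'gs://python-docs-samples-tests/video/googlework_short.mp4')

This is also why the remaining test asserts that the word 'cultural' appears in the captured output: it checks the printed transcript of googlework_short.mp4 rather than any structured return value.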