/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.video;

// [START video_detect_person_beta]

import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1p3beta1.DetectedAttribute;
import com.google.cloud.videointelligence.v1p3beta1.DetectedLandmark;
import com.google.cloud.videointelligence.v1p3beta1.Feature;
import com.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation;
import com.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig;
import com.google.cloud.videointelligence.v1p3beta1.TimestampedObject;
import com.google.cloud.videointelligence.v1p3beta1.Track;
import com.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults;
import com.google.cloud.videointelligence.v1p3beta1.VideoContext;
import com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
import com.google.protobuf.ByteString;

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class DetectPerson {

  public static void detectPerson() throws Exception {
    // TODO(developer): Replace these variables before running the sample.
    String localFilePath = "resources/googlework_short.mp4";
    detectPerson(localFilePath);
  }

  // Detects people in a video stored in a local file using the Cloud Video Intelligence API.
  public static void detectPerson(String localFilePath) throws Exception {
    try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
        VideoIntelligenceServiceClient.create()) {
      // Reads a local video file and wraps the bytes in a ByteString for the request.
      Path path = Paths.get(localFilePath);
      byte[] data = Files.readAllBytes(path);
      ByteString inputContent = ByteString.copyFrom(data);

      PersonDetectionConfig personDetectionConfig =
          PersonDetectionConfig.newBuilder()
              // Must set includeBoundingBoxes to true to get poses and attributes.
              .setIncludeBoundingBoxes(true)
              .setIncludePoseLandmarks(true)
              .setIncludeAttributes(true)
              .build();
      VideoContext videoContext =
          VideoContext.newBuilder().setPersonDetectionConfig(personDetectionConfig).build();

      AnnotateVideoRequest request =
          AnnotateVideoRequest.newBuilder()
              .setInputContent(inputContent)
              .addFeatures(Feature.PERSON_DETECTION)
              .setVideoContext(videoContext)
              .build();

      // Detects people in a video
      // We get the first result because only one video is processed.
      OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
          videoIntelligenceServiceClient.annotateVideoAsync(request);

      System.out.println("Waiting for operation to complete...");
      AnnotateVideoResponse response = future.get();

      // Gets annotations for video
      VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);

      // Annotations for list of people detected, tracked and recognized in video.
      for (PersonDetectionAnnotation personDetectionAnnotation :
          annotationResult.getPersonDetectionAnnotationsList()) {
        System.out.print("Person detected:\n");
        for (Track track : personDetectionAnnotation.getTracksList()) {
          VideoSegment segment = track.getSegment();
          System.out.printf(
              "\tStart: %d.%.0fs\n",
              segment.getStartTimeOffset().getSeconds(),
              segment.getStartTimeOffset().getNanos() / 1e6);
          System.out.printf(
              "\tEnd: %d.%.0fs\n",
              segment.getEndTimeOffset().getSeconds(),
              segment.getEndTimeOffset().getNanos() / 1e6);

          // Each segment includes timestamped objects that capture characteristics of the
          // detected person, e.g. clothing and posture.
          TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);

          // Attributes include unique pieces of clothing, poses, or hair color.
          for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
            System.out.printf(
                "\tAttribute: %s; Value: %s\n", attribute.getName(), attribute.getValue());
          }

          // Landmarks in person detection include body parts.
          for (DetectedLandmark attribute : firstTimestampedObject.getLandmarksList()) {
            System.out.printf(
                "\tLandmark: %s; Vertex: %f, %f\n",
                attribute.getName(), attribute.getPoint().getX(), attribute.getPoint().getY());
          }
        }
      }
    }
  }
}
// [END video_detect_person_beta]
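
// A minimal sketch of how this sample could be driven from a command-line entry point.
// The DetectPersonRunner class name and the argument handling are assumptions added for
// illustration; the published sample is normally invoked from its tests rather than from
// a main method.
class DetectPersonRunner {

  public static void main(String[] args) throws Exception {
    // Use the first argument as the video path, or fall back to the bundled sample file.
    String localFilePath = args.length > 0 ? args[0] : "resources/googlework_short.mp4";
    DetectPerson.detectPerson(localFilePath);
  }
}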