Skip to content
This repository was archived by the owner on Sep 16, 2023. It is now read-only.

Commit c746cfa

Browse files
munkhuushmgllesv
authored and committed
samples: feat: face and person detection samples (#2066)
Co-authored-by: Les Vogel <[email protected]>
1 parent 7d87ef4 commit c746cfa

File tree

8 files changed

+659
-0
lines changed

8 files changed

+659
-0
lines changed
Lines changed: 114 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,114 @@
1+
/*
2+
* Copyright 2020 Google LLC
3+
*
4+
* Licensed under the Apache License, Version 2.0 (the "License");
5+
* you may not use this file except in compliance with the License.
6+
* You may obtain a copy of the License at
7+
*
8+
* http://www.apache.org/licenses/LICENSE-2.0
9+
*
10+
* Unless required by applicable law or agreed to in writing, software
11+
* distributed under the License is distributed on an "AS IS" BASIS,
12+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
* See the License for the specific language governing permissions and
14+
* limitations under the License.
15+
*/
16+
17+
package com.example.video;
18+
19+
// [START video_detect_faces_beta]
20+
21+
import com.google.api.gax.longrunning.OperationFuture;
22+
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress;
23+
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest;
24+
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse;
25+
import com.google.cloud.videointelligence.v1p3beta1.DetectedAttribute;
26+
import com.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation;
27+
import com.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig;
28+
import com.google.cloud.videointelligence.v1p3beta1.Feature;
29+
import com.google.cloud.videointelligence.v1p3beta1.TimestampedObject;
30+
import com.google.cloud.videointelligence.v1p3beta1.Track;
31+
import com.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults;
32+
import com.google.cloud.videointelligence.v1p3beta1.VideoContext;
33+
import com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient;
34+
import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
35+
import com.google.protobuf.ByteString;
36+
37+
import java.io.IOException;
38+
import java.nio.file.Files;
39+
import java.nio.file.Path;
40+
import java.nio.file.Paths;
41+
import java.util.concurrent.ExecutionException;
42+
43+
public class DetectFaces {
44+
45+
public static void detectFaces() throws Exception {
46+
// TODO(developer): Replace these variables before running the sample.
47+
String localFilePath = "resources/googlework_short.mp4";
48+
detectFaces(localFilePath);
49+
}
50+
51+
// Detects faces in a video stored in a local file using the Cloud Video Intelligence API.
52+
public static void detectFaces(String localFilePath) throws Exception {
53+
try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
54+
VideoIntelligenceServiceClient.create()) {
55+
// Reads a local video file and converts it to base64.
56+
Path path = Paths.get(localFilePath);
57+
byte[] data = Files.readAllBytes(path);
58+
ByteString inputContent = ByteString.copyFrom(data);
59+
60+
FaceDetectionConfig faceDetectionConfig =
61+
FaceDetectionConfig.newBuilder()
62+
// Must set includeBoundingBoxes to true to get facial attributes.
63+
.setIncludeBoundingBoxes(true)
64+
.setIncludeAttributes(true)
65+
.build();
66+
VideoContext videoContext =
67+
VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();
68+
69+
AnnotateVideoRequest request =
70+
AnnotateVideoRequest.newBuilder()
71+
.setInputContent(inputContent)
72+
.addFeatures(Feature.FACE_DETECTION)
73+
.setVideoContext(videoContext)
74+
.build();
75+
76+
// Detects faces in a video
77+
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
78+
videoIntelligenceServiceClient.annotateVideoAsync(request);
79+
80+
System.out.println("Waiting for operation to complete...");
81+
AnnotateVideoResponse response = future.get();
82+
83+
// Gets annotations for video
84+
VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);
85+
86+
// Annotations for list of faces detected, tracked and recognized in video.
87+
for (FaceDetectionAnnotation faceDetectionAnnotation :
88+
annotationResult.getFaceDetectionAnnotationsList()) {
89+
System.out.print("Face detected:\n");
90+
for (Track track : faceDetectionAnnotation.getTracksList()) {
91+
VideoSegment segment = track.getSegment();
92+
System.out.printf(
93+
"\tStart: %d.%.0fs\n",
94+
segment.getStartTimeOffset().getSeconds(),
95+
segment.getStartTimeOffset().getNanos() / 1e6);
96+
System.out.printf(
97+
"\tEnd: %d.%.0fs\n",
98+
segment.getEndTimeOffset().getSeconds(),
99+
segment.getEndTimeOffset().getNanos() / 1e6);
100+
101+
// Each segment includes timestamped objects that
102+
// include characteristics of the face detected.
103+
TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);
104+
105+
for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
106+
// Attributes include unique pieces of clothing, like glasses, poses, or hair color.
107+
System.out.printf("\tAttribute: %s;\n", attribute.getName());
108+
}
109+
}
110+
}
111+
}
112+
}
113+
}
114+
// [END video_detect_faces_beta]
Lines changed: 104 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,104 @@
1+
/*
2+
* Copyright 2020 Google LLC
3+
*
4+
* Licensed under the Apache License, Version 2.0 (the "License");
5+
* you may not use this file except in compliance with the License.
6+
* You may obtain a copy of the License at
7+
*
8+
* http://www.apache.org/licenses/LICENSE-2.0
9+
*
10+
* Unless required by applicable law or agreed to in writing, software
11+
* distributed under the License is distributed on an "AS IS" BASIS,
12+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
* See the License for the specific language governing permissions and
14+
* limitations under the License.
15+
*/
16+
17+
package com.example.video;
18+
19+
// [START video_detect_faces_gcs_beta]
20+
21+
import com.google.api.gax.longrunning.OperationFuture;
22+
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress;
23+
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest;
24+
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse;
25+
import com.google.cloud.videointelligence.v1p3beta1.DetectedAttribute;
26+
import com.google.cloud.videointelligence.v1p3beta1.FaceDetectionAnnotation;
27+
import com.google.cloud.videointelligence.v1p3beta1.FaceDetectionConfig;
28+
import com.google.cloud.videointelligence.v1p3beta1.Feature;
29+
import com.google.cloud.videointelligence.v1p3beta1.TimestampedObject;
30+
import com.google.cloud.videointelligence.v1p3beta1.Track;
31+
import com.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults;
32+
import com.google.cloud.videointelligence.v1p3beta1.VideoContext;
33+
import com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient;
34+
import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
35+
36+
public class DetectFacesGcs {
37+
38+
public static void detectFacesGcs() throws Exception {
39+
// TODO(developer): Replace these variables before running the sample.
40+
String gcsUri = "gs://cloud-samples-data/video/googlework_short.mp4";
41+
detectFacesGcs(gcsUri);
42+
}
43+
44+
// Detects faces in a video stored in Google Cloud Storage using the Cloud Video Intelligence API.
45+
public static void detectFacesGcs(String gcsUri) throws Exception {
46+
try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
47+
VideoIntelligenceServiceClient.create()) {
48+
49+
FaceDetectionConfig faceDetectionConfig =
50+
FaceDetectionConfig.newBuilder()
51+
// Must set includeBoundingBoxes to true to get facial attributes.
52+
.setIncludeBoundingBoxes(true)
53+
.setIncludeAttributes(true)
54+
.build();
55+
VideoContext videoContext =
56+
VideoContext.newBuilder().setFaceDetectionConfig(faceDetectionConfig).build();
57+
58+
AnnotateVideoRequest request =
59+
AnnotateVideoRequest.newBuilder()
60+
.setInputUri(gcsUri)
61+
.addFeatures(Feature.FACE_DETECTION)
62+
.setVideoContext(videoContext)
63+
.build();
64+
65+
// Detects faces in a video
66+
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
67+
videoIntelligenceServiceClient.annotateVideoAsync(request);
68+
69+
System.out.println("Waiting for operation to complete...");
70+
AnnotateVideoResponse response = future.get();
71+
72+
// Gets annotations for video
73+
VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);
74+
75+
// Annotations for list of people detected, tracked and recognized in video.
76+
for (FaceDetectionAnnotation faceDetectionAnnotation :
77+
annotationResult.getFaceDetectionAnnotationsList()) {
78+
System.out.print("Face detected:\n");
79+
for (Track track : faceDetectionAnnotation.getTracksList()) {
80+
VideoSegment segment = track.getSegment();
81+
System.out.printf(
82+
"\tStart: %d.%.0fs\n",
83+
segment.getStartTimeOffset().getSeconds(),
84+
segment.getStartTimeOffset().getNanos() / 1e6);
85+
System.out.printf(
86+
"\tEnd: %d.%.0fs\n",
87+
segment.getEndTimeOffset().getSeconds(),
88+
segment.getEndTimeOffset().getNanos() / 1e6);
89+
90+
// Each segment includes timestamped objects that
91+
// include characteristics of the face detected.
92+
TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);
93+
94+
for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
95+
// Attributes include unique pieces of clothing, like glasses,
96+
// poses, or hair color.
97+
System.out.printf("\tAttribute: %s;\n", attribute.getName());
98+
}
99+
}
100+
}
101+
}
102+
}
103+
}
104+
// [END video_detect_faces_gcs_beta]
Lines changed: 124 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,124 @@
1+
/*
2+
* Copyright 2020 Google LLC
3+
*
4+
* Licensed under the Apache License, Version 2.0 (the "License");
5+
* you may not use this file except in compliance with the License.
6+
* You may obtain a copy of the License at
7+
*
8+
* http://www.apache.org/licenses/LICENSE-2.0
9+
*
10+
* Unless required by applicable law or agreed to in writing, software
11+
* distributed under the License is distributed on an "AS IS" BASIS,
12+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
* See the License for the specific language governing permissions and
14+
* limitations under the License.
15+
*/
16+
17+
package com.example.video;
18+
19+
// [START video_detect_person_beta]
20+
21+
import com.google.api.gax.longrunning.OperationFuture;
22+
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoProgress;
23+
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoRequest;
24+
import com.google.cloud.videointelligence.v1p3beta1.AnnotateVideoResponse;
25+
import com.google.cloud.videointelligence.v1p3beta1.DetectedAttribute;
26+
import com.google.cloud.videointelligence.v1p3beta1.DetectedLandmark;
27+
import com.google.cloud.videointelligence.v1p3beta1.Feature;
28+
import com.google.cloud.videointelligence.v1p3beta1.PersonDetectionAnnotation;
29+
import com.google.cloud.videointelligence.v1p3beta1.PersonDetectionConfig;
30+
import com.google.cloud.videointelligence.v1p3beta1.TimestampedObject;
31+
import com.google.cloud.videointelligence.v1p3beta1.Track;
32+
import com.google.cloud.videointelligence.v1p3beta1.VideoAnnotationResults;
33+
import com.google.cloud.videointelligence.v1p3beta1.VideoContext;
34+
import com.google.cloud.videointelligence.v1p3beta1.VideoIntelligenceServiceClient;
35+
import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
36+
import com.google.protobuf.ByteString;
37+
38+
import java.nio.file.Files;
39+
import java.nio.file.Path;
40+
import java.nio.file.Paths;
41+
42+
public class DetectPerson {
43+
44+
public static void detectPerson() throws Exception {
45+
// TODO(developer): Replace these variables before running the sample.
46+
String localFilePath = "resources/googlework_short.mp4";
47+
detectPerson(localFilePath);
48+
}
49+
50+
51+
// Detects people in a video stored in a local file using the Cloud Video Intelligence API.
52+
public static void detectPerson(String localFilePath) throws Exception {
53+
try (VideoIntelligenceServiceClient videoIntelligenceServiceClient =
54+
VideoIntelligenceServiceClient.create()) {
55+
// Reads a local video file and converts it to base64.
56+
Path path = Paths.get(localFilePath);
57+
byte[] data = Files.readAllBytes(path);
58+
ByteString inputContent = ByteString.copyFrom(data);
59+
60+
PersonDetectionConfig personDetectionConfig =
61+
PersonDetectionConfig.newBuilder()
62+
// Must set includeBoundingBoxes to true to get poses and attributes.
63+
.setIncludeBoundingBoxes(true)
64+
.setIncludePoseLandmarks(true)
65+
.setIncludeAttributes(true)
66+
.build();
67+
VideoContext videoContext =
68+
VideoContext.newBuilder().setPersonDetectionConfig(personDetectionConfig).build();
69+
70+
AnnotateVideoRequest request =
71+
AnnotateVideoRequest.newBuilder()
72+
.setInputContent(inputContent)
73+
.addFeatures(Feature.PERSON_DETECTION)
74+
.setVideoContext(videoContext)
75+
.build();
76+
77+
// Detects people in a video
78+
// We get the first result because only one video is processed.
79+
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
80+
videoIntelligenceServiceClient.annotateVideoAsync(request);
81+
82+
System.out.println("Waiting for operation to complete...");
83+
AnnotateVideoResponse response = future.get();
84+
85+
// Gets annotations for video
86+
VideoAnnotationResults annotationResult = response.getAnnotationResultsList().get(0);
87+
88+
// Annotations for list of people detected, tracked and recognized in video.
89+
for (PersonDetectionAnnotation personDetectionAnnotation :
90+
annotationResult.getPersonDetectionAnnotationsList()) {
91+
System.out.print("Person detected:\n");
92+
for (Track track : personDetectionAnnotation.getTracksList()) {
93+
VideoSegment segment = track.getSegment();
94+
System.out.printf(
95+
"\tStart: %d.%.0fs\n",
96+
segment.getStartTimeOffset().getSeconds(),
97+
segment.getStartTimeOffset().getNanos() / 1e6);
98+
System.out.printf(
99+
"\tEnd: %d.%.0fs\n",
100+
segment.getEndTimeOffset().getSeconds(),
101+
segment.getEndTimeOffset().getNanos() / 1e6);
102+
103+
// Each segment includes timestamped objects that include characteristic--e.g. clothes,
104+
// posture of the person detected.
105+
TimestampedObject firstTimestampedObject = track.getTimestampedObjects(0);
106+
107+
// Attributes include unique pieces of clothing, poses, or hair color.
108+
for (DetectedAttribute attribute : firstTimestampedObject.getAttributesList()) {
109+
System.out.printf(
110+
"\tAttribute: %s; Value: %s\n", attribute.getName(), attribute.getValue());
111+
}
112+
113+
// Landmarks in person detection include body parts.
114+
for (DetectedLandmark attribute : firstTimestampedObject.getLandmarksList()) {
115+
System.out.printf(
116+
"\tLandmark: %s; Vertex: %f, %f\n",
117+
attribute.getName(), attribute.getPoint().getX(), attribute.getPoint().getY());
118+
}
119+
}
120+
}
121+
}
122+
}
123+
}
124+
// [END video_detect_person_beta]

0 commit comments

Comments
 (0)