diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 80aa33f2443..10be3471846 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -70,6 +70,7 @@ /speech/**/* @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/python-samples-reviewers /texttospeech/**/* @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/python-samples-reviewers /translate/**/* @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/python-samples-reviewers +/video/transcoder/* @GoogleCloudPlatform/dee-data-ai @GoogleCloudPlatform/python-samples-reviewers # Cloud SDK Databases & Data Analytics teams # ---* Cloud Native DB diff --git a/.github/blunderbuss.yml b/.github/blunderbuss.yml index ef971b7a08f..94bd237f9f9 100644 --- a/.github/blunderbuss.yml +++ b/.github/blunderbuss.yml @@ -219,6 +219,7 @@ assign_prs_by: - "api: speech" - "api: talent" - "api: texttospeech" + - "api: transcoder" - "api: translate" - "api: vision" to: diff --git a/video/transcoder/create_job_from_ad_hoc.py b/video/transcoder/create_job_from_ad_hoc.py new file mode 100644 index 00000000000..60832dde609 --- /dev/null +++ b/video/transcoder/create_job_from_ad_hoc.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google Cloud Transcoder sample for creating a job based on a supplied job config. 
def create_job_from_ad_hoc(project_id, location, input_uri, output_uri):
    """Create a transcoding job from an ad-hoc (inline) job configuration.

    Args:
        project_id: The GCP project ID.
        location: The location to start the job in.
        input_uri: Uri of the video in the Cloud Storage bucket.
        output_uri: Uri of the video output folder in the Cloud Storage bucket.

    Returns:
        The created job resource.
    """
    client = TranscoderServiceClient()
    parent = f"projects/{project_id}/locations/{location}"

    # Two H.264 renditions (360p SD and 720p HD) sharing one AAC audio track.
    sd_video_stream = transcoder_v1.types.ElementaryStream(
        key="video-stream0",
        video_stream=transcoder_v1.types.VideoStream(
            h264=transcoder_v1.types.VideoStream.H264CodecSettings(
                height_pixels=360,
                width_pixels=640,
                bitrate_bps=550000,
                frame_rate=60,
            ),
        ),
    )
    hd_video_stream = transcoder_v1.types.ElementaryStream(
        key="video-stream1",
        video_stream=transcoder_v1.types.VideoStream(
            h264=transcoder_v1.types.VideoStream.H264CodecSettings(
                height_pixels=720,
                width_pixels=1280,
                bitrate_bps=2500000,
                frame_rate=60,
            ),
        ),
    )
    audio_stream = transcoder_v1.types.ElementaryStream(
        key="audio-stream0",
        audio_stream=transcoder_v1.types.AudioStream(codec="aac", bitrate_bps=64000),
    )

    # Multiplex each rendition with the audio into its own MP4 container.
    sd_mux = transcoder_v1.types.MuxStream(
        key="sd",
        container="mp4",
        elementary_streams=["video-stream0", "audio-stream0"],
    )
    hd_mux = transcoder_v1.types.MuxStream(
        key="hd",
        container="mp4",
        elementary_streams=["video-stream1", "audio-stream0"],
    )

    job = transcoder_v1.types.Job(
        input_uri=input_uri,
        output_uri=output_uri,
        config=transcoder_v1.types.JobConfig(
            elementary_streams=[sd_video_stream, hd_video_stream, audio_stream],
            mux_streams=[sd_mux, hd_mux],
        ),
    )

    response = client.create_job(parent=parent, job=job)
    print(f"Job: {response.name}")
    return response
def create_job_from_preset(project_id, location, input_uri, output_uri, preset):
    """Create a transcoding job from a built-in job preset.

    Args:
        project_id: The GCP project ID.
        location: The location to start the job in.
        input_uri: Uri of the video in the Cloud Storage bucket.
        output_uri: Uri of the video output folder in the Cloud Storage bucket.
        preset: The preset template (for example, 'preset/web-hd').

    Returns:
        The created job resource.
    """
    client = TranscoderServiceClient()
    parent = f"projects/{project_id}/locations/{location}"

    # Referencing a preset through template_id lets the service supply the
    # entire job configuration.
    job = transcoder_v1.types.Job(
        input_uri=input_uri,
        output_uri=output_uri,
        template_id=preset,
    )

    response = client.create_job(parent=parent, job=job)
    print(f"Job: {response.name}")
    return response
def create_job_from_template(project_id, location, input_uri, output_uri, template_id):
    """Create a transcoding job from a user-defined job template.

    Args:
        project_id: The GCP project ID.
        location: The location to start the job in.
        input_uri: Uri of the video in the Cloud Storage bucket.
        output_uri: Uri of the video output folder in the Cloud Storage bucket.
        template_id: The user-defined template ID.

    Returns:
        The created job resource.
    """
    client = TranscoderServiceClient()
    parent = f"projects/{project_id}/locations/{location}"

    # The template (which must live in the same location as the job) supplies
    # the job configuration.
    job = transcoder_v1.types.Job(
        input_uri=input_uri,
        output_uri=output_uri,
        template_id=template_id,
    )

    response = client.create_job(parent=parent, job=job)
    print(f"Job: {response.name}")
    return response
def create_job_template(project_id, location, template_id):
    """Create a reusable job template.

    Args:
        project_id: The GCP project ID.
        location: The location to store this template in.
        template_id: The user-defined template ID.

    Returns:
        The created job template resource.
    """
    client = TranscoderServiceClient()
    parent = f"projects/{project_id}/locations/{location}"

    # Two H.264 renditions (360p SD and 720p HD) sharing one AAC audio track.
    sd_video_stream = transcoder_v1.types.ElementaryStream(
        key="video-stream0",
        video_stream=transcoder_v1.types.VideoStream(
            h264=transcoder_v1.types.VideoStream.H264CodecSettings(
                height_pixels=360,
                width_pixels=640,
                bitrate_bps=550000,
                frame_rate=60,
            ),
        ),
    )
    hd_video_stream = transcoder_v1.types.ElementaryStream(
        key="video-stream1",
        video_stream=transcoder_v1.types.VideoStream(
            h264=transcoder_v1.types.VideoStream.H264CodecSettings(
                height_pixels=720,
                width_pixels=1280,
                bitrate_bps=2500000,
                frame_rate=60,
            ),
        ),
    )
    audio_stream = transcoder_v1.types.ElementaryStream(
        key="audio-stream0",
        audio_stream=transcoder_v1.types.AudioStream(codec="aac", bitrate_bps=64000),
    )

    # Multiplex each rendition with the audio into its own MP4 container.
    sd_mux = transcoder_v1.types.MuxStream(
        key="sd",
        container="mp4",
        elementary_streams=["video-stream0", "audio-stream0"],
    )
    hd_mux = transcoder_v1.types.MuxStream(
        key="hd",
        container="mp4",
        elementary_streams=["video-stream1", "audio-stream0"],
    )

    job_template = transcoder_v1.types.JobTemplate(
        name=f"projects/{project_id}/locations/{location}/jobTemplates/{template_id}",
        config=transcoder_v1.types.JobConfig(
            elementary_streams=[sd_video_stream, hd_video_stream, audio_stream],
            mux_streams=[sd_mux, hd_mux],
        ),
    )

    response = client.create_job_template(
        parent=parent, job_template=job_template, job_template_id=template_id
    )
    print(f"Job template: {response.name}")
    return response
def create_job_with_animated_overlay(
    project_id, location, input_uri, overlay_image_uri, output_uri
):
    """Create a transcoding job whose output carries an animated image overlay.

    Args:
        project_id: The GCP project ID.
        location: The location to start the job in.
        input_uri: Uri of the video in the Cloud Storage bucket.
        overlay_image_uri: Uri of the image for the overlay in the Cloud Storage bucket.
        output_uri: Uri of the video output folder in the Cloud Storage bucket.

    Returns:
        The created job resource.
    """
    client = TranscoderServiceClient()
    parent = f"projects/{project_id}/locations/{location}"

    # Fade the overlay in between 5s and 10s, then out between 12s and 15s,
    # centered at the normalized coordinate (0.5, 0.5).
    fade_in = transcoder_v1.types.Overlay.Animation(
        animation_fade=transcoder_v1.types.Overlay.AnimationFade(
            fade_type=transcoder_v1.types.Overlay.FadeType.FADE_IN,
            xy=transcoder_v1.types.Overlay.NormalizedCoordinate(x=0.5, y=0.5),
            start_time_offset=duration.Duration(seconds=5),
            end_time_offset=duration.Duration(seconds=10),
        ),
    )
    fade_out = transcoder_v1.types.Overlay.Animation(
        animation_fade=transcoder_v1.types.Overlay.AnimationFade(
            fade_type=transcoder_v1.types.Overlay.FadeType.FADE_OUT,
            xy=transcoder_v1.types.Overlay.NormalizedCoordinate(x=0.5, y=0.5),
            start_time_offset=duration.Duration(seconds=12),
            end_time_offset=duration.Duration(seconds=15),
        ),
    )
    overlay = transcoder_v1.types.Overlay(
        image=transcoder_v1.types.Overlay.Image(
            uri=overlay_image_uri,
            resolution=transcoder_v1.types.Overlay.NormalizedCoordinate(x=0, y=0),
            alpha=1,
        ),
        animations=[fade_in, fade_out],
    )

    # One 360p H.264 rendition with an AAC track, muxed into a single MP4.
    video_stream = transcoder_v1.types.ElementaryStream(
        key="video-stream0",
        video_stream=transcoder_v1.types.VideoStream(
            h264=transcoder_v1.types.VideoStream.H264CodecSettings(
                height_pixels=360,
                width_pixels=640,
                bitrate_bps=550000,
                frame_rate=60,
            ),
        ),
    )
    audio_stream = transcoder_v1.types.ElementaryStream(
        key="audio-stream0",
        audio_stream=transcoder_v1.types.AudioStream(codec="aac", bitrate_bps=64000),
    )
    mux = transcoder_v1.types.MuxStream(
        key="sd",
        container="mp4",
        elementary_streams=["video-stream0", "audio-stream0"],
    )

    job = transcoder_v1.types.Job(
        input_uri=input_uri,
        output_uri=output_uri,
        config=transcoder_v1.types.JobConfig(
            elementary_streams=[video_stream, audio_stream],
            mux_streams=[mux],
            overlays=[overlay],
        ),
    )

    response = client.create_job(parent=parent, job=job)
    print(f"Job: {response.name}")
    return response
def create_job_with_concatenated_inputs(
    project_id,
    location,
    input1_uri,
    start_time_input1,
    end_time_input1,
    input2_uri,
    start_time_input2,
    end_time_input2,
    output_uri,
):
    """Create a transcoding job that concatenates trimmed spans of two inputs.

    Args:
        project_id (str): The GCP project ID.
        location (str): The location to start the job in.
        input1_uri (str): Uri of the first video in the Cloud Storage bucket.
        start_time_input1 (str): Start time, in fractional seconds ending in 's'
          (e.g., '0s'), relative to the first input video timeline.
        end_time_input1 (str): End time, in fractional seconds ending in 's'
          (e.g., '8.1s'), relative to the first input video timeline.
        input2_uri (str): Uri of the second video in the Cloud Storage bucket.
        start_time_input2 (str): Start time, in fractional seconds ending in 's'
          (e.g., '3.5s'), relative to the second input video timeline.
        end_time_input2 (str): End time, in fractional seconds ending in 's'
          (e.g., '15s'), relative to the second input video timeline.
        output_uri (str): Uri of the video output folder in the Cloud Storage
          bucket.

    Returns:
        The created job resource.
    """
    # Parse the "1.5s"-style JSON duration strings into protobuf Durations.
    start1 = duration.Duration()
    start1.FromJsonString(start_time_input1)
    end1 = duration.Duration()
    end1.FromJsonString(end_time_input1)
    start2 = duration.Duration()
    start2.FromJsonString(start_time_input2)
    end2 = duration.Duration()
    end2.FromJsonString(end_time_input2)

    client = TranscoderServiceClient()
    parent = f"projects/{project_id}/locations/{location}"

    # Inputs are declared in the config (not via job.input_uri) so the edit
    # list can reference and order them.
    inputs = [
        transcoder_v1.types.Input(key="input1", uri=input1_uri),
        transcoder_v1.types.Input(key="input2", uri=input2_uri),
    ]
    edit_list = [
        transcoder_v1.types.EditAtom(
            key="atom1",
            inputs=["input1"],
            start_time_offset=start1,
            end_time_offset=end1,
        ),
        transcoder_v1.types.EditAtom(
            key="atom2",
            inputs=["input2"],
            start_time_offset=start2,
            end_time_offset=end2,
        ),
    ]

    video_stream = transcoder_v1.types.ElementaryStream(
        key="video-stream0",
        video_stream=transcoder_v1.types.VideoStream(
            h264=transcoder_v1.types.VideoStream.H264CodecSettings(
                height_pixels=360,
                width_pixels=640,
                bitrate_bps=550000,
                frame_rate=60,
            ),
        ),
    )
    audio_stream = transcoder_v1.types.ElementaryStream(
        key="audio-stream0",
        audio_stream=transcoder_v1.types.AudioStream(codec="aac", bitrate_bps=64000),
    )
    mux = transcoder_v1.types.MuxStream(
        key="sd",
        container="mp4",
        elementary_streams=["video-stream0", "audio-stream0"],
    )

    job = transcoder_v1.types.Job(
        output_uri=output_uri,
        config=transcoder_v1.types.JobConfig(
            inputs=inputs,
            edit_list=edit_list,
            elementary_streams=[video_stream, audio_stream],
            mux_streams=[mux],
        ),
    )

    response = client.create_job(parent=parent, job=job)
    print(f"Job: {response.name}")
    return response
Use this field to trim " + + "content from the beginning of the second video.", + required=True, + ) + parser.add_argument( + "--end_time_input2", + help="End time, in fractional seconds ending in 's' (e.g., '9.5s'), " + + "relative to the second input video timeline. Use this field to trim " + + "content from the end of the second video.", + required=True, + ) + parser.add_argument( + "--output_uri", + help="Uri of the video output folder in the Cloud Storage bucket. " + + "Must end in '/'.", + required=True, + ) + args = parser.parse_args() + create_job_with_concatenated_inputs( + args.project_id, + args.location, + args.input1_uri, + args.start_time_input1, + args.end_time_input1, + args.input2_uri, + args.start_time_input2, + args.end_time_input2, + args.output_uri, + ) diff --git a/video/transcoder/create_job_with_embedded_captions.py b/video/transcoder/create_job_with_embedded_captions.py new file mode 100644 index 00000000000..58f985dbaf6 --- /dev/null +++ b/video/transcoder/create_job_with_embedded_captions.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python + +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google Cloud Transcoder sample for creating a job that embeds captions in the output video. 
def create_job_with_embedded_captions(
    project_id,
    location,
    input_video_uri,
    input_captions_uri,
    output_uri,
):
    """Create a transcoding job that embeds CEA-608 closed captions in the output.

    Args:
        project_id (str): The GCP project ID.
        location (str): The location to start the job in.
        input_video_uri (str): Uri of the input video in the Cloud Storage
          bucket.
        input_captions_uri (str): Uri of the input captions file in the Cloud
          Storage bucket.
        output_uri (str): Uri of the video output folder in the Cloud Storage
          bucket.

    Returns:
        The created job resource.
    """
    client = TranscoderServiceClient()
    parent = f"projects/{project_id}/locations/{location}"

    # The video and captions files are separate inputs joined by one edit atom.
    inputs = [
        transcoder_v1.types.Input(key="input0", uri=input_video_uri),
        transcoder_v1.types.Input(key="caption-input0", uri=input_captions_uri),
    ]
    edit_list = [
        transcoder_v1.types.EditAtom(
            key="atom0",
            inputs=["input0", "caption-input0"],
        ),
    ]

    video_stream = transcoder_v1.types.ElementaryStream(
        key="video-stream0",
        video_stream=transcoder_v1.types.VideoStream(
            h264=transcoder_v1.types.VideoStream.H264CodecSettings(
                height_pixels=360,
                width_pixels=640,
                bitrate_bps=550000,
                frame_rate=60,
            ),
        ),
    )
    audio_stream = transcoder_v1.types.ElementaryStream(
        key="audio-stream0",
        audio_stream=transcoder_v1.types.AudioStream(
            codec="aac",
            bitrate_bps=64000,
        ),
    )
    # NOTE: mapping_ and type_ carry trailing underscores because "mapping"
    # and "type" are reserved names in proto-plus generated messages.
    caption_stream = transcoder_v1.types.ElementaryStream(
        key="cea-stream0",
        text_stream=transcoder_v1.types.TextStream(
            codec="cea608",
            mapping_=[
                transcoder_v1.types.TextStream.TextMapping(
                    atom_key="atom0",
                    input_key="caption-input0",
                    input_track=0,
                ),
            ],
            language_code="en-US",
            display_name="English",
        ),
    )

    # HLS gets a muxed TS stream; DASH gets separate video and audio fMP4s.
    mux_streams = [
        transcoder_v1.types.MuxStream(
            key="sd-hls",
            container="ts",
            elementary_streams=["video-stream0", "audio-stream0"],
        ),
        transcoder_v1.types.MuxStream(
            key="sd-dash",
            container="fmp4",
            elementary_streams=["video-stream0"],
        ),
        transcoder_v1.types.MuxStream(
            key="audio-dash",
            container="fmp4",
            elementary_streams=["audio-stream0"],
        ),
    ]
    manifests = [
        transcoder_v1.types.Manifest(
            file_name="manifest.m3u8",
            type_="HLS",
            mux_streams=["sd-hls"],
        ),
        transcoder_v1.types.Manifest(
            file_name="manifest.mpd",
            type_="DASH",
            mux_streams=["sd-dash", "audio-dash"],
        ),
    ]

    job = transcoder_v1.types.Job(
        output_uri=output_uri,
        config=transcoder_v1.types.JobConfig(
            inputs=inputs,
            edit_list=edit_list,
            elementary_streams=[video_stream, audio_stream, caption_stream],
            mux_streams=mux_streams,
            manifests=manifests,
        ),
    )

    response = client.create_job(parent=parent, job=job)
    print(f"Job: {response.name}")
    return response
" + + "Must end in '/'.", + required=True, + ) + args = parser.parse_args() + create_job_with_embedded_captions( + args.project_id, + args.location, + args.input_video_uri, + args.input_captions_uri, + args.output_uri, + ) diff --git a/video/transcoder/create_job_with_periodic_images_spritesheet.py b/video/transcoder/create_job_with_periodic_images_spritesheet.py new file mode 100644 index 00000000000..1690ba6444f --- /dev/null +++ b/video/transcoder/create_job_with_periodic_images_spritesheet.py @@ -0,0 +1,137 @@ +#!/usr/bin/env python + +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google Cloud Transcoder sample for creating a job that generates two spritesheets from the input video. Each spritesheet contains images that are captured periodically. + +Example usage: + python create_job_with_periodic_images_spritesheet.py --project_id --location --input_uri --output_uri +""" + +# [START transcoder_create_job_with_periodic_images_spritesheet] + +import argparse + +from google.cloud.video import transcoder_v1 +from google.cloud.video.transcoder_v1.services.transcoder_service import ( + TranscoderServiceClient, +) +from google.protobuf import duration_pb2 as duration + + +def create_job_with_periodic_images_spritesheet( + project_id, location, input_uri, output_uri +): + """Creates a job based on an ad-hoc job configuration that generates two spritesheets. + + Args: + project_id: The GCP project ID. 
+ location: The location to start the job in. + input_uri: Uri of the video in the Cloud Storage bucket. + output_uri: Uri of the video output folder in the Cloud Storage bucket.""" + + client = TranscoderServiceClient() + + parent = f"projects/{project_id}/locations/{location}" + job = transcoder_v1.types.Job() + job.input_uri = input_uri + job.output_uri = output_uri + job.config = transcoder_v1.types.JobConfig( + # Create an ad-hoc job. For more information, see https://cloud.google.com/transcoder/docs/how-to/jobs#create_jobs_ad_hoc. + # See all options for the job config at https://cloud.google.com/transcoder/docs/reference/rest/v1/JobConfig. + elementary_streams=[ + # This section defines the output video stream. + transcoder_v1.types.ElementaryStream( + key="video-stream0", + video_stream=transcoder_v1.types.VideoStream( + h264=transcoder_v1.types.VideoStream.H264CodecSettings( + height_pixels=360, + width_pixels=640, + bitrate_bps=550000, + frame_rate=60, + ), + ), + ), + # This section defines the output audio stream. + transcoder_v1.types.ElementaryStream( + key="audio-stream0", + audio_stream=transcoder_v1.types.AudioStream( + codec="aac", bitrate_bps=64000 + ), + ), + ], + # This section multiplexes the output audio and video together into a container. + mux_streams=[ + transcoder_v1.types.MuxStream( + key="sd", + container="mp4", + elementary_streams=["video-stream0", "audio-stream0"], + ), + ], + # Generate two sprite sheets from the input video into the GCS bucket. For more information, see + # https://cloud.google.com/transcoder/docs/how-to/generate-spritesheet#generate_image_periodically. + sprite_sheets=[ + # Generate a sprite sheet with 64x32px images. An image is taken every 7 seconds from the video. + transcoder_v1.types.SpriteSheet( + file_prefix="small-sprite-sheet", + sprite_width_pixels=64, + sprite_height_pixels=32, + interval=duration.Duration( + seconds=7, + ), + ), + # Generate a sprite sheet with 128x72px images. 
An image is taken every 7 seconds from the video. + transcoder_v1.types.SpriteSheet( + file_prefix="large-sprite-sheet", + sprite_width_pixels=128, + sprite_height_pixels=72, + interval=duration.Duration( + seconds=7, + ), + ), + ], + ) + response = client.create_job(parent=parent, job=job) + print(f"Job: {response.name}") + return response + + +# [END transcoder_create_job_with_periodic_images_spritesheet] + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--project_id", help="Your Cloud project ID.", required=True) + parser.add_argument( + "--location", + help="The location to start this job in.", + default="us-central1", + ) + parser.add_argument( + "--input_uri", + help="Uri of the video in the Cloud Storage bucket.", + required=True, + ) + parser.add_argument( + "--output_uri", + help="Uri of the video output folder in the Cloud Storage bucket. Must end in '/'.", + required=True, + ) + args = parser.parse_args() + create_job_with_periodic_images_spritesheet( + args.project_id, + args.location, + args.input_uri, + args.output_uri, + ) diff --git a/video/transcoder/create_job_with_set_number_images_spritesheet.py b/video/transcoder/create_job_with_set_number_images_spritesheet.py new file mode 100644 index 00000000000..bc196fb46cc --- /dev/null +++ b/video/transcoder/create_job_with_set_number_images_spritesheet.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python + +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google Cloud Transcoder sample for creating a job that generates two spritesheets from the input video. Each spritesheet contains a set number of images. + +Example usage: + python create_job_with_set_number_images_spritesheet.py --project_id --location --input_uri --output_uri +""" + +# [START transcoder_create_job_with_set_number_images_spritesheet] + +import argparse + +from google.cloud.video import transcoder_v1 +from google.cloud.video.transcoder_v1.services.transcoder_service import ( + TranscoderServiceClient, +) + + +def create_job_with_set_number_images_spritesheet( + project_id, location, input_uri, output_uri +): + """Creates a job based on an ad-hoc job configuration that generates two spritesheets. + + Args: + project_id: The GCP project ID. + location: The location to start the job in. + input_uri: Uri of the video in the Cloud Storage bucket. + output_uri: Uri of the video output folder in the Cloud Storage bucket.""" + + client = TranscoderServiceClient() + + parent = f"projects/{project_id}/locations/{location}" + job = transcoder_v1.types.Job() + job.input_uri = input_uri + job.output_uri = output_uri + job.config = transcoder_v1.types.JobConfig( + # Create an ad-hoc job. For more information, see https://cloud.google.com/transcoder/docs/how-to/jobs#create_jobs_ad_hoc. + # See all options for the job config at https://cloud.google.com/transcoder/docs/reference/rest/v1/JobConfig. + elementary_streams=[ + # This section defines the output video stream. + transcoder_v1.types.ElementaryStream( + key="video-stream0", + video_stream=transcoder_v1.types.VideoStream( + h264=transcoder_v1.types.VideoStream.H264CodecSettings( + height_pixels=360, + width_pixels=640, + bitrate_bps=550000, + frame_rate=60, + ), + ), + ), + # This section defines the output audio stream. 
+ transcoder_v1.types.ElementaryStream( + key="audio-stream0", + audio_stream=transcoder_v1.types.AudioStream( + codec="aac", bitrate_bps=64000 + ), + ), + ], + # This section multiplexes the output audio and video together into a container. + mux_streams=[ + transcoder_v1.types.MuxStream( + key="sd", + container="mp4", + elementary_streams=["video-stream0", "audio-stream0"], + ), + ], + # Generate two sprite sheets from the input video into the GCS bucket. For more information, see + # https://cloud.google.com/transcoder/docs/how-to/generate-spritesheet#generate_set_number_of_images. + sprite_sheets=[ + # Generate a 10x10 sprite sheet with 64x32px images. + transcoder_v1.types.SpriteSheet( + file_prefix="small-sprite-sheet", + sprite_width_pixels=64, + sprite_height_pixels=32, + column_count=10, + row_count=10, + total_count=100, + ), + # Generate a 10x10 sprite sheet with 128x72px images. + transcoder_v1.types.SpriteSheet( + file_prefix="large-sprite-sheet", + sprite_width_pixels=128, + sprite_height_pixels=72, + column_count=10, + row_count=10, + total_count=100, + ), + ], + ) + response = client.create_job(parent=parent, job=job) + print(f"Job: {response.name}") + return response + + +# [END transcoder_create_job_with_set_number_images_spritesheet] + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--project_id", help="Your Cloud project ID.", required=True) + parser.add_argument( + "--location", + help="The location to start this job in.", + default="us-central1", + ) + parser.add_argument( + "--input_uri", + help="Uri of the video in the Cloud Storage bucket.", + required=True, + ) + parser.add_argument( + "--output_uri", + help="Uri of the video output folder in the Cloud Storage bucket. 
Must end in '/'.", + required=True, + ) + args = parser.parse_args() + create_job_with_set_number_images_spritesheet( + args.project_id, + args.location, + args.input_uri, + args.output_uri, + ) diff --git a/video/transcoder/create_job_with_standalone_captions.py b/video/transcoder/create_job_with_standalone_captions.py new file mode 100644 index 00000000000..4524185d673 --- /dev/null +++ b/video/transcoder/create_job_with_standalone_captions.py @@ -0,0 +1,222 @@ +#!/usr/bin/env python + +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google Cloud Transcoder sample for creating a job that can use subtitles from a standalone file. + +Example usage: + python create_job_with_standalone_captions.py --project_id --location \ + --input_video_uri --input_subtitles1_uri --input_subtitles2_uri --output_uri +""" + +# [START transcoder_create_job_with_standalone_captions] + +import argparse + +from google.cloud.video import transcoder_v1 +from google.cloud.video.transcoder_v1.services.transcoder_service import ( + TranscoderServiceClient, +) +from google.protobuf import duration_pb2 as duration + + +def create_job_with_standalone_captions( + project_id, + location, + input_video_uri, + input_subtitles1_uri, + input_subtitles2_uri, + output_uri, +): + """Creates a job based on an ad-hoc job configuration that can use subtitles from a standalone file. + + Args: + project_id (str): The GCP project ID. 
+ location (str): The location to start the job in. + input_video_uri (str): Uri of the input video in the Cloud Storage + bucket. + input_subtitles1_uri (str): Uri of an input subtitles file in the Cloud + Storage bucket. + input_subtitles2_uri (str): Uri of an input subtitles file in the Cloud + Storage bucket. + output_uri (str): Uri of the video output folder in the Cloud Storage + bucket.""" + + client = TranscoderServiceClient() + + parent = f"projects/{project_id}/locations/{location}" + job = transcoder_v1.types.Job() + job.output_uri = output_uri + job.config = transcoder_v1.types.JobConfig( + inputs=[ + transcoder_v1.types.Input( + key="input0", + uri=input_video_uri, + ), + transcoder_v1.types.Input( + key="subtitle-input-en", + uri=input_subtitles1_uri, + ), + transcoder_v1.types.Input( + key="subtitle-input-es", + uri=input_subtitles2_uri, + ), + ], + edit_list=[ + transcoder_v1.types.EditAtom( + key="atom0", + inputs=["input0", "subtitle-input-en", "subtitle-input-es"], + ), + ], + elementary_streams=[ + transcoder_v1.types.ElementaryStream( + key="video-stream0", + video_stream=transcoder_v1.types.VideoStream( + h264=transcoder_v1.types.VideoStream.H264CodecSettings( + height_pixels=360, + width_pixels=640, + bitrate_bps=550000, + frame_rate=60, + ), + ), + ), + transcoder_v1.types.ElementaryStream( + key="audio-stream0", + audio_stream=transcoder_v1.types.AudioStream( + codec="aac", + bitrate_bps=64000, + ), + ), + transcoder_v1.types.ElementaryStream( + key="vtt-stream-en", + text_stream=transcoder_v1.types.TextStream( + codec="webvtt", + language_code="en-US", + display_name="English", + mapping_=[ + transcoder_v1.types.TextStream.TextMapping( + atom_key="atom0", + input_key="subtitle-input-en", + ), + ], + ), + ), + transcoder_v1.types.ElementaryStream( + key="vtt-stream-es", + text_stream=transcoder_v1.types.TextStream( + codec="webvtt", + language_code="es-ES", + display_name="Spanish", + mapping_=[ + transcoder_v1.types.TextStream.TextMapping( 
+ atom_key="atom0", + input_key="subtitle-input-es", + ), + ], + ), + ), + ], + mux_streams=[ + transcoder_v1.types.MuxStream( + key="sd-hls-fmp4", + container="fmp4", + elementary_streams=["video-stream0"], + ), + transcoder_v1.types.MuxStream( + key="audio-hls-fmp4", + container="fmp4", + elementary_streams=["audio-stream0"], + ), + transcoder_v1.types.MuxStream( + key="text-vtt-en", + container="vtt", + elementary_streams=["vtt-stream-en"], + segment_settings=transcoder_v1.types.SegmentSettings( + segment_duration=duration.Duration( + seconds=6, + ), + individual_segments=True, + ), + ), + transcoder_v1.types.MuxStream( + key="text-vtt-es", + container="vtt", + elementary_streams=["vtt-stream-es"], + segment_settings=transcoder_v1.types.SegmentSettings( + segment_duration=duration.Duration( + seconds=6, + ), + individual_segments=True, + ), + ), + ], + manifests=[ + transcoder_v1.types.Manifest( + file_name="manifest.m3u8", + type_="HLS", + mux_streams=[ + "sd-hls-fmp4", + "audio-hls-fmp4", + "text-vtt-en", + "text-vtt-es", + ], + ), + ], + ) + response = client.create_job(parent=parent, job=job) + print(f"Job: {response.name}") + return response + + +# [END transcoder_create_job_with_standalone_captions] + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--project_id", help="Your Cloud project ID.", required=True) + parser.add_argument( + "--location", + help="The location to start this job in.", + default="us-central1", + ) + parser.add_argument( + "--input_video_uri", + help="Uri of the input video in the Cloud Storage bucket.", + required=True, + ) + parser.add_argument( + "--input_subtitles1_uri", + help="Uri of an input subtitles file in the Cloud Storage bucket.", + required=True, + ) + parser.add_argument( + "--input_subtitles2_uri", + help="Uri of an input subtitles file in the Cloud Storage bucket.", + required=True, + ) + parser.add_argument( + "--output_uri", + help="Uri of the video output folder in the Cloud 
Storage bucket. " + + "Must end in '/'.", + required=True, + ) + args = parser.parse_args() + create_job_with_standalone_captions( + args.project_id, + args.location, + args.input_video_uri, + args.input_subtitles1_uri, + args.input_subtitles2_uri, + args.output_uri, + ) diff --git a/video/transcoder/create_job_with_static_overlay.py b/video/transcoder/create_job_with_static_overlay.py new file mode 100644 index 00000000000..67839bb3330 --- /dev/null +++ b/video/transcoder/create_job_with_static_overlay.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python + +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google Cloud Transcoder sample for creating a job based on a supplied job config that includes a static overlay. + +Example usage: + python create_job_with_static_overlay.py --project_id --location --input_uri --overlay_image_uri --output_uri +""" + +# [START transcoder_create_job_with_static_overlay] + +import argparse + +from google.cloud.video import transcoder_v1 +from google.cloud.video.transcoder_v1.services.transcoder_service import ( + TranscoderServiceClient, +) +from google.protobuf import duration_pb2 as duration + + +def create_job_with_static_overlay( + project_id, location, input_uri, overlay_image_uri, output_uri +): + """Creates a job based on an ad-hoc job configuration that includes a static image overlay. + + Args: + project_id: The GCP project ID. + location: The location to start the job in. 
+ input_uri: Uri of the video in the Cloud Storage bucket. + overlay_image_uri: Uri of the image for the overlay in the Cloud Storage bucket. + output_uri: Uri of the video output folder in the Cloud Storage bucket.""" + + client = TranscoderServiceClient() + + parent = f"projects/{project_id}/locations/{location}" + job = transcoder_v1.types.Job() + job.input_uri = input_uri + job.output_uri = output_uri + job.config = transcoder_v1.types.JobConfig( + elementary_streams=[ + transcoder_v1.types.ElementaryStream( + key="video-stream0", + video_stream=transcoder_v1.types.VideoStream( + h264=transcoder_v1.types.VideoStream.H264CodecSettings( + height_pixels=360, + width_pixels=640, + bitrate_bps=550000, + frame_rate=60, + ), + ), + ), + transcoder_v1.types.ElementaryStream( + key="audio-stream0", + audio_stream=transcoder_v1.types.AudioStream( + codec="aac", bitrate_bps=64000 + ), + ), + ], + mux_streams=[ + transcoder_v1.types.MuxStream( + key="sd", + container="mp4", + elementary_streams=["video-stream0", "audio-stream0"], + ), + ], + overlays=[ + transcoder_v1.types.Overlay( + image=transcoder_v1.types.Overlay.Image( + uri=overlay_image_uri, + resolution=transcoder_v1.types.Overlay.NormalizedCoordinate( + x=1, + y=0.5, + ), + alpha=1, + ), + animations=[ + transcoder_v1.types.Overlay.Animation( + animation_static=transcoder_v1.types.Overlay.AnimationStatic( + xy=transcoder_v1.types.Overlay.NormalizedCoordinate( + x=0, + y=0, + ), + start_time_offset=duration.Duration( + seconds=0, + ), + ), + ), + transcoder_v1.types.Overlay.Animation( + animation_end=transcoder_v1.types.Overlay.AnimationEnd( + start_time_offset=duration.Duration( + seconds=10, + ), + ), + ), + ], + ), + ], + ) + response = client.create_job(parent=parent, job=job) + print(f"Job: {response.name}") + return response + + +# [END transcoder_create_job_with_static_overlay] + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--project_id", help="Your Cloud project 
ID.", required=True) + parser.add_argument( + "--location", + help="The location to start this job in.", + default="us-central1", + ) + parser.add_argument( + "--input_uri", + help="Uri of the video in the Cloud Storage bucket.", + required=True, + ) + parser.add_argument( + "--overlay_image_uri", + help="Uri of the overlay image in the Cloud Storage bucket.", + required=True, + ) + parser.add_argument( + "--output_uri", + help="Uri of the video output folder in the Cloud Storage bucket. Must end in '/'.", + required=True, + ) + args = parser.parse_args() + create_job_with_static_overlay( + args.project_id, + args.location, + args.input_uri, + args.overlay_image_uri, + args.output_uri, + ) diff --git a/video/transcoder/delete_job.py b/video/transcoder/delete_job.py new file mode 100644 index 00000000000..156d67c6d2c --- /dev/null +++ b/video/transcoder/delete_job.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google Cloud Transcoder sample for deleting a job. + +Example usage: + python delete_job.py --project_id --location --job_id +""" + +# [START transcoder_delete_job] + +import argparse + +from google.cloud.video.transcoder_v1.services.transcoder_service import ( + TranscoderServiceClient, +) + + +def delete_job(project_id, location, job_id): + """Deletes a job. + + Args: + project_id: The GCP project ID. + location: The location this job is in.
+ job_id: The job ID.""" + + client = TranscoderServiceClient() + + name = f"projects/{project_id}/locations/{location}/jobs/{job_id}" + response = client.delete_job(name=name) + print("Deleted job") + return response + + +# [END transcoder_delete_job] + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--project_id", help="Your Cloud project ID.", required=True) + parser.add_argument("--location", help="The location of the job.", required=True) + parser.add_argument("--job_id", help="The job ID.", required=True) + args = parser.parse_args() + delete_job(args.project_id, args.location, args.job_id) diff --git a/video/transcoder/delete_job_template.py b/video/transcoder/delete_job_template.py new file mode 100644 index 00000000000..cdd3d710046 --- /dev/null +++ b/video/transcoder/delete_job_template.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google Cloud Transcoder sample for deleting a job template. + +Example usage: + python delete_job_template.py --project_id --location --template_id +""" + +# [START transcoder_delete_job_template] + +import argparse + +from google.cloud.video.transcoder_v1.services.transcoder_service import ( + TranscoderServiceClient, +) + + +def delete_job_template(project_id, location, template_id): + """Deletes a job template. + + Args: + project_id: The GCP project ID. + location: The location of the template. 
+ template_id: The user-defined template ID.""" + + client = TranscoderServiceClient() + + name = f"projects/{project_id}/locations/{location}/jobTemplates/{template_id}" + response = client.delete_job_template(name=name) + print("Deleted job template") + return response + + +# [END transcoder_delete_job_template] + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--project_id", help="Your Cloud project ID.", required=True) + parser.add_argument( + "--location", help="The location of the template.", required=True + ) + parser.add_argument("--template_id", help="The job template ID.", required=True) + args = parser.parse_args() + delete_job_template(args.project_id, args.location, args.template_id) diff --git a/video/transcoder/get_job.py b/video/transcoder/get_job.py new file mode 100644 index 00000000000..ff594bf342c --- /dev/null +++ b/video/transcoder/get_job.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google Cloud Transcoder sample for getting the details for a job. + +Example usage: + python get_job.py --project_id --location --job_id +""" + +# [START transcoder_get_job] + +import argparse + +from google.cloud.video.transcoder_v1.services.transcoder_service import ( + TranscoderServiceClient, +) + + +def get_job(project_id, location, job_id): + """Gets a job. + + Args: + project_id: The GCP project ID. + location: The location this job is in. 
+ job_id: The job ID.""" + + client = TranscoderServiceClient() + + name = f"projects/{project_id}/locations/{location}/jobs/{job_id}" + response = client.get_job(name=name) + print(f"Job: {response.name}") + return response + + +# [END transcoder_get_job] + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--project_id", help="Your Cloud project ID.", required=True) + parser.add_argument("--location", help="The location of the job.", required=True) + parser.add_argument("--job_id", help="The job ID.", required=True) + args = parser.parse_args() + get_job(args.project_id, args.location, args.job_id) diff --git a/video/transcoder/get_job_state.py b/video/transcoder/get_job_state.py new file mode 100644 index 00000000000..86e2b341066 --- /dev/null +++ b/video/transcoder/get_job_state.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google Cloud Transcoder sample for getting the state for a job. + +Example usage: + python get_job_state.py --project_id --location --job_id +""" + +# [START transcoder_get_job_state] + +import argparse + +from google.cloud.video.transcoder_v1.services.transcoder_service import ( + TranscoderServiceClient, +) + + +def get_job_state(project_id, location, job_id): + """Gets a job's state. + + Args: + project_id: The GCP project ID. + location: The location this job is in. 
+ job_id: The job ID.""" + + client = TranscoderServiceClient() + + name = f"projects/{project_id}/locations/{location}/jobs/{job_id}" + response = client.get_job(name=name) + + print(f"Job state: {str(response.state)}") + return response + + +# [END transcoder_get_job_state] + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--project_id", help="Your Cloud project ID.", required=True) + parser.add_argument("--location", help="The location of the job.", required=True) + parser.add_argument("--job_id", help="The job ID.", required=True) + args = parser.parse_args() + get_job_state(args.project_id, args.location, args.job_id) diff --git a/video/transcoder/get_job_template.py b/video/transcoder/get_job_template.py new file mode 100644 index 00000000000..92e3dbf6add --- /dev/null +++ b/video/transcoder/get_job_template.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google Cloud Transcoder sample for getting a job template. + +Example usage: + python get_job_template.py --project_id --location --template_id +""" + +# [START transcoder_get_job_template] + +import argparse + +from google.cloud.video.transcoder_v1.services.transcoder_service import ( + TranscoderServiceClient, +) + + +def get_job_template(project_id, location, template_id): + """Gets a job template. + + Args: + project_id: The GCP project ID. + location: The location of the template. 
+ template_id: The user-defined template ID.""" + + client = TranscoderServiceClient() + + name = f"projects/{project_id}/locations/{location}/jobTemplates/{template_id}" + response = client.get_job_template(name=name) + print(f"Job template: {response.name}") + return response + + +# [END transcoder_get_job_template] + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--project_id", help="Your Cloud project ID.", required=True) + parser.add_argument( + "--location", help="The location of the template.", required=True + ) + parser.add_argument("--template_id", help="The job template ID.", required=True) + args = parser.parse_args() + get_job_template(args.project_id, args.location, args.template_id) diff --git a/video/transcoder/job_template_test.py b/video/transcoder/job_template_test.py new file mode 100644 index 00000000000..7cc70d0df46 --- /dev/null +++ b/video/transcoder/job_template_test.py @@ -0,0 +1,60 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import uuid + +from google.api_core.exceptions import NotFound + +import create_job_template +import delete_job_template +import get_job_template +import list_job_templates + +location = "us-central1" +project_id = os.environ["GOOGLE_CLOUD_PROJECT"] +project_number = os.environ["GOOGLE_CLOUD_PROJECT_NUMBER"] +template_id = f"my-python-test-template-{uuid.uuid4()}" + + +def test_template_operations(capsys): + + # Enable the following API on the test project: + # * Transcoder API + + job_template_name = ( + f"projects/{project_number}/locations/{location}/jobTemplates/{template_id}" + ) + + try: + delete_job_template.delete_job_template(project_id, location, template_id) + except NotFound as e: + print(f"Ignoring NotFound, details: {e}") + out, _ = capsys.readouterr() + + create_job_template.create_job_template(project_id, location, template_id) + out, _ = capsys.readouterr() + assert job_template_name in out + + get_job_template.get_job_template(project_id, location, template_id) + out, _ = capsys.readouterr() + assert job_template_name in out + + list_job_templates.list_job_templates(project_id, location) + out, _ = capsys.readouterr() + assert job_template_name in out + + delete_job_template.delete_job_template(project_id, location, template_id) + out, _ = capsys.readouterr() + assert "Deleted job template" in out diff --git a/video/transcoder/job_test.py b/video/transcoder/job_test.py new file mode 100644 index 00000000000..998555d8d35 --- /dev/null +++ b/video/transcoder/job_test.py @@ -0,0 +1,520 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Integration tests for the Transcoder API code samples.

Each test invokes a sample to create a job (or job template), verifies that
the job can be fetched and listed, waits for it to make progress, and then
deletes it.  The shared create/get/wait/list/delete sequence lives in
``_check_job_lifecycle`` so each test only spells out what is unique to it.
"""

import os
import time
import uuid

import backoff
from google.cloud import storage
from googleapiclient.errors import HttpError
import pytest

import create_job_from_ad_hoc
import create_job_from_preset
import create_job_from_template
import create_job_template
import create_job_with_animated_overlay
import create_job_with_concatenated_inputs
import create_job_with_embedded_captions
import create_job_with_periodic_images_spritesheet
import create_job_with_set_number_images_spritesheet
import create_job_with_standalone_captions
import create_job_with_static_overlay
import delete_job
import delete_job_template
import get_job
import get_job_state
import list_jobs

location = "us-central1"
project_id = os.environ["GOOGLE_CLOUD_PROJECT"]
project_number = os.environ["GOOGLE_CLOUD_PROJECT_NUMBER"]
template_id = f"my-python-test-template-{uuid.uuid4()}"

# Input media lives in the shared, public samples bucket; output goes to a
# per-run scratch bucket created by the test_bucket fixture.
input_bucket_name = "cloud-samples-data/media/"
output_bucket_name = f"python-samples-transcoder-{uuid.uuid4()}"
test_video_file_name = "ChromeCast.mp4"
test_overlay_image_file_name = "overlay.jpg"
test_concat1_file_name = "ForBiggerEscapes.mp4"
test_concat2_file_name = "ForBiggerJoyrides.mp4"
test_captions_file_name = "captions.srt"
test_subtitles1_file_name = "subtitles-en.srt"
test_subtitles2_file_name = "subtitles-es.srt"

input_uri = f"gs://{input_bucket_name}{test_video_file_name}"
overlay_image_uri = f"gs://{input_bucket_name}{test_overlay_image_file_name}"
concat1_uri = f"gs://{input_bucket_name}{test_concat1_file_name}"
concat2_uri = f"gs://{input_bucket_name}{test_concat2_file_name}"
captions_uri = f"gs://{input_bucket_name}{test_captions_file_name}"
subtitles1_uri = f"gs://{input_bucket_name}{test_subtitles1_file_name}"
subtitles2_uri = f"gs://{input_bucket_name}{test_subtitles2_file_name}"
output_uri_for_preset = f"gs://{output_bucket_name}/test-output-preset/"
output_uri_for_template = f"gs://{output_bucket_name}/test-output-template/"
output_uri_for_adhoc = f"gs://{output_bucket_name}/test-output-adhoc/"
output_uri_for_static_overlay = f"gs://{output_bucket_name}/test-output-static-overlay/"
output_uri_for_animated_overlay = (
    f"gs://{output_bucket_name}/test-output-animated-overlay/"
)
output_uri_for_embedded_captions = (
    f"gs://{output_bucket_name}/test-output-embedded-captions/"
)
output_uri_for_standalone_captions = (
    f"gs://{output_bucket_name}/test-output-standalone-captions/"
)

small_spritesheet_file_prefix = "small-sprite-sheet"
large_spritesheet_file_prefix = "large-sprite-sheet"
spritesheet_file_suffix = "0000000000.jpeg"

output_dir_for_set_number_spritesheet = "test-output-set-number-spritesheet/"
output_uri_for_set_number_spritesheet = (
    f"gs://{output_bucket_name}/{output_dir_for_set_number_spritesheet}"
)
output_dir_for_periodic_spritesheet = "test-output-periodic-spritesheet/"
output_uri_for_periodic_spritesheet = (
    f"gs://{output_bucket_name}/{output_dir_for_periodic_spritesheet}"
)
output_uri_for_concat = f"gs://{output_bucket_name}/test-output-concat/"

preset = "preset/web-hd"
job_succeeded_state = "ProcessingState.SUCCEEDED"
job_running_state = "ProcessingState.RUNNING"


@pytest.fixture(scope="module")
def test_bucket():
    """Creates a scratch Cloud Storage bucket for job output, then deletes it."""
    storage_client = storage.Client()
    bucket = storage_client.create_bucket(output_bucket_name)

    yield bucket
    bucket.delete(force=True)


def _created_job_id(capsys):
    """Reads the captured output of a create_* sample and returns the job ID.

    Asserts the sample printed a fully qualified job name of the form
    projects/<number>/locations/<location>/jobs/<job-id>.
    """
    out, _ = capsys.readouterr()
    job_name_prefix = f"projects/{project_number}/locations/{location}/jobs/"
    assert job_name_prefix in out
    # The job ID is the last path segment of the printed resource name.
    return out.split("/")[-1].rstrip("\n")


def _check_job_lifecycle(capsys, job_id, state_assertion, extra_checks=None):
    """Gets, waits on, lists, and finally deletes the given job.

    Args:
        capsys: The pytest output-capture fixture.
        job_id: ID of the job under test.
        state_assertion: Callable asserting the job reached an acceptable
            processing state (succeeded, or succeeded-or-running).
        extra_checks: Optional zero-argument callable run after the state
            assertion (e.g. verifying output files exist in the bucket).
    """
    job_name = f"projects/{project_number}/locations/{location}/jobs/{job_id}"

    get_job.get_job(project_id, location, job_id)
    out, _ = capsys.readouterr()
    assert job_name in out

    # Transcoding jobs need time to complete. Once the job completes, check
    # the job state.
    time.sleep(30)

    state_assertion(capsys, job_id)
    if extra_checks is not None:
        extra_checks()

    list_jobs.list_jobs(project_id, location)
    out, _ = capsys.readouterr()
    assert job_name in out

    delete_job.delete_job(project_id, location, job_id)
    out, _ = capsys.readouterr()
    assert "Deleted job" in out


def test_create_job_from_preset(capsys, test_bucket):
    create_job_from_preset.create_job_from_preset(
        project_id, location, input_uri, output_uri_for_preset, preset
    )
    job_id = _created_job_id(capsys)
    _check_job_lifecycle(capsys, job_id, _assert_job_state_succeeded_or_running)


def test_create_job_from_template(capsys, test_bucket):
    job_template_name = (
        f"projects/{project_number}/locations/{location}/jobTemplates/{template_id}"
    )

    create_job_template.create_job_template(project_id, location, template_id)
    out, _ = capsys.readouterr()
    assert job_template_name in out

    create_job_from_template.create_job_from_template(
        project_id, location, input_uri, output_uri_for_template, template_id
    )
    job_id = _created_job_id(capsys)
    _check_job_lifecycle(capsys, job_id, _assert_job_state_succeeded_or_running)

    delete_job_template.delete_job_template(project_id, location, template_id)
    out, _ = capsys.readouterr()
    assert "Deleted job template" in out


def test_create_job_from_ad_hoc(capsys, test_bucket):
    create_job_from_ad_hoc.create_job_from_ad_hoc(
        project_id, location, input_uri, output_uri_for_adhoc
    )
    job_id = _created_job_id(capsys)
    _check_job_lifecycle(capsys, job_id, _assert_job_state_succeeded_or_running)


def test_create_job_with_static_overlay(capsys, test_bucket):
    create_job_with_static_overlay.create_job_with_static_overlay(
        project_id,
        location,
        input_uri,
        overlay_image_uri,
        output_uri_for_static_overlay,
    )
    job_id = _created_job_id(capsys)
    _check_job_lifecycle(capsys, job_id, _assert_job_state_succeeded)


def test_create_job_with_animated_overlay(capsys, test_bucket):
    create_job_with_animated_overlay.create_job_with_animated_overlay(
        project_id,
        location,
        input_uri,
        overlay_image_uri,
        output_uri_for_animated_overlay,
    )
    job_id = _created_job_id(capsys)
    _check_job_lifecycle(capsys, job_id, _assert_job_state_succeeded)


def test_create_job_with_set_number_spritesheet(capsys, test_bucket):
    create_job_with_set_number_images_spritesheet.create_job_with_set_number_images_spritesheet(
        project_id,
        location,
        input_uri,
        output_uri_for_set_number_spritesheet,
    )
    job_id = _created_job_id(capsys)

    def check_spritesheets():
        # Both the small and the large spritesheet must have been written.
        for prefix in (small_spritesheet_file_prefix, large_spritesheet_file_prefix):
            _assert_file_in_bucket(
                capsys,
                test_bucket,
                output_dir_for_set_number_spritesheet
                + prefix
                + spritesheet_file_suffix,
            )

    _check_job_lifecycle(
        capsys, job_id, _assert_job_state_succeeded, check_spritesheets
    )


def test_create_job_with_periodic_spritesheet(capsys, test_bucket):
    create_job_with_periodic_images_spritesheet.create_job_with_periodic_images_spritesheet(
        project_id,
        location,
        input_uri,
        output_uri_for_periodic_spritesheet,
    )
    job_id = _created_job_id(capsys)

    def check_spritesheets():
        # Both the small and the large spritesheet must have been written.
        for prefix in (small_spritesheet_file_prefix, large_spritesheet_file_prefix):
            _assert_file_in_bucket(
                capsys,
                test_bucket,
                output_dir_for_periodic_spritesheet
                + prefix
                + spritesheet_file_suffix,
            )

    _check_job_lifecycle(
        capsys, job_id, _assert_job_state_succeeded, check_spritesheets
    )


def test_create_job_with_concatenated_inputs(capsys, test_bucket):
    create_job_with_concatenated_inputs.create_job_with_concatenated_inputs(
        project_id,
        location,
        concat1_uri,
        "0s",
        "8.1s",
        concat2_uri,
        "3.5s",
        "15s",
        output_uri_for_concat,
    )
    job_id = _created_job_id(capsys)
    _check_job_lifecycle(capsys, job_id, _assert_job_state_succeeded)


def test_create_job_with_embedded_captions(capsys, test_bucket):
    create_job_with_embedded_captions.create_job_with_embedded_captions(
        project_id,
        location,
        input_uri,
        captions_uri,
        output_uri_for_embedded_captions,
    )
    job_id = _created_job_id(capsys)
    _check_job_lifecycle(capsys, job_id, _assert_job_state_succeeded)


def test_create_job_with_standalone_captions(capsys, test_bucket):
    create_job_with_standalone_captions.create_job_with_standalone_captions(
        project_id,
        location,
        input_uri,
        subtitles1_uri,
        subtitles2_uri,
        output_uri_for_standalone_captions,
    )
    job_id = _created_job_id(capsys)
    _check_job_lifecycle(capsys, job_id, _assert_job_state_succeeded)


# Retrying up to 10 mins. This function checks if the job completed
# successfully.
@backoff.on_exception(backoff.expo, AssertionError, max_time=600)
def _assert_job_state_succeeded(capsys, job_id):
    try:
        get_job_state.get_job_state(project_id, location, job_id)
    except HttpError as err:
        raise AssertionError(f"Could not get job state: {err.resp.status}")

    out, _ = capsys.readouterr()
    assert job_succeeded_state in out


# Retrying up to 10 mins. This function checks if the job is running or has
# completed. Both of these conditions signal the API is functioning. The test
# can list or delete a job that is running or completed with no ill effects.
@backoff.on_exception(backoff.expo, AssertionError, max_time=600)
def _assert_job_state_succeeded_or_running(capsys, job_id):
    try:
        get_job_state.get_job_state(project_id, location, job_id)
    except HttpError as err:
        raise AssertionError(f"Could not get job state: {err.resp.status}")

    out, _ = capsys.readouterr()
    assert (job_succeeded_state in out) or (job_running_state in out)


def _assert_file_in_bucket(capsys, test_bucket, directory_and_filename):
    """Asserts that the given object exists in the scratch output bucket."""
    blob = test_bucket.blob(directory_and_filename)
    assert blob.exists()
"""Google Cloud Transcoder sample for listing job templates in a location.

Example usage:
    python list_job_templates.py --project_id <project-id> --location <location>
"""

# [START transcoder_list_job_templates]

import argparse

from google.cloud.video.transcoder_v1.services.transcoder_service import (
    TranscoderServiceClient,
)


def list_job_templates(project_id, location):
    """Lists all job templates in a location.

    Args:
        project_id: The GCP project ID.
        location: The location of the templates.

    Returns:
        The ListJobTemplates response (iterable of job templates).
    """
    client = TranscoderServiceClient()

    parent = f"projects/{project_id}/locations/{location}"
    response = client.list_job_templates(parent=parent)
    print("Job templates:")
    for job_template in response.job_templates:
        # Print the resource name directly. The original wrapped it in a set
        # literal ({...}), which printed e.g. {'projects/...'} instead of the
        # bare name.
        print(job_template.name)

    return response


# [END transcoder_list_job_templates]

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--project_id", help="Your Cloud project ID.", required=True)
    parser.add_argument(
        "--location", help="The location of the templates.", required=True
    )
    args = parser.parse_args()
    list_job_templates(args.project_id, args.location)
"""Google Cloud Transcoder sample for listing jobs in a location.

Example usage:
    python list_jobs.py --project_id <project-id> --location <location>
"""

# [START transcoder_list_jobs]

import argparse

from google.cloud.video.transcoder_v1.services.transcoder_service import (
    TranscoderServiceClient,
)


def list_jobs(project_id, location):
    """Lists all jobs in a location.

    Args:
        project_id: The GCP project ID.
        location: The location of the jobs.

    Returns:
        The ListJobs response (iterable of jobs).
    """
    client = TranscoderServiceClient()

    parent = f"projects/{project_id}/locations/{location}"
    response = client.list_jobs(parent=parent)
    print("Jobs:")
    for job in response.jobs:
        # Print the resource name directly. The original wrapped it in a set
        # literal ({...}), which printed e.g. {'projects/...'} instead of the
        # bare name.
        print(job.name)

    return response


# [END transcoder_list_jobs]

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--project_id", help="Your Cloud project ID.", required=True)
    parser.add_argument("--location", help="The location of the jobs.", required=True)
    args = parser.parse_args()
    list_jobs(args.project_id, args.location)
Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + "envs": {}, +} diff --git a/video/transcoder/requirements-test.txt b/video/transcoder/requirements-test.txt new file mode 100644 index 00000000000..0852b43c2d3 --- /dev/null +++ b/video/transcoder/requirements-test.txt @@ -0,0 +1,3 @@ +backoff==2.2.1 +google-cloud-storage==2.9.0 +pytest==7.3.1 diff --git a/video/transcoder/requirements.txt b/video/transcoder/requirements.txt new file mode 100644 index 00000000000..4c8f7dfb8eb --- /dev/null +++ b/video/transcoder/requirements.txt @@ -0,0 +1,3 @@ +google-api-python-client==2.88.0 +grpcio==1.54.2 +google-cloud-video-transcoder==1.9.0