diff --git a/head b/head
index e5209205ced..1c1bba2109d 100644
--- a/head
+++ b/head
@@ -1 +1 @@
-579fe684ce5500480bf7561d27b7a3ac9bbc7c88
+1eadef6a63051424cda6de9ed82664b49b7be145
diff --git a/upstream/.github/workflows/ci.yaml b/upstream/.github/workflows/ci.yaml
index c791d90b75c..fd0ae8788f1 100644
--- a/upstream/.github/workflows/ci.yaml
+++ b/upstream/.github/workflows/ci.yaml
@@ -45,7 +45,7 @@ jobs:
- name: golangci-lint
uses: golangci/golangci-lint-action@ec5d18412c0aeab7936cb16880d708ba2a64e1ae # v6.2.0
with:
- version: v1.61.0
+ version: v1.57.2
args: --timeout=10m
- name: yamllint
run: |
diff --git a/upstream/.github/workflows/codeql-analysis.yml b/upstream/.github/workflows/codeql-analysis.yml
index e2b259f9de2..3938c671ec5 100644
--- a/upstream/.github/workflows/codeql-analysis.yml
+++ b/upstream/.github/workflows/codeql-analysis.yml
@@ -49,20 +49,16 @@ jobs:
steps:
- name: Harden Runner
- uses: step-security/harden-runner@91182cccc01eb5e619899d80e4e971d6181294a7 # v2.10.1
+ uses: step-security/harden-runner@63c24ba6bd7ba022e95695ff85de572c04a18142 # v2.7.0
with:
egress-policy: audit
- name: Checkout repository
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- - name: Setup go
- uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5
- with:
- go-version: '1.22.x'
+ uses: actions/checkout@1d96c772d19495a3b5c517cd2bc0cb401ea0529f # v4.1.3
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
- uses: github/codeql-action/init@f779452ac5af1c261dce0346a8f964149f49322b # v3.26.13
+ uses: github/codeql-action/init@c7f9125735019aa87cfc361530512d50ea439c71 # v3.25.1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@@ -70,7 +66,7 @@ jobs:
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
- - uses: actions/cache@3624ceb22c1c5a301c8db4169662070a689d9ea8 # v4.1.1
+ - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: |
~/.cache/go-build
@@ -96,4 +92,4 @@ jobs:
make -j 4 all
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@f779452ac5af1c261dce0346a8f964149f49322b # v3.26.13
+ uses: github/codeql-action/analyze@c7f9125735019aa87cfc361530512d50ea439c71 # v3.25.1
diff --git a/upstream/.github/workflows/dependency-review.yml b/upstream/.github/workflows/dependency-review.yml
index b362633e6dc..bb377e05999 100644
--- a/upstream/.github/workflows/dependency-review.yml
+++ b/upstream/.github/workflows/dependency-review.yml
@@ -17,13 +17,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Harden Runner
- uses: step-security/harden-runner@91182cccc01eb5e619899d80e4e971d6181294a7 # v2.10.1
+ uses: step-security/harden-runner@0080882f6c36860b6ba35c610c98ce87d4e2f26f # v2.10.2
with:
egress-policy: audit
- name: 'Checkout Repository'
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: 'Dependency Review'
- uses: actions/dependency-review-action@5a2ce3f5b92ee19cbb1541a4984c76d921601d7c # v4.3.4
+ uses: actions/dependency-review-action@3b139cfc5fae8b618d3eae3675e383bb1769c019 # v4.5.0
with:
fail-on-severity: low
diff --git a/upstream/.github/workflows/scorecard.yml b/upstream/.github/workflows/scorecard.yml
index 26407912ed7..0a651e4129c 100644
--- a/upstream/.github/workflows/scorecard.yml
+++ b/upstream/.github/workflows/scorecard.yml
@@ -29,16 +29,16 @@ jobs:
steps:
- name: Harden Runner
- uses: step-security/harden-runner@91182cccc01eb5e619899d80e4e971d6181294a7 # v2.10.1
+ uses: step-security/harden-runner@63c24ba6bd7ba022e95695ff85de572c04a18142 # v2.7.0
with:
egress-policy: audit
- name: "Checkout code"
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
+ uses: actions/checkout@1d96c772d19495a3b5c517cd2bc0cb401ea0529f # v4.1.3
with:
persist-credentials: false
- name: "Run analysis"
- uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0
+ uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1
with:
results_file: results.sarif
results_format: sarif
@@ -53,7 +53,7 @@ jobs:
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
- uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
+ uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
with:
name: SARIF file
path: results.sarif
@@ -61,6 +61,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
- uses: github/codeql-action/upload-sarif@f779452ac5af1c261dce0346a8f964149f49322b # v3.26.13
+ uses: github/codeql-action/upload-sarif@c7f9125735019aa87cfc361530512d50ea439c71 # v3.25.1
with:
sarif_file: results.sarif
diff --git a/upstream/.github/workflows/woke.yml b/upstream/.github/workflows/woke.yml
index 443ba93e313..abd498d453e 100644
--- a/upstream/.github/workflows/woke.yml
+++ b/upstream/.github/workflows/woke.yml
@@ -11,15 +11,15 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Harden Runner
- uses: step-security/harden-runner@91182cccc01eb5e619899d80e4e971d6181294a7 # v2.10.1
+ uses: step-security/harden-runner@63c24ba6bd7ba022e95695ff85de572c04a18142 # v2.7.0
with:
egress-policy: audit
- name: 'Checkout'
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
+ uses: actions/checkout@1d96c772d19495a3b5c517cd2bc0cb401ea0529f # v4.1.3
- name: Get changed files
id: changed-files
- uses: tj-actions/changed-files@c3a1bb2c992d77180ae65be6ae6c166cf40f857c # v45.0.3
+ uses: tj-actions/changed-files@0874344d6ebbaa00a27da73276ae7162fadcaf69 # v44.3.0
with:
write_output_files: true
files: |
diff --git a/upstream/.golangci.yml b/upstream/.golangci.yml
index 3dfaf597ef3..72e9dbee8e5 100644
--- a/upstream/.golangci.yml
+++ b/upstream/.golangci.yml
@@ -1,10 +1,5 @@
# Documentation: https://golangci-lint.run/usage/configuration/
-
linters-settings:
- gosec:
- excludes:
- - G601
- exclude-generated: true
errcheck:
exclude-functions:
- (*github.com/tektoncd/pipeline/vendor/go.uber.org/zap.SugaredLogger).Sync
@@ -85,13 +80,12 @@ linters:
- gocyclo
- godot
- godox
- - err113
+ - goerr113
- gofumpt
- gomnd
- gomoddirectives
- ireturn
- lll
- - mnd
- nestif
- nlreturn
- nonamedreturns
diff --git a/upstream/DEVELOPMENT.md b/upstream/DEVELOPMENT.md
index 51ccc7abcbc..00658792995 100644
--- a/upstream/DEVELOPMENT.md
+++ b/upstream/DEVELOPMENT.md
@@ -283,7 +283,7 @@ as follows.
The recommended minimum development configuration is:
-- Kubernetes version 1.28 or later
+- Kubernetes version 1.27 or later
- 4 (virtual) CPU nodes
- 8 GB of (actual or virtualized) platform memory
- Node autoscaling, up to 3 nodes
@@ -341,7 +341,7 @@ optional: As a convenience, the [Tekton plumbing project](https://github.com/tek
--region=us-central1 \
--machine-type=e2-standard-4 \
--num-nodes=1 \
- --cluster-version=1.28
+ --cluster-version=1.27
```
> **Note**: The recommended [GCE machine type](https://cloud.google.com/compute/docs/machine-types) is `'e2-standard-4'`.
diff --git a/upstream/Makefile b/upstream/Makefile
index c3676d263c5..519e8b11fee 100644
--- a/upstream/Makefile
+++ b/upstream/Makefile
@@ -9,8 +9,7 @@ TESTPKGS = $(shell env GO111MODULE=on $(GO) list -f \
BIN = $(CURDIR)/.bin
WOKE ?= go run -modfile go.mod github.com/get-woke/woke
-# Get golangci_version from tools/go.mod
-GOLANGCI_VERSION := $(shell cat tools/go.mod | grep golangci-lint | awk '{ print $$3 }')
+GOLANGCI_VERSION = v1.57.2
WOKE_VERSION = v0.19.0
GO = go
@@ -166,10 +165,9 @@ $(BIN)/errcheck: PACKAGE=github.com/kisielk/errcheck
errcheck: | $(ERRCHECK) ; $(info $(M) running errcheck…) ## Run errcheck
$Q $(ERRCHECK) ./...
-GOLANGCILINT = $(BIN)/golangci-lint-$(GOLANGCI_VERSION)
-$(BIN)/golangci-lint-$(GOLANGCI_VERSION): ; $(info $(M) getting golangci-lint $(GOLANGCI_VERSION))
- cd tools; go mod download github.com/golangci/golangci-lint && go mod tidy
- cd tools; go build -o $(BIN)/golangci-lint-$(GOLANGCI_VERSION) github.com/golangci/golangci-lint/cmd/golangci-lint
+GOLANGCILINT = $(BIN)/golangci-lint
+$(BIN)/golangci-lint: ; $(info $(M) getting golangci-lint $(GOLANGCI_VERSION))
+ cd tools; GOBIN=$(BIN) go install github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_VERSION)
.PHONY: golangci-lint
golangci-lint: | $(GOLANGCILINT) ; $(info $(M) running golangci-lint…) @ ## Run golangci-lint
diff --git a/upstream/OWNERS_ALIASES b/upstream/OWNERS_ALIASES
index 8a0aa0b41e1..ca9443a941d 100644
--- a/upstream/OWNERS_ALIASES
+++ b/upstream/OWNERS_ALIASES
@@ -1,11 +1,13 @@
aliases:
pipeline-approvers:
- afrittoli
+ - bobcatfish
- dibyom
- ImJasonH
- vdemeester
- pritidesai
- jerop
+ - lbernick
- abayer
- wlynch
- yongxuanzhang
@@ -14,6 +16,7 @@ aliases:
pipeline-reviewers:
- afrittoli
+ - bobcatfish
- dibyom
- vdemeester
- pritidesai
@@ -23,16 +26,19 @@ aliases:
apis-approvers:
- afrittoli
+ - bobcatfish
- dibyom
- ImJasonH
- vdemeester
- pritidesai
- jerop
+ - lbernick
- abayer
- wlynch
productivity-approvers:
- afrittoli
+ - bobcatfish
- vdemeester
# Alumni ❤️
@@ -42,5 +48,3 @@ aliases:
# aaron-prindle
# sbwsg
# dlorenc
-# lbernick
-# bobcatfish
\ No newline at end of file
diff --git a/upstream/README.md b/upstream/README.md
index d9692f738cb..a73f36dbe0b 100644
--- a/upstream/README.md
+++ b/upstream/README.md
@@ -44,7 +44,6 @@ Tekton Pipelines are **Typed**:
- Starting from the v0.45.x release of Tekton: **Kubernetes version 1.24 or later**
- Starting from the v0.51.x release of Tekton: **Kubernetes version 1.25 or later**
- Starting from the v0.59.x release of Tekton: **Kubernetes version 1.27 or later**
-- Starting from the v0.61.x release of Tekton: **Kubernetes version 1.28 or later**
### Read the docs
diff --git a/upstream/cmd/controller/main.go b/upstream/cmd/controller/main.go
index 6b0733329be..664eafe21cc 100644
--- a/upstream/cmd/controller/main.go
+++ b/upstream/cmd/controller/main.go
@@ -56,7 +56,6 @@ func main() {
flag.StringVar(&opts.Images.ShellImage, "shell-image", "", "The container image containing a shell")
flag.StringVar(&opts.Images.ShellImageWin, "shell-image-win", "", "The container image containing a windows shell")
flag.StringVar(&opts.Images.WorkingDirInitImage, "workingdirinit-image", "", "The container image containing our working dir init binary.")
- flag.DurationVar(&opts.ResyncPeriod, "resync-period", controller.DefaultResyncPeriod, "The period between two resync run (going through all objects)")
// This parses flags.
cfg := injection.ParseAndGetRESTConfigOrDie()
@@ -99,8 +98,6 @@ func main() {
}()
ctx = filteredinformerfactory.WithSelectors(ctx, v1beta1.ManagedByLabelKey)
- ctx = controller.WithResyncPeriod(ctx, opts.ResyncPeriod)
-
sharedmain.MainWithConfig(ctx, ControllerLogKey, cfg,
taskrun.NewController(opts, clock.RealClock{}),
pipelinerun.NewController(opts, clock.RealClock{}),
diff --git a/upstream/cmd/entrypoint/main.go b/upstream/cmd/entrypoint/main.go
index 44c8fe5de3a..adf7a0a9d4a 100644
--- a/upstream/cmd/entrypoint/main.go
+++ b/upstream/cmd/entrypoint/main.go
@@ -17,6 +17,7 @@ limitations under the License.
package main
import (
+ "context"
"encoding/json"
"errors"
"flag"
@@ -32,7 +33,6 @@ import (
"github.com/tektoncd/pipeline/cmd/entrypoint/subcommands"
featureFlags "github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
- v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/credentials"
"github.com/tektoncd/pipeline/pkg/credentials/dockercreds"
"github.com/tektoncd/pipeline/pkg/credentials/gitcreds"
@@ -50,12 +50,10 @@ var (
terminationPath = flag.String("termination_path", "/tekton/termination", "If specified, file to write upon termination")
results = flag.String("results", "", "If specified, list of file names that might contain task results")
stepResults = flag.String("step_results", "", "step results if specified")
- whenExpressions = flag.String("when_expressions", "", "when expressions if specified")
timeout = flag.Duration("timeout", time.Duration(0), "If specified, sets timeout for step")
stdoutPath = flag.String("stdout_path", "", "If specified, file to copy stdout to")
stderrPath = flag.String("stderr_path", "", "If specified, file to copy stderr to")
breakpointOnFailure = flag.Bool("breakpoint_on_failure", false, "If specified, expect steps to not skip on failure")
- debugBeforeStep = flag.Bool("debug_before_step", false, "If specified, wait for a debugger to attach before executing the step")
onError = flag.String("on_error", "", "Set to \"continue\" to ignore an error and continue when a container terminates with a non-zero exit code."+
" Set to \"stopAndFail\" to declare a failure with a step error and stop executing the rest of the steps.")
stepMetadataDir = flag.String("step_metadata_dir", "", "If specified, create directory to store the step metadata e.g. /tekton/steps//")
@@ -66,8 +64,25 @@ var (
const (
defaultWaitPollingInterval = time.Second
+ breakpointExitSuffix = ".breakpointexit"
)
+func checkForBreakpointOnFailure(e entrypoint.Entrypointer, breakpointExitPostFile string) {
+ if e.BreakpointOnFailure {
+ if waitErr := e.Waiter.Wait(context.Background(), breakpointExitPostFile, false, false); waitErr != nil {
+ log.Println("error occurred while waiting for " + breakpointExitPostFile + " : " + waitErr.Error())
+ }
+ // get exitcode from .breakpointexit
+ exitCode, readErr := e.BreakpointExitCode(breakpointExitPostFile)
+ // if readErr exists, the exitcode with default to 0 as we would like
+ // to encourage to continue running the next steps in the taskRun
+ if readErr != nil {
+ log.Println("error occurred while reading breakpoint exit code : " + readErr.Error())
+ }
+ os.Exit(exitCode)
+ }
+}
+
func main() {
// Add credential flags originally introduced with our legacy credentials helper
// image (creds-init).
@@ -123,12 +138,6 @@ func main() {
log.Fatal(err)
}
}
- var when v1.StepWhenExpressions
- if len(*whenExpressions) > 0 {
- if err := json.Unmarshal([]byte(*whenExpressions), &when); err != nil {
- log.Fatal(err)
- }
- }
var spireWorkloadAPI spire.EntrypointerAPIClient
if enableSpire != nil && *enableSpire && socketPath != nil && *socketPath != "" {
@@ -153,9 +162,7 @@ func main() {
Results: strings.Split(*results, ","),
StepResults: strings.Split(*stepResults, ","),
Timeout: timeout,
- StepWhenExpressions: when,
BreakpointOnFailure: *breakpointOnFailure,
- DebugBeforeStep: *debugBeforeStep,
OnError: *onError,
StepMetadataDir: *stepMetadataDir,
SpireWorkloadAPI: spireWorkloadAPI,
@@ -169,10 +176,8 @@ func main() {
}
if err := e.Go(); err != nil {
+ breakpointExitPostFile := e.PostFile + breakpointExitSuffix
switch t := err.(type) { //nolint:errorlint // checking for multiple types with errors.As is ugly.
- case entrypoint.DebugBeforeStepError:
- log.Println("Skipping execute step script because before step breakpoint fail-continue")
- os.Exit(1)
case entrypoint.SkipError:
log.Print("Skipping step because a previous step failed")
os.Exit(1)
@@ -196,7 +201,7 @@ func main() {
// in both cases has an ExitStatus() method with the
// same signature.
if status, ok := t.Sys().(syscall.WaitStatus); ok {
- e.CheckForBreakpointOnFailure()
+ checkForBreakpointOnFailure(e, breakpointExitPostFile)
// ignore a step error i.e. do not exit if a container terminates with a non-zero exit code when onError is set to "continue"
if e.OnError != entrypoint.ContinueOnError {
os.Exit(status.ExitStatus())
@@ -207,7 +212,7 @@ func main() {
log.Fatalf("Error executing command (ExitError): %v", err)
}
default:
- e.CheckForBreakpointOnFailure()
+ checkForBreakpointOnFailure(e, breakpointExitPostFile)
log.Fatalf("Error executing command: %v", err)
}
}
diff --git a/upstream/cmd/resolvers/main.go b/upstream/cmd/resolvers/main.go
index a5f0cadf7ab..f66e9cd0a89 100644
--- a/upstream/cmd/resolvers/main.go
+++ b/upstream/cmd/resolvers/main.go
@@ -17,47 +17,27 @@ limitations under the License.
package main
import (
- "flag"
"os"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/bundle"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/cluster"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/git"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/http"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/hub"
- hubresolution "github.com/tektoncd/pipeline/pkg/resolution/resolver/hub"
- "k8s.io/client-go/rest"
+ "github.com/tektoncd/pipeline/pkg/resolution/resolver/bundle"
+ "github.com/tektoncd/pipeline/pkg/resolution/resolver/cluster"
+ "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
+ "github.com/tektoncd/pipeline/pkg/resolution/resolver/git"
+ "github.com/tektoncd/pipeline/pkg/resolution/resolver/http"
+ "github.com/tektoncd/pipeline/pkg/resolution/resolver/hub"
filteredinformerfactory "knative.dev/pkg/client/injection/kube/informers/factory/filtered"
- "knative.dev/pkg/controller"
- "knative.dev/pkg/injection"
"knative.dev/pkg/injection/sharedmain"
"knative.dev/pkg/signals"
)
func main() {
- flag.IntVar(&controller.DefaultThreadsPerController, "threads-per-controller", controller.DefaultThreadsPerController, "Threads (goroutines) to create per controller")
-
ctx := filteredinformerfactory.WithSelectors(signals.NewContext(), v1alpha1.ManagedByLabelKey)
tektonHubURL := buildHubURL(os.Getenv("TEKTON_HUB_API"), "")
- artifactHubURL := buildHubURL(os.Getenv("ARTIFACT_HUB_API"), hubresolution.DefaultArtifactHubURL)
-
- // This parses flags.
- cfg := injection.ParseAndGetRESTConfigOrDie()
-
- if cfg.QPS == 0 {
- cfg.QPS = 2 * rest.DefaultQPS
- }
- if cfg.Burst == 0 {
- cfg.Burst = rest.DefaultBurst
- }
- // multiply by no of controllers being created
- cfg.QPS = 5 * cfg.QPS
- cfg.Burst = 5 * cfg.Burst
+ artifactHubURL := buildHubURL(os.Getenv("ARTIFACT_HUB_API"), hub.DefaultArtifactHubURL)
- sharedmain.MainWithConfig(ctx, "controller", cfg,
+ sharedmain.MainWithContext(ctx, "controller",
framework.NewController(ctx, &git.Resolver{}),
framework.NewController(ctx, &hub.Resolver{TektonHubURL: tektonHubURL, ArtifactHubURL: artifactHubURL}),
framework.NewController(ctx, &bundle.Resolver{}),
diff --git a/upstream/cmd/sidecarlogresults/main.go b/upstream/cmd/sidecarlogresults/main.go
index b63d3c4feb3..c66ce1133e0 100644
--- a/upstream/cmd/sidecarlogresults/main.go
+++ b/upstream/cmd/sidecarlogresults/main.go
@@ -32,19 +32,14 @@ func main() {
var resultsDir string
var resultNames string
var stepResultsStr string
- var stepNames string
-
flag.StringVar(&resultsDir, "results-dir", pipeline.DefaultResultPath, "Path to the results directory. Default is /tekton/results")
flag.StringVar(&resultNames, "result-names", "", "comma separated result names to expect from the steps running in the pod. eg. foo,bar,baz")
flag.StringVar(&stepResultsStr, "step-results", "", "json containing a map of step Name as key and list of result Names. eg. {\"stepName\":[\"foo\",\"bar\",\"baz\"]}")
- flag.StringVar(&stepNames, "step-names", "", "comma separated step names. eg. foo,bar,baz")
flag.Parse()
-
- var expectedResults []string
- // strings.Split returns [""] instead of [] for empty string, we don't want pass [""] to other methods.
- if len(resultNames) > 0 {
- expectedResults = strings.Split(resultNames, ",")
+ if resultNames == "" {
+ log.Fatal("result-names were not provided")
}
+ expectedResults := strings.Split(resultNames, ",")
expectedStepResults := map[string][]string{}
if err := json.Unmarshal([]byte(stepResultsStr), &expectedStepResults); err != nil {
log.Fatal(err)
@@ -53,13 +48,4 @@ func main() {
if err != nil {
log.Fatal(err)
}
-
- var names []string
- if len(stepNames) > 0 {
- names = strings.Split(stepNames, ",")
- }
- err = sidecarlogresults.LookForArtifacts(os.Stdout, names, pod.RunDir)
- if err != nil {
- log.Fatal(err)
- }
}
diff --git a/upstream/cmd/webhook/main.go b/upstream/cmd/webhook/main.go
index 7281f0af421..02128320bc1 100644
--- a/upstream/cmd/webhook/main.go
+++ b/upstream/cmd/webhook/main.go
@@ -60,7 +60,6 @@ var types = map[schema.GroupVersionKind]resourcesemantics.GenericCRD{
v1beta1.SchemeGroupVersion.WithKind("TaskRun"): &v1beta1.TaskRun{},
v1beta1.SchemeGroupVersion.WithKind("PipelineRun"): &v1beta1.PipelineRun{},
v1beta1.SchemeGroupVersion.WithKind("CustomRun"): &v1beta1.CustomRun{},
- v1beta1.SchemeGroupVersion.WithKind("StepAction"): &v1beta1.StepAction{},
// v1
v1.SchemeGroupVersion.WithKind("Task"): &v1.Task{},
v1.SchemeGroupVersion.WithKind("Pipeline"): &v1.Pipeline{},
@@ -154,7 +153,6 @@ func newConfigValidationController(name string) func(context.Context, configmap.
func newConversionController(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
var (
- v1alpha1GroupVersion = v1alpha1.SchemeGroupVersion.Version
v1beta1GroupVersion = v1beta1.SchemeGroupVersion.Version
v1GroupVersion = v1.SchemeGroupVersion.Version
resolutionv1alpha1GroupVersion = resolutionv1alpha1.SchemeGroupVersion.Version
@@ -172,14 +170,6 @@ func newConversionController(ctx context.Context, cmw configmap.Watcher) *contro
// conversions to and from all types.
// "Zygotes" are the supported versions.
map[schema.GroupKind]conversion.GroupKindConversion{
- v1beta1.Kind("StepAction"): {
- DefinitionName: pipeline.StepActionResource.String(),
- HubVersion: v1alpha1GroupVersion,
- Zygotes: map[string]conversion.ConvertibleObject{
- v1alpha1GroupVersion: &v1alpha1.StepAction{},
- v1beta1GroupVersion: &v1beta1.StepAction{},
- },
- },
v1.Kind("Task"): {
DefinitionName: pipeline.TaskResource.String(),
HubVersion: v1beta1GroupVersion,
diff --git a/upstream/config/300-crds/300-stepaction.yaml b/upstream/config/300-crds/300-stepaction.yaml
index 5f91f9629b3..a27eaf20179 100644
--- a/upstream/config/300-crds/300-stepaction.yaml
+++ b/upstream/config/300-crds/300-stepaction.yaml
@@ -26,24 +26,6 @@ spec:
preserveUnknownFields: false
versions:
- name: v1alpha1
- served: true
- storage: false
- schema:
- openAPIV3Schema:
- type: object
- # One can use x-kubernetes-preserve-unknown-fields: true
- # at the root of the schema (and inside any properties, additionalProperties)
- # to get the traditional CRD behaviour that nothing is pruned, despite
- # setting spec.preserveUnknownProperties: false.
- #
- # See https://kubernetes.io/blog/2019/06/20/crd-structural-schema/
- # See issue: https://github.com/knative/serving/issues/912
- x-kubernetes-preserve-unknown-fields: true
- # Opt into the status subresource so metadata.generation
- # starts to increment
- subresources:
- status: {}
- - name: v1beta1
served: true
storage: true
schema:
diff --git a/upstream/config/config-feature-flags.yaml b/upstream/config/config-feature-flags.yaml
index 398f37d1880..bea6994cc99 100644
--- a/upstream/config/config-feature-flags.yaml
+++ b/upstream/config/config-feature-flags.yaml
@@ -107,7 +107,8 @@ data:
enforce-nonfalsifiability: "none"
# Setting this flag will determine how Tekton pipelines will handle extracting results from the task.
# Acceptable values are "termination-message" or "sidecar-logs".
- # "sidecar-logs" is now a beta feature.
+ # "sidecar-logs" is an experimental feature and thus should still be considered
+ # an alpha feature.
results-from: "termination-message"
# Setting this flag will determine the upper limit of each task result
# This flag is optional and only associated with the previous flag, results-from
@@ -136,7 +137,3 @@ data:
# "pipelinerun" for Pipelinerun and "taskrun" for Taskrun. Or a combination of
# these.
disable-inline-spec: ""
- # Setting this flag to "true" will enable the use of concise resolver syntax
- enable-concise-resolver-syntax: "false"
- # Setthing this flag to "true" will enable native Kubernetes Sidecar support
- enable-kubernetes-sidecar: "false"
diff --git a/upstream/config/config-observability.yaml b/upstream/config/config-observability.yaml
index facda374d83..f1f800beb06 100644
--- a/upstream/config/config-observability.yaml
+++ b/upstream/config/config-observability.yaml
@@ -59,4 +59,3 @@ data:
metrics.pipelinerun.level: "pipeline"
metrics.pipelinerun.duration-type: "histogram"
metrics.count.enable-reason: "false"
- metrics.running-pipelinerun.level: ""
diff --git a/upstream/config/controller.yaml b/upstream/config/controller.yaml
index 4e70e1c4a53..dad4866396b 100644
--- a/upstream/config/controller.yaml
+++ b/upstream/config/controller.yaml
@@ -112,7 +112,6 @@ spec:
value: tekton.dev/pipeline
securityContext:
allowPrivilegeEscalation: false
- readOnlyRootFilesystem: true
capabilities:
drop:
- "ALL"
diff --git a/upstream/config/events.yaml b/upstream/config/events.yaml
index 03914c8a346..088c0e28c15 100644
--- a/upstream/config/events.yaml
+++ b/upstream/config/events.yaml
@@ -90,7 +90,6 @@ spec:
value: /etc/ssl/certs
securityContext:
allowPrivilegeEscalation: false
- readOnlyRootFilesystem: true
capabilities:
drop:
- "ALL"
diff --git a/upstream/config/resolvers/200-clusterrole.yaml b/upstream/config/resolvers/200-clusterrole.yaml
index 4364334095d..3215d74b145 100644
--- a/upstream/config/resolvers/200-clusterrole.yaml
+++ b/upstream/config/resolvers/200-clusterrole.yaml
@@ -26,9 +26,9 @@ rules:
resources: ["resolutionrequests", "resolutionrequests/status"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["tekton.dev"]
- resources: ["tasks", "pipelines", "stepactions"]
+ resources: ["tasks", "pipelines"]
verbs: ["get", "list"]
# Read-only access to these.
- apiGroups: [""]
- resources: ["secrets", "serviceaccounts"]
+ resources: ["secrets"]
verbs: ["get", "list", "watch"]
diff --git a/upstream/config/resolvers/bundleresolver-config.yaml b/upstream/config/resolvers/bundleresolver-config.yaml
index d48372ddd11..62e53fec96a 100644
--- a/upstream/config/resolvers/bundleresolver-config.yaml
+++ b/upstream/config/resolvers/bundleresolver-config.yaml
@@ -22,7 +22,5 @@ metadata:
app.kubernetes.io/instance: default
app.kubernetes.io/part-of: tekton-pipelines
data:
- # the default service account name to use for bundle requests.
- default-service-account: "default"
# The default layer kind in the bundle image.
default-kind: "task"
diff --git a/upstream/config/webhook.yaml b/upstream/config/webhook.yaml
index e80a5bc93df..41922fa1c1b 100644
--- a/upstream/config/webhook.yaml
+++ b/upstream/config/webhook.yaml
@@ -126,7 +126,6 @@ spec:
value: tekton.dev/pipeline
securityContext:
allowPrivilegeEscalation: false
- readOnlyRootFilesystem: true
capabilities:
drop:
- "ALL"
diff --git a/upstream/docs/additional-configs.md b/upstream/docs/additional-configs.md
index 6b307bbd151..be60c6c3b2d 100644
--- a/upstream/docs/additional-configs.md
+++ b/upstream/docs/additional-configs.md
@@ -237,7 +237,7 @@ The example below customizes the following:
- the default service account from `default` to `tekton`.
- the default timeout from 60 minutes to 20 minutes.
- the default `app.kubernetes.io/managed-by` label is applied to all Pods created to execute `TaskRuns`.
-- the default Pod template to include a node selector to select the node where the Pod will be scheduled by default. A list of supported fields is available [here](./podtemplates.md#supported-fields).
+- the default Pod template to include a node selector to select the node where the Pod will be scheduled by default. A list of supported fields is available [here](https://github.com/tektoncd/pipeline/blob/main/docs/podtemplates.md#supported-fields).
For more information, see [`PodTemplate` in `TaskRuns`](./taskruns.md#specifying-a-pod-template) or [`PodTemplate` in `PipelineRuns`](./pipelineruns.md#specifying-a-pod-template).
- the default `Workspace` configuration can be set for any `Workspaces` that a Task declares but that a TaskRun does not explicitly provide.
- the default maximum combinations of `Parameters` in a `Matrix` that can be used to fan out a `PipelineTask`. For
@@ -333,8 +333,6 @@ enables [beta features](#beta-features). When using v1 APIs, setting this field
allows only stable features, and setting it to "beta" allows only beta features.
Set this field to "alpha" to allow [alpha features](#alpha-features) to be used.
-- `enable-kubernetes-sidecar`: Set this flag to `"true"` to enable native kubernetes sidecar support. This will allow Tekton sidecars to run as Kubernetes sidecars. Must be using Kubernetes v1.29 or greater.
-
For example:
```yaml
@@ -377,13 +375,17 @@ Features currently in "alpha" are:
| [Hermetic Execution Mode](./hermetic.md) | [TEP-0025](https://github.com/tektoncd/community/blob/main/teps/0025-hermekton.md) | [v0.25.0](https://github.com/tektoncd/pipeline/releases/tag/v0.25.0) | |
| [Windows Scripts](./tasks.md#windows-scripts) | [TEP-0057](https://github.com/tektoncd/community/blob/main/teps/0057-windows-support.md) | [v0.28.0](https://github.com/tektoncd/pipeline/releases/tag/v0.28.0) | |
| [Debug](./debug.md) | [TEP-0042](https://github.com/tektoncd/community/blob/main/teps/0042-taskrun-breakpoint-on-failure.md) | [v0.26.0](https://github.com/tektoncd/pipeline/releases/tag/v0.26.0) | |
+| [Step and Sidecar Overrides](./taskruns.md#overriding-task-steps-and-sidecars) | [TEP-0094](https://github.com/tektoncd/community/blob/main/teps/0094-specifying-resource-requirements-at-runtime.md) | [v0.34.0](https://github.com/tektoncd/pipeline/releases/tag/v0.34.0) | |
| [StdoutConfig and StderrConfig](./tasks#redirecting-step-output-streams-with-stdoutConfig-and-stderrConfig) | [TEP-0011](https://github.com/tektoncd/community/blob/main/teps/0011-redirecting-step-output-streams.md) | [v0.38.0](https://github.com/tektoncd/pipeline/releases/tag/v0.38.0) | |
| [Trusted Resources](./trusted-resources.md) | [TEP-0091](https://github.com/tektoncd/community/blob/main/teps/0091-trusted-resources.md) | [v0.49.0](https://github.com/tektoncd/pipeline/releases/tag/v0.49.0) | `trusted-resources-verification-no-match-policy` |
+| [Larger Results via Sidecar Logs](#enabling-larger-results-using-sidecar-logs) | [TEP-0127](https://github.com/tektoncd/community/blob/main/teps/0127-larger-results-via-sidecar-logs.md) | [v0.43.0](https://github.com/tektoncd/pipeline/releases/tag/v0.43.0) | `results-from` |
| [Configure Default Resolver](./resolution.md#configuring-built-in-resolvers) | [TEP-0133](https://github.com/tektoncd/community/blob/main/teps/0133-configure-default-resolver.md) | [v0.46.0](https://github.com/tektoncd/pipeline/releases/tag/v0.46.0) | |
| [Coschedule](./affinityassistants.md) | [TEP-0135](https://github.com/tektoncd/community/blob/main/teps/0135-coscheduling-pipelinerun-pods.md) | [v0.51.0](https://github.com/tektoncd/pipeline/releases/tag/v0.51.0) | `coschedule` |
| [keep pod on cancel](./taskruns.md#cancelling-a-taskrun) | N/A | [v0.52.0](https://github.com/tektoncd/pipeline/releases/tag/v0.52.0) | `keep-pod-on-cancel` |
| [CEL in WhenExpression](./pipelines.md#use-cel-expression-in-whenexpression) | [TEP-0145](https://github.com/tektoncd/community/blob/main/teps/0145-cel-in-whenexpression.md) | [v0.53.0](https://github.com/tektoncd/pipeline/releases/tag/v0.53.0) | `enable-cel-in-whenexpression` |
| [Param Enum](./taskruns.md#parameter-enums) | [TEP-0144](https://github.com/tektoncd/community/blob/main/teps/0144-param-enum.md) | [v0.54.0](https://github.com/tektoncd/pipeline/releases/tag/v0.54.0) | `enable-param-enum` |
+| [Reusable Steps via StepActions](./stepactions.md) | [TEP-0142](https://github.com/tektoncd/community/blob/main/teps/0142-enable-step-reusability.md) | [v0.54.0](https://github.com/tektoncd/pipeline/releases/tag/v0.54.0) | `enable-step-actions` |
+| [Ignore Task Failure](./pipelines.md#using-the-onerror-field) | [TEP-0050](https://github.com/tektoncd/community/blob/main/teps/0050-ignore-task-failures.md) | N/A | |
### Beta Features
@@ -401,10 +403,6 @@ Features currently in "beta" are:
| [Isolated `Step` & `Sidecar` `Workspaces`](./workspaces.md#isolated-workspaces) | [TEP-0029](https://github.com/tektoncd/community/blob/main/teps/0029-step-workspaces.md) | [v0.24.0](https://github.com/tektoncd/pipeline/releases/tag/v0.24.0) | [v0.50.0](https://github.com/tektoncd/pipeline/releases/tag/v0.50.0) | |
| [Matrix](./matrix.md) | [TEP-0090](https://github.com/tektoncd/community/blob/main/teps/0090-matrix.md) | [v0.38.0](https://github.com/tektoncd/pipeline/releases/tag/v0.38.0) | [v0.53.0](https://github.com/tektoncd/pipeline/releases/tag/v0.53.0) | |
| [Task-level Resource Requirements](compute-resources.md#task-level-compute-resources-configuration) | [TEP-0104](https://github.com/tektoncd/community/blob/main/teps/0104-tasklevel-resource-requirements.md) | [v0.39.0](https://github.com/tektoncd/pipeline/releases/tag/v0.39.0) | [v0.53.0](https://github.com/tektoncd/pipeline/releases/tag/v0.53.0) | |
-| [Reusable Steps via StepActions](./stepactions.md) | [TEP-0142](https://github.com/tektoncd/community/blob/main/teps/0142-enable-step-reusability.md) | [v0.54.0](https://github.com/tektoncd/pipeline/releases/tag/v0.54.0) | `enable-step-actions` |
-| [Larger Results via Sidecar Logs](#enabling-larger-results-using-sidecar-logs) | [TEP-0127](https://github.com/tektoncd/community/blob/main/teps/0127-larger-results-via-sidecar-logs.md) | [v0.43.0](https://github.com/tektoncd/pipeline/releases/tag/v0.43.0) | [v0.61.0](https://github.com/tektoncd/pipeline/releases/tag/v0.61.0) | `results-from` |
-| [Step and Sidecar Overrides](./taskruns.md#overriding-task-steps-and-sidecars) | [TEP-0094](https://github.com/tektoncd/community/blob/main/teps/0094-specifying-resource-requirements-at-runtime.md) | [v0.34.0](https://github.com/tektoncd/pipeline/releases/tag/v0.34.0) | | [v0.61.0](https://github.com/tektoncd/pipeline/releases/tag/v0.61.0) | |
-| [Ignore Task Failure](./pipelines.md#using-the-onerror-field) | [TEP-0050](https://github.com/tektoncd/community/blob/main/teps/0050-ignore-task-failures.md) | [v0.55.0](https://github.com/tektoncd/pipeline/releases/tag/v0.55.0) | [v0.62.0](https://github.com/tektoncd/pipeline/releases/tag/v0.62.0) | N/A |
## Enabling larger results using sidecar logs
@@ -461,8 +459,7 @@ Out-of-the-box, Tekton Pipelines Controller is configured for relatively small-s
To allow TaskRuns and PipelineRuns to run in namespaces with [restricted pod security standards](https://kubernetes.io/docs/concepts/security/pod-security-standards/),
set the "set-security-context" feature flag to "true" in the [feature-flags configMap](#customizing-the-pipelines-controller-behavior). This configuration option applies a [SecurityContext](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
-to any containers injected into TaskRuns by the Pipelines controller. If the [Affinity Assistants](affinityassistants.md) feature is enabled, the SecurityContext is also applied to those containers.
-This SecurityContext may not be supported in all Kubernetes implementations (for example, OpenShift).
+to any containers injected into TaskRuns by the Pipelines controller. This SecurityContext may not be supported in all Kubernetes implementations (for example, OpenShift).
**Note**: running TaskRuns and PipelineRuns in the "tekton-pipelines" namespace is discouraged.
diff --git a/upstream/docs/artifacts.md b/upstream/docs/artifacts.md
index 8f5533f68c6..233dec1c53a 100644
--- a/upstream/docs/artifacts.md
+++ b/upstream/docs/artifacts.md
@@ -8,9 +8,8 @@ weight: 201
# Artifacts
- [Overview](#overview)
-- [Artifact Provenance Data](#artifact-provenance-data)
+- [Artifact Provenance Data](#passing-artifacts-between-steps)
- [Passing Artifacts between Steps](#passing-artifacts-between-steps)
- - [Passing Artifacts between Tasks](#passing-artifacts-between-tasks)
@@ -25,7 +24,6 @@ Artifacts fall into two categories:
- Inputs: Artifacts downloaded and used by the Step/Task.
- Outputs: Artifacts created and uploaded by the Step/Task.
-
Example Structure:
```json
{
@@ -106,108 +104,13 @@ spec:
EOF
```
-The content is written by the `Step` to a file `$(artifacts.path)`:
-
-```yaml
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- generateName: step-artifacts-
-spec:
- taskSpec:
- description: |
- A simple task that populates artifacts to TaskRun stepState
- steps:
- - name: artifacts-producer
- image: bash:latest
- script: |
- cat > $(artifacts.path) << EOF
- {
- "inputs":[
- {
- "name":"source",
- "values":[
- {
- "uri":"pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c",
- "digest":{
- "sha256":"b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"
- }
- }
- ]
- }
- ],
- "outputs":[
- {
- "name":"image",
- "values":[
- {
- "uri":"pkg:oci/nginx:stable-alpine3.17-slim?repository_url=docker.io/library",
- "digest":{
- "sha256":"df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48",
- "sha1":"95588b8f34c31eb7d62c92aaa4e6506639b06ef2"
- }
- }
- ]
- }
- ]
- }
- EOF
-```
-
It is recommended to use [purl format](https://github.com/package-url/purl-spec/blob/master/PURL-SPECIFICATION.rst) for artifacts uri as shown in the example.
-### Output Artifacts in SLSA Provenance
-
-Artifacts are classified as either:
-
-- Build Outputs - packages, images, etc. that are being published by the build.
-- Build Byproducts - logs, caches, etc. that are incidental artifacts that are produced by the build.
-
-By default, Tekton Chains will consider all output artifacts as `byProducts` when generating in the [SLSA provenance](https://slsa.dev/spec/v1.0/provenance). In order to treat an artifact as a [subject](https://slsa.dev/spec/v1.0/provenance#schema) of the build, you must set a boolean field `"buildOutput": true` for the output artifact.
-
-e.g.
-```yaml
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- generateName: step-artifacts-
-spec:
- taskSpec:
- description: |
- A simple task that populates artifacts to TaskRun stepState
- steps:
- - name: artifacts-producer
- image: bash:latest
- script: |
- cat > $(artifacts.path) << EOF
- {
- "outputs":[
- {
- "name":"image",
- "buildOutput": true,
- "values":[
- {
- "uri":"pkg:oci/nginx:stable-alpine3.17-slim?repository_url=docker.io/library",
- "digest":{
- "sha256":"df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48",
- "sha1":"95588b8f34c31eb7d62c92aaa4e6506639b06ef2"
- }
- }
- ]
- }
- ]
- }
- EOF
-```
-
-This informs Tekton Chains your desire to handle the artifact.
-
-> [!TIP]
-> When authoring a `StepAction` or a `Task`, you can parametrize this field to allow users to indicate their desire depending on what they are uploading - this can be useful for actions that may produce either a build output or a byproduct depending on the context!
-
### Passing Artifacts between Steps
You can pass artifacts from one step to the next using:
+
- Specific Artifact: `$(steps..inputs.)` or `$(steps..outputs.)`
+- Default (First) Artifact: `$(steps..inputs)` or `$(steps..outputs)` (if is omitted)
The example below shows how to access the previous' step artifacts from another step in the same task
@@ -259,12 +162,12 @@ spec:
- name: artifacts-consumer
image: bash:latest
script: |
- echo $(steps.artifacts-producer.outputs.image)
+ echo $(steps.artifacts-producer.outputs)
```
-The resolved value of `$(steps..outputs.)` is the values of an artifact. For this example,
-`$(steps.artifacts-producer.outputs.image)` is resolved to
+The resolved value of `$(steps..outputs.)` or `$(steps..outputs)` is the values of an artifact. For this example,
+`$(steps.artifacts-producer.outputs)` is resolved to
```json
[
{
@@ -279,278 +182,62 @@ The resolved value of `$(steps..outputs.)` is
Upon resolution and execution of the `TaskRun`, the `Status` will look something like:
-
-```json
-{
- "artifacts": {
- "inputs": [
+```yaml
+"steps": [
{
- "name": "source",
- "values": [
+ "container": "step-artifacts-producer",
+ "imageID": "docker.io/library/bash@sha256:5353512b79d2963e92a2b97d9cb52df72d32f94661aa825fcfa0aede73304743",
+ "inputs": [
{
- "digest": {
- "sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"
- },
- "uri": "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"
+ "name": "source",
+ "values": [
+ {
+ "digest": {
+ "sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"
+ },
+ "uri":"pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c",
+ }
+ ]
}
- ]
- }
- ],
- "outputs": [
- {
- "name": "image",
- "values": [
+ ],
+ "name": "artifacts-producer",
+ "outputs": [
{
- "digest": {
- "sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2",
- "sha256": "df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48"
- },
- "uri": "pkg:oci/nginx:stable-alpine3.17-slim?repository_url=docker.io/library"
+ "name": "image",
+ "values": [
+ {
+ "digest": {
+ "sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2",
+ "sha256": "df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48"
+ },
+ "uri":"pkg:oci/nginx:stable-alpine3.17-slim?repository_url=docker.io/library",
+ }
+ ]
}
- ]
- }
- ]
- },
- "steps": [
- {
- "container": "step-artifacts-producer",
- "imageID": "docker.io/library/bash@sha256:5353512b79d2963e92a2b97d9cb52df72d32f94661aa825fcfa0aede73304743",
- "inputs": [
- {
- "name": "source",
- "values": [
- {
- "digest": {
- "sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"
- },
- "uri": "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"
- }
- ]
- }
- ],
- "name": "artifacts-producer",
- "outputs": [
- {
- "name": "image",
- "values": [
- {
- "digest": {
- "sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2",
- "sha256": "df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48"
- },
- "uri": "pkg:oci/nginx:stable-alpine3.17-slim?repository_url=docker.io/library"
- }
- ]
- }
- ],
- "terminated": {
- "containerID": "containerd://010f02d103d1db48531327a1fe09797c87c1d50b6a216892319b3af93e0f56e7",
- "exitCode": 0,
- "finishedAt": "2024-03-18T17:05:06Z",
- "message": "...",
- "reason": "Completed",
- "startedAt": "2024-03-18T17:05:06Z"
- },
- "terminationReason": "Completed"
- },
- {
- "container": "step-artifacts-consumer",
- "imageID": "docker.io/library/bash@sha256:5353512b79d2963e92a2b97d9cb52df72d32f94661aa825fcfa0aede73304743",
- "name": "artifacts-consumer",
- "terminated": {
- "containerID": "containerd://42428aa7e5a507eba924239f213d185dd4bc0882b6f217a79e6792f7fec3586e",
- "exitCode": 0,
- "finishedAt": "2024-03-18T17:05:06Z",
- "reason": "Completed",
- "startedAt": "2024-03-18T17:05:06Z"
+ ],
+ "terminated": {
+ "containerID": "containerd://010f02d103d1db48531327a1fe09797c87c1d50b6a216892319b3af93e0f56e7",
+ "exitCode": 0,
+ "finishedAt": "2024-03-18T17:05:06Z",
+ "message": "...",
+ "reason": "Completed",
+ "startedAt": "2024-03-18T17:05:06Z"
+ },
+ "terminationReason": "Completed"
},
- "terminationReason": "Completed"
- }
- ]
-}
-
-```
-
-### Passing Artifacts between Tasks
-You can pass artifacts from one task to the another using:
-
-- Specific Artifact: `$(tasks..inputs.)` or `$(tasks..outputs.)`
-
-The example below shows how to access the previous' task artifacts from another task in a pipeline
-
-```yaml
-apiVersion: tekton.dev/v1
-kind: PipelineRun
-metadata:
- generateName: pipelinerun-consume-tasks-artifacts
-spec:
- pipelineSpec:
- tasks:
- - name: produce-artifacts-task
- taskSpec:
- description: |
- A simple task that produces artifacts
- steps:
- - name: produce-artifacts
- image: bash:latest
- script: |
- #!/usr/bin/env bash
- cat > $(artifacts.path) << EOF
- {
- "inputs":[
- {
- "name":"input-artifacts",
- "values":[
- {
- "uri":"pkg:example.github.com/inputs",
- "digest":{
- "sha256":"b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"
- }
- }
- ]
- }
- ],
- "outputs":[
- {
- "name":"image",
- "values":[
- {
- "uri":"pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c",
- "digest":{
- "sha256":"df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48",
- "sha1":"95588b8f34c31eb7d62c92aaa4e6506639b06ef2"
- }
- }
- ]
- }
- ]
- }
- EOF
- - name: consume-artifacts
- runAfter:
- - produce-artifacts-task
- taskSpec:
- steps:
- - name: artifacts-consumer-python
- image: python:latest
- script: |
- #!/usr/bin/env python3
- import json
- data = json.loads('$(tasks.produce-artifacts-task.outputs.image)')
- if data[0]['uri'] != "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c":
- exit(1)
-```
-
-
-Similar to Step Artifacts. The resolved value of `$(tasks..outputs.)` is the values of an artifact. For this example,
-`$(tasks.produce-artifacts-task.outputs.image)` is resolved to
-```json
-[
- {
- "uri":"pkg:example.github.com/inputs",
- "digest":{
- "sha256":"b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"
- }
- }
-]
-```
-Upon resolution and execution of the `TaskRun`, the `Status` will look something like:
-```json
-{
- "artifacts": {
- "inputs": [
- {
- "name": "input-artifacts",
- "values": [
- {
- "digest": {
- "sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"
- },
- "uri": "pkg:example.github.com/inputs"
- }
- ]
- }
- ],
- "outputs": [
- {
- "name": "image",
- "values": [
- {
- "digest": {
- "sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2",
- "sha256": "df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48"
- },
- "uri": "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"
- }
- ]
- }
- ]
- },
- "completionTime": "2024-05-28T14:10:58Z",
- "conditions": [
{
- "lastTransitionTime": "2024-05-28T14:10:58Z",
- "message": "All Steps have completed executing",
- "reason": "Succeeded",
- "status": "True",
- "type": "Succeeded"
- }
- ],
- "podName": "pipelinerun-consume-tasks-a41ee44e4f964e95adfd3aea417d52f90-pod",
- "provenance": {
- "featureFlags": {
- "AwaitSidecarReadiness": true,
- "Coschedule": "workspaces",
- "DisableAffinityAssistant": false,
- "DisableCredsInit": false,
- "DisableInlineSpec": "",
- "EnableAPIFields": "beta",
- "EnableArtifacts": true,
- "EnableCELInWhenExpression": false,
- "EnableConciseResolverSyntax": false,
- "EnableKeepPodOnCancel": false,
- "EnableParamEnum": false,
- "EnableProvenanceInStatus": true,
- "EnableStepActions": true,
- "EnableTektonOCIBundles": false,
- "EnforceNonfalsifiability": "none",
- "MaxResultSize": 4096,
- "RequireGitSSHSecretKnownHosts": false,
- "ResultExtractionMethod": "termination-message",
- "RunningInEnvWithInjectedSidecars": true,
- "ScopeWhenExpressionsToTask": false,
- "SendCloudEventsForRuns": false,
- "SetSecurityContext": false,
- "VerificationNoMatchPolicy": "ignore"
- }
- },
- "startTime": "2024-05-28T14:10:48Z",
- "steps": [
- {
- "container": "step-produce-artifacts",
- "imageID": "docker.io/library/bash@sha256:23f90212fd89e4c292d7b41386ef1a6ac2b8a02bbc6947680bfe184cbc1a2899",
- "name": "produce-artifacts",
+ "container": "step-artifacts-consumer",
+ "imageID": "docker.io/library/bash@sha256:5353512b79d2963e92a2b97d9cb52df72d32f94661aa825fcfa0aede73304743",
+ "name": "artifacts-consumer",
"terminated": {
- "containerID": "containerd://1291ce07b175a7897beee6ba62eaa1528427bacb1f76b31435eeba68828c445a",
+ "containerID": "containerd://42428aa7e5a507eba924239f213d185dd4bc0882b6f217a79e6792f7fec3586e",
"exitCode": 0,
- "finishedAt": "2024-05-28T14:10:57Z",
- "message": "...",
+ "finishedAt": "2024-03-18T17:05:06Z",
"reason": "Completed",
- "startedAt": "2024-05-28T14:10:57Z"
+ "startedAt": "2024-03-18T17:05:06Z"
},
"terminationReason": "Completed"
}
],
- "taskSpec": {
- "description": "A simple task that produces artifacts\n",
- "steps": [
- {
- "computeResources": {},
- "image": "bash:latest",
- "name": "produce-artifacts",
- "script": "#!/usr/bin/env bash\ncat > /tekton/artifacts/provenance.json << EOF\n{\n \"inputs\":[\n {\n \"name\":\"input-artifacts\",\n \"values\":[\n {\n \"uri\":\"pkg:example.github.com/inputs\",\n \"digest\":{\n \"sha256\":\"b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0\"\n }\n }\n ]\n }\n ],\n \"outputs\":[\n {\n \"name\":\"image\",\n \"values\":[\n {\n \"uri\":\"pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c\",\n \"digest\":{\n \"sha256\":\"df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48\",\n \"sha1\":\"95588b8f34c31eb7d62c92aaa4e6506639b06ef2\"\n }\n }\n ]\n }\n ]\n}\nEOF\n"
- }
- ]
- }
-}
-```
+
+```
\ No newline at end of file
diff --git a/upstream/docs/cluster-resolver.md b/upstream/docs/cluster-resolver.md
index 1a43f579449..fc545420ce8 100644
--- a/upstream/docs/cluster-resolver.md
+++ b/upstream/docs/cluster-resolver.md
@@ -13,11 +13,11 @@ This Resolver responds to type `cluster`.
## Parameters
-| Param Name | Description | Example Value |
-|-------------|-------------------------------------------------------|----------------------------------|
-| `kind` | The kind of resource to fetch. | `task`, `pipeline`, `stepaction` |
-| `name` | The name of the resource to fetch. | `some-pipeline`, `some-task` |
-| `namespace` | The namespace in the cluster containing the resource. | `default`, `other-namespace` |
+| Param Name | Description | Example Value |
+|-------------|-------------------------------------------------------|------------------------------|
+| `kind` | The kind of resource to fetch. | `task`, `pipeline` |
+| `name` | The name of the resource to fetch. | `some-pipeline`, `some-task` |
+| `namespace` | The namespace in the cluster containing the resource. | `default`, `other-namespace` |
## Requirements
@@ -37,7 +37,7 @@ for the name, namespace and defaults that the resolver ships with.
| Option Name | Description | Example Values |
|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------|
-| `default-kind` | The default resource kind to fetch if not specified in parameters. | `task`, `pipeline`, `stepaction` |
+| `default-kind` | The default resource kind to fetch if not specified in parameters. | `task`, `pipeline` |
| `default-namespace` | The default namespace to fetch resources from if not specified in parameters. | `default`, `some-namespace` |
| `allowed-namespaces` | An optional comma-separated list of namespaces which the resolver is allowed to access. Defaults to empty, meaning all namespaces are allowed. | `default,some-namespace`, (empty) |
| `blocked-namespaces` | An optional comma-separated list of namespaces which the resolver is blocked from accessing. If the value is a `*` all namespaces will be disallowed and allowed namespace will need to be explicitely listed in `allowed-namespaces`. Defaults to empty, meaning all namespaces are allowed. | `default,other-namespace`, `*`, (empty) |
@@ -63,27 +63,6 @@ spec:
value: namespace-containing-task
```
-### StepAction Resolution
-
-```yaml
-apiVersion: tekton.dev/v1beta1
-kind: Task
-metadata:
- name: remote-stepaction-reference
-spec:
- steps:
- - name: step-action-example
- ref
- resolver: cluster
- params:
- - name: kind
- value: stepaction
- - name: name
- value: some-stepaction
- - name: namespace
- value: namespace-containing-stepaction
-```
-
### Pipeline resolution
```yaml
diff --git a/upstream/docs/debug.md b/upstream/docs/debug.md
index dae54e86bd7..ff3d62a9ff4 100644
--- a/upstream/docs/debug.md
+++ b/upstream/docs/debug.md
@@ -13,8 +13,7 @@ weight: 108
- [Breakpoint on Failure](#breakpoint-on-failure)
- [Failure of a Step](#failure-of-a-step)
- [Halting a Step on failure](#halting-a-step-on-failure)
- - [Exiting onfailure breakpoint](#exiting-onfailure-breakpoint)
- - [Breakpoint before step](#breakpoint-before-step)
+ - [Exiting breakpoint](#exiting-breakpoint)
- [Debug Environment](#debug-environment)
- [Mounts](#mounts)
- [Debug Scripts](#debug-scripts)
@@ -60,26 +59,12 @@ stopping write of the `.err` file and waiting on a signal by the user t
In this breakpoint, which is essentially a limbo state the TaskRun finds itself in, the user can interact with the step
environment using a CLI or an IDE.
-#### Exiting onfailure breakpoint
+#### Exiting breakpoint
To exit a step which has been paused upon failure, the step would wait on a file similar to `.breakpointexit` which
would unpause and exit the step container. eg: Step 0 fails and is paused. Writing `0.breakpointexit` in `/tekton/run`
would unpause and exit the step container.
-### Breakpoint before step
-
-
-TaskRun will be stuck waiting for user debugging before the step execution.
-When beforeStep-Breakpoint takes effect, the user can see the following information
-from the corresponding step container log:
-```
-debug before step breakpoint has taken effect, waiting for user's decision:
-1) continue, use cmd: /tekton/debug/scripts/debug-beforestep-continue
-2) fail-continue, use cmd: /tekton/debug/scripts/debug-beforestep-fail-continue
-```
-1. Executing /tekton/debug/scripts/debug-beforestep-continue will continue to execute the step program
-2. Executing /tekton/debug/scripts/debug-beforestep-fail-continue will not continue to execute the task, and will mark the step as failed
-
## Debug Environment
Additional environment augmentations made available to the TaskRun Pod to aid in troubleshooting and managing step lifecycle.
@@ -95,13 +80,7 @@ to reflect step number. eg: Step 0 will have `/tekton/debug/info/0`, Step 1 will
### Debug Scripts
`/tekton/debug/scripts/debug-continue` : Mark the step as completed with success by writing to `/tekton/run`. eg: User wants to exit
-onfailure breakpoint for failed step 0. Running this script would create `/tekton/run/0` and `/tekton/run/0/out.breakpointexit`.
+breakpoint for failed step 0. Running this script would create `/tekton/run/0` and `/tekton/run/0.breakpointexit`.
`/tekton/debug/scripts/debug-fail-continue` : Mark the step as completed with failure by writing to `/tekton/run`. eg: User wants to exit
-onfailure breakpoint for failed step 0. Running this script would create `/tekton/run/0` and `/tekton/run/0/out.breakpointexit.err`.
-
-`/tekton/debug/scripts/debug-beforestep-continue` : Mark the step continue to execute by writing to `/tekton/run`. eg: User wants to exit
-before step breakpoint for before step 0. Running this script would create `/tekton/run/0` and `/tekton/run/0/out.beforestepexit`.
-
-`/tekton/debug/scripts/debug-beforestep-fail-continue` : Mark the step not continue to execute by writing to `/tekton/run`. eg: User wants to exit
-before step breakpoint for before step 0. Running this script would create `/tekton/run/0` and `/tekton/run/0/out.beforestepexit.err`.
+breakpoint for failed step 0. Running this script would create `/tekton/run/0.err` and `/tekton/run/0.breakpointexit`.
diff --git a/upstream/docs/deprecations.md b/upstream/docs/deprecations.md
index 2421432ee42..8f51ee1ac46 100644
--- a/upstream/docs/deprecations.md
+++ b/upstream/docs/deprecations.md
@@ -23,11 +23,11 @@ The following features are deprecated but have not yet been removed.
|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------|-------------------------------------|
| [Several fields of Task.Step are deprecated](https://github.com/tektoncd/pipeline/issues/4737) | v0.36.0 | Beta | Feb 25, 2023 |
| [ClusterTask is deprecated](https://github.com/tektoncd/pipeline/issues/4476) | v0.41.0 | Beta | July 13, 2023 |
+| [`pipelineRef.bundle` and `taskRef.bundle` are deprecated](https://github.com/tektoncd/pipeline/issues/5514) | v0.41.0 | Alpha | July 13, 2023 |
| [The `config-trusted-resources` configMap is deprecated](https://github.com/tektoncd/pipeline/issues/5852) | v0.45.0 | Alpha | v0.46.0 |
| [The `default-cloud-events-sink` setting in the `config-defaults` configMap is deprecated](https://github.com/tektoncd/pipeline/pull/6883) in favour of the new `config-events` configMap. | v0.50.0 | N/A | v0.59.0 |
| [v1beta1 Tasks, TaskRuns, Pipelines, and PipelineRuns are deprecated in favor of v1](https://github.com/tektoncd/pipeline/issues/5541) | v0.50.0 | Beta | v0.62.0 |
| [The `disable-affinity-assistant` feature flag is deprecated in favor of the `coschedule` feature flag](https://github.com/tektoncd/pipeline/issues/6988) | v0.51.0 | N/A | April 27, 2024 |
-| The `resolver framework` is [deprecated](https://github.com/tektoncd/pipeline/pull/7945) in favor of an [upgraded framework](https://github.com/tektoncd/pipeline/pull/7910) | v0.60.0 | N/A | v0.72. |
### v1beta1 deprecation
@@ -66,4 +66,4 @@ See [TEP-0074](https://github.com/tektoncd/community/blob/main/teps/0074-depreca
- The generic pipelineResources functions including inputs and outputs resources and the `from` type
-- [TaskRun.Status.ResourcesResult is deprecated and tombstoned #6301](https://github.com/tektoncd/pipeline/issues/6325)
+- [TaskRun.Status.ResourcesResult is deprecated and tombstoned #6301](https://github.com/tektoncd/pipeline/issues/6325)
\ No newline at end of file
diff --git a/upstream/docs/developers/taskruns.md b/upstream/docs/developers/taskruns.md
index 5b35692bde8..1b702433233 100644
--- a/upstream/docs/developers/taskruns.md
+++ b/upstream/docs/developers/taskruns.md
@@ -284,54 +284,4 @@ There are known issues with the existing implementation of sidecars:
but an Error when the sidecar exits with an error. This is only apparent when
using `kubectl` to get the pods of a TaskRun, not when describing the Pod
using `kubectl describe pod ...` nor when looking at the TaskRun, but can be
- quite confusing.
-
-## Breakpoint on Failure
-
-Halting a TaskRun execution on Failure of a step.
-
-### Failure of a Step
-
-The entrypoint binary is used to manage the lifecycle of a step. Steps are aligned beforehand by the TaskRun controller
-allowing each step to run in a particular order. This is done using `-wait_file` and the `-post_file` flags. The former
-let's the entrypoint binary know that it has to wait on creation of a particular file before starting execution of the step.
-And the latter provides information on the step number and signal the next step on completion of the step.
-
-On success of a step, the `-post-file` is written as is, signalling the next step which would have the same argument given
-for `-wait_file` to resume the entrypoint process and move ahead with the step.
-
-On failure of a step, the `-post_file` is written with appending `.err` to it denoting that the previous step has failed with
-and error. The subsequent steps are skipped in this case as well, marking the TaskRun as a failure.
-
-### Halting a Step on failure
-
-The failed step writes `.err` to `/tekton/run` and stops running completely. To be able to debug a step we would
-need it to continue running (not exit), not skip the next steps and signal health of the step. By disabling step skipping,
-stopping write of the `.err` file and waiting on a signal by the user to disable the halt, we would be simulating a
-"breakpoint".
-
-In this breakpoint, which is essentially a limbo state the TaskRun finds itself in, the user can interact with the step
-environment using a CLI or an IDE.
-
-### Exiting onfailure breakpoint
-
-To exit a step which has been paused upon failure, the step would wait on a file similar to `.breakpointexit` which
-would unpause and exit the step container. eg: Step 0 fails and is paused. Writing `0.breakpointexit` in `/tekton/run`
-would unpause and exit the step container.
-
-## Breakpoint before step
-
-TaskRun will be stuck waiting for user debugging before the step execution.
-
-### Halting a Step before execution
-
-The step program will be executed after all the `-wait_file` monitoring ends. If want the user to enter the debugging before the step is executed,
-need to pass a parameter `debug_before_step` to `entrypoint`,
-and `entrypoint` will end the monitoring of `waitFiles` back pause,
-waiting to listen to the `/tekton/run/0/out.beforestepexit` file
-
-### Exiting before step breakpoint
-
-`entrypoint` listening `/tekton/run/{{ stepID }}/out.beforestepexit` or `/tekton/run/{{ stepID }}/out.beforestepexit.err` to
-decide whether to proceed this step, `out.beforestepexit` means continue with step,
-`out.beforestepexit.err` means do not continue with the step.
\ No newline at end of file
+ quite confusing.
\ No newline at end of file
diff --git a/upstream/docs/git-resolver.md b/upstream/docs/git-resolver.md
index 826f9a9c0f9..7ec56fda7da 100644
--- a/upstream/docs/git-resolver.md
+++ b/upstream/docs/git-resolver.md
@@ -114,6 +114,10 @@ Note that not all `go-scm` implementations have been tested with the `git` resol
* BitBucket Server
* BitBucket Cloud
+Fetching from multiple Git providers with different configuration is not
+supported. You can use the [http resolver](./http-resolver.md) to fetch URL
+from another provider with different credentials.
+
#### Task Resolution
```yaml
@@ -191,118 +195,6 @@ spec:
value: Ranni
```
-### Specifying Configuration for Multiple Git Providers
-
-It is possible to specify configurations for multiple providers and even multiple configurations for same provider to use in
-different tekton resources. Firstly, details need to be added in configmap with the unique identifier key prefix.
-To use them in tekton resources, pass the unique key mentioned in configmap as an extra param to resolver with key
-`configKey` and value will be the unique key. If no `configKey` param is passed, `default` will be used. Default
-configuration to be used for git resolver can be specified in configmap by either mentioning no unique identifier or
-using identifier `default`
-
-**Note**: `configKey` should not contain `.` while specifying configurations in configmap
-
-### Example Configmap
-
-Multiple configurations can be specified in `git-resolver-config` configmap like this. All keys mentioned above are supported.
-
-```yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: git-resolver-config
- namespace: tekton-pipelines-resolvers
- labels:
- app.kubernetes.io/component: resolvers
- app.kubernetes.io/instance: default
- app.kubernetes.io/part-of: tekton-pipelines
-data:
- # configuration 1, default one to use if no configKey provided or provided with value default
- fetch-timeout: "1m"
- default-url: "https://github.com/tektoncd/catalog.git"
- default-revision: "main"
- scm-type: "github"
- server-url: ""
- api-token-secret-name: ""
- api-token-secret-key: ""
- api-token-secret-namespace: "default"
- default-org: ""
-
- # configuration 2, will be used if configKey param passed with value test1
- test1.fetch-timeout: "5m"
- test1.default-url: ""
- test1.default-revision: "stable"
- test1.scm-type: "github"
- test1.server-url: "api.internal-github.com"
- test1.api-token-secret-name: "test1-secret"
- test1.api-token-secret-key: "token"
- test1.api-token-secret-namespace: "test1"
- test1.default-org: "tektoncd"
-
- # configuration 3, will be used if configKey param passed with value test2
- test2.fetch-timeout: "10m"
- test2.default-url: ""
- test2.default-revision: "stable"
- test2.scm-type: "gitlab"
- test2.server-url: "api.internal-gitlab.com"
- test2.api-token-secret-name: "test2-secret"
- test2.api-token-secret-key: "pat"
- test2.api-token-secret-namespace: "test2"
- test2.default-org: "tektoncd-infra"
-```
-
-#### Task Resolution
-
-A specific configurations from the configMap can be selected by passing the parameter `configKey` with the value
-matching one of the configuration keys used in the configMap.
-
-```yaml
-apiVersion: tekton.dev/v1beta1
-kind: TaskRun
-metadata:
- name: git-api-demo-tr
-spec:
- taskRef:
- resolver: git
- params:
- - name: org
- value: tektoncd
- - name: repo
- value: catalog
- - name: revision
- value: main
- - name: pathInRepo
- value: task/git-clone/0.6/git-clone.yaml
- - name: configKey
- value: test1
-```
-
-#### Pipeline resolution
-
-```yaml
-apiVersion: tekton.dev/v1beta1
-kind: PipelineRun
-metadata:
- name: git-api-demo-pr
-spec:
- pipelineRef:
- resolver: git
- params:
- - name: org
- value: tektoncd
- - name: repo
- value: catalog
- - name: revision
- value: main
- - name: pathInRepo
- value: pipeline/simple/0.1/simple.yaml
- - name: configKey
- value: test2
- params:
- - name: name
- value: Ranni
-```
-
## `ResolutionRequest` Status
`ResolutionRequest.Status.RefSource` field captures the source where the remote resource came from. It includes the 3 subfields: `url`, `digest` and `entrypoint`.
- `url`
diff --git a/upstream/docs/how-to-write-a-resolver.md b/upstream/docs/how-to-write-a-resolver.md
index c126d16b87e..0237fa11daf 100644
--- a/upstream/docs/how-to-write-a-resolver.md
+++ b/upstream/docs/how-to-write-a-resolver.md
@@ -54,7 +54,7 @@ You'll also need the following:
- A computer with
[`kubectl`](https://kubernetes.io/docs/tasks/tools/#kubectl) and
[`ko`](https://github.com/google/ko) installed.
-- A Kubernetes cluster running at least Kubernetes 1.28. A [`kind`
+- A Kubernetes cluster running at least Kubernetes 1.27. A [`kind`
cluster](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
should work fine for following the guide on your local machine.
- An image registry that you can push images to. If you're using `kind`
@@ -97,29 +97,6 @@ a little bit of boilerplate.
Create `cmd/demoresolver/main.go` with the following setup code:
-{{% tabs %}}
-
-{{% tab "Latest Framework" %}}
-```go
-package main
-
-import (
- "context"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
- "knative.dev/pkg/injection/sharedmain"
-)
-
-func main() {
- sharedmain.Main("controller",
- framework.NewController(context.Background(), &resolver{}),
- )
-}
-
-type resolver struct {}
-```
-{{% /tab %}}
-
-{{% tab "Previous Framework (Deprecated)" %}}
```go
package main
@@ -138,10 +115,6 @@ func main() {
type resolver struct {}
```
-{{% /tab %}}
-
-{{% /tabs %}}
-
This won't compile yet but you can download the dependencies by running:
```bash
@@ -216,24 +189,6 @@ example resolver.
We'll also need to add another import for this package at the top:
-{{% tabs %}}
-
-{{% tab "Latest Framework" %}}
-```go
-import (
- "context"
-
- // Add this one; it defines LabelKeyResolverType we use in GetSelector
- "github.com/tektoncd/pipeline/pkg/resolution/common"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
- "knative.dev/pkg/injection/sharedmain"
- pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
-)
-```
-{{% /tab %}}
-
-{{% tab "Previous Framework (Deprecated)" %}}
-
```go
import (
"context"
@@ -246,184 +201,40 @@ import (
)
```
-{{% /tab %}}
+## The `ValidateParams` method
-{{% /tabs %}}
-
-## The `Validate` method
-
-The `Validate` method checks that the resolution-spec submitted as part of
+The `ValidateParams` method checks that the params submitted as part of
a resolution request are valid. Our example resolver doesn't expect
-any params in the spec so we'll simply ensure that the there are no params.
-Our example resolver also expects format for the `url` to be `demoscheme://` so we'll validate this format.
-In the previous version, this was instead called `ValidateParams` method. See below
-for the differences.
-
-{{% tabs %}}
-
-{{% tab "Latest Framework" %}}
+any params so we'll simply ensure that the given map is empty.
```go
-// Validate ensures that the resolution spec from a request is as expected.
-func (r *resolver) Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error {
- if len(req.Params) > 0 {
- return errors.New("no params allowed")
- }
- url := req.URL
- u, err := neturl.ParseRequestURI(url)
- if err != nil {
- return err
- }
- if u.Scheme != "demoscheme" {
- return fmt.Errorf("Invalid Scheme. Want %s, Got %s", "demoscheme", u.Scheme)
- }
- if u.Path == "" {
- return errors.New("Empty path.")
- }
- return nil
-}
-```
-
-You'll also need to add the `net/url` as `neturl` and `"errors"` package to your list of imports at
-the top of the file.
-
-```
-{{% /tab %}}
-
-{{% tab "Previous Framework (Deprecated)" %}}
-
-```go
-// ValidateParams ensures that the params from a request are as expected.
-func (r *resolver) ValidateParams(ctx context.Context, params []pipelinev1.Param) error {
- if len(req.Params) > 0 {
+// ValidateParams ensures parameters from a request are as expected.
+func (r *resolver) ValidateParams(ctx context.Context, params map[string]string) error {
+ if len(params) > 0 {
return errors.New("no params allowed")
}
return nil
}
```
-{{% /tab %}}
-
-{{% /tabs %}}
-
You'll also need to add the `"errors"` package to your list of imports at
the top of the file.
## The `Resolve` method
We implement the `Resolve` method to do the heavy lifting of fetching
-the contents of a file and returning them. It takes in the resolution request spec as input.
-For this example we're just going to return a hard-coded string of YAML. Since Tekton Pipelines
+the contents of a file and returning them. For this example we're just
+going to return a hard-coded string of YAML. Since Tekton Pipelines
currently only supports fetching Pipeline resources via remote
resolution that's what we'll return.
-
The method signature we're implementing here has a
`framework.ResolvedResource` interface as one of its return values. This
is another type we have to implement but it has a small footprint:
-{{% tabs %}}
-
-{{% tab "Latest Framework" %}}
-
-
```go
-// Resolve uses the given resolution spec to resolve the requested file or resource.
-func (r *resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (framework.ResolvedResource, error) {
- return &myResolvedResource{}, nil
-}
-
-// our hard-coded resolved file to return
-const pipeline = `
-apiVersion: tekton.dev/v1beta1
-kind: Pipeline
-metadata:
- name: my-pipeline
-spec:
- tasks:
- - name: hello-world
- taskSpec:
- steps:
- - image: alpine:3.15.1
- script: |
- echo "hello world"
-`
-
-// myResolvedResource wraps the data we want to return to Pipelines
-type myResolvedResource struct {}
-
-// Data returns the bytes of our hard-coded Pipeline
-func (*myResolvedResource) Data() []byte {
- return []byte(pipeline)
-}
-
-// Annotations returns any metadata needed alongside the data. None atm.
-func (*myResolvedResource) Annotations() map[string]string {
- return nil
-}
-
-// RefSource is the source reference of the remote data that records where the remote
-// file came from including the url, digest and the entrypoint. None atm.
-func (*myResolvedResource) RefSource() *pipelinev1.RefSource {
- return nil
-}
-```
-
-{{% /tab %}}
-
-{{% tab "Previous Framework (Deprecated)" %}}
-
-
-```go
-// Resolve uses the given resolution spec to resolve the requested file or resource.
-func (r *resolver) Resolve(ctx context.Context, params []pipelinev1.Param) (framework.ResolvedResource, error) {
- return &myResolvedResource{}, nil
-}
-
-// our hard-coded resolved file to return
-const pipeline = `
-apiVersion: tekton.dev/v1beta1
-kind: Pipeline
-metadata:
- name: my-pipeline
-spec:
- tasks:
- - name: hello-world
- taskSpec:
- steps:
- - image: alpine:3.15.1
- script: |
- echo "hello world"
-`
-
-// myResolvedResource wraps the data we want to return to Pipelines
-type myResolvedResource struct {}
-
-// Data returns the bytes of our hard-coded Pipeline
-func (*myResolvedResource) Data() []byte {
- return []byte(pipeline)
-}
-
-// Annotations returns any metadata needed alongside the data. None atm.
-func (*myResolvedResource) Annotations() map[string]string {
- return nil
-}
-
-// RefSource is the source reference of the remote data that records where the remote
-// file came from including the url, digest and the entrypoint. None atm.
-func (*myResolvedResource) RefSource() *pipelinev1.RefSource {
- return nil
-}
-```
-
-
-{{% /tab %}}
-
-{{% /tabs %}}
-
-```go
-// Resolve uses the given resolution spec to resolve the requested file or resource.
-func (r *resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (framework.ResolvedResource, error) {
+// Resolve uses the given params to resolve the requested file or resource.
+func (r *resolver) Resolve(ctx context.Context, params map[string]string) (framework.ResolvedResource, error) {
return &myResolvedResource{}, nil
}
@@ -482,7 +293,6 @@ func (*myResolvedResource) RefSource() *pipelinev1.RefSource {
}
```
-
## The deployment configuration
Finally, our resolver needs some deployment configuration so that it can
diff --git a/upstream/docs/install.md b/upstream/docs/install.md
index 25fd35ff1d5..df0c6575888 100644
--- a/upstream/docs/install.md
+++ b/upstream/docs/install.md
@@ -21,7 +21,7 @@ This guide explains how to install Tekton Pipelines.
## Prerequisites
-- A [Kubernetes cluster][k8s] running version 1.28 or later.
+- A [Kubernetes cluster][k8s] running version 1.25 or later.
- [Kubectl][].
- Grant `cluster-admin` privileges to the current user. See the [Kubernetes
role-based access control (RBAC) docs][rbac] for more information.
diff --git a/upstream/docs/matrix.md b/upstream/docs/matrix.md
index 3da515c36a5..6db2baaf27b 100644
--- a/upstream/docs/matrix.md
+++ b/upstream/docs/matrix.md
@@ -1004,8 +1004,8 @@ status:
```
[cel]: https://github.com/tektoncd/experimental/tree/1609827ea81d05c8d00f8933c5c9d6150cd36989/cel
-[pr-with-matrix]: https://github.com/tektoncd/pipeline/blob/main/examples/v1/pipelineruns/beta/pipelinerun-with-matrix.yaml
-[pr-with-matrix-and-results]: https://github.com/tektoncd/pipeline/blob/main/examples/v1/pipelineruns/beta/pipelinerun-with-matrix-and-results.yaml
-[pr-with-matrix-context-variables]: https://github.com/tektoncd/pipeline/blob/main/examples/v1/pipelineruns/beta/pipelinerun-with-matrix-context-variables.yaml
-[pr-with-matrix-emitting-results]: https://github.com/tektoncd/pipeline/blob/main/examples/v1/pipelineruns/beta/pipelinerun-with-matrix-emitting-results.yaml
+[pr-with-matrix]: ../examples/v1/pipelineruns/beta/pipelinerun-with-matrix.yaml
+[pr-with-matrix-and-results]: ../examples/v1/pipelineruns/beta/pipelinerun-with-matrix-and-results.yaml
+[pr-with-matrix-context-variables]: ../examples/v1/pipelineruns/beta/pipelinerun-with-matrix-context-variables.yaml
+[pr-with-matrix-emitting-results]: ../examples/v1/pipelineruns/beta/pipelinerun-with-matrix-emitting-results.yaml
[retries]: pipelines.md#using-the-retries-field
diff --git a/upstream/docs/metrics.md b/upstream/docs/metrics.md
index 2e0e826989e..3df1df461ce 100644
--- a/upstream/docs/metrics.md
+++ b/upstream/docs/metrics.md
@@ -11,24 +11,24 @@ The following pipeline metrics are available at `controller-service` on port `90
We expose several kinds of exporters, including Prometheus, Google Stackdriver, and many others. You can set them up using [observability configuration](../config/config-observability.yaml).
-| Name | Type | Labels/Tags | Status |
-|-----------------------------------------------------------------------------------------| ----------- |-------------------------------------------------| ----------- |
+| Name | Type | Labels/Tags | Status |
+|-----------------------------------------------------------------------------------------| ----------- | ----------- | ----------- |
| `tekton_pipelines_controller_pipelinerun_duration_seconds_[bucket, sum, count]` | Histogram/LastValue(Gauge) | `*pipeline`=<pipeline_name> `*pipelinerun`=<pipelinerun_name> `status`=<status> `namespace`=<pipelinerun-namespace> | experimental |
-| `tekton_pipelines_controller_pipelinerun_taskrun_duration_seconds_[bucket, sum, count]` | Histogram/LastValue(Gauge) | `*pipeline`=<pipeline_name> `*pipelinerun`=<pipelinerun_name> `status`=<status> `*task`=<task_name> `*taskrun`=<taskrun_name> `namespace`=<pipelineruns-taskruns-namespace> `*reason`=<reason> | experimental |
-| `tekton_pipelines_controller_pipelinerun_count` | Counter | `status`=<status> `*reason`=<reason> | deprecate |
-| `tekton_pipelines_controller_pipelinerun_total` | Counter | `status`=<status> | experimental |
-| `tekton_pipelines_controller_running_pipelineruns_count` | Gauge | | deprecate |
-| `tekton_pipelines_controller_running_pipelineruns` | Gauge | | experimental |
-| `tekton_pipelines_controller_taskrun_duration_seconds_[bucket, sum, count]` | Histogram/LastValue(Gauge) | `status`=<status> `*task`=<task_name> `*taskrun`=<taskrun_name> `namespace`=<pipelineruns-taskruns-namespace> `*reason`=<reason> | experimental |
-| `tekton_pipelines_controller_taskrun_count` | Counter | `status`=<status> `*reason`=<reason> | deprecate |
-| `tekton_pipelines_controller_taskrun_total` | Counter | `status`=<status> | experimental |
-| `tekton_pipelines_controller_running_taskruns_count` | Gauge | | deprecate |
-| `tekton_pipelines_controller_running_taskruns` | Gauge | | experimental |
-| `tekton_pipelines_controller_running_taskruns_throttled_by_quota_count` | Gauge | `namespace`=<pipelinerun-namespace> | deprecate |
-| `tekton_pipelines_controller_running_taskruns_throttled_by_node_count` | Gauge | `namespace`=<pipelinerun-namespace> | deprecate |
-| `tekton_pipelines_controller_running_taskruns_throttled_by_quota` | Gauge | `namespace`=<pipelinerun-namespace> | experimental |
-| `tekton_pipelines_controller_running_taskruns_throttled_by_node` | Gauge | `namespace`=<pipelinerun-namespace> | experimental |
-| `tekton_pipelines_controller_client_latency_[bucket, sum, count]` | Histogram | | experimental |
+| `tekton_pipelines_controller_pipelinerun_taskrun_duration_seconds_[bucket, sum, count]` | Histogram/LastValue(Gauge) | `*pipeline`=<pipeline_name> `*pipelinerun`=<pipelinerun_name> `status`=<status> `*task`=<task_name> `*taskrun`=<taskrun_name> `namespace`=<pipelineruns-taskruns-namespace>| experimental |
+| `tekton_pipelines_controller_pipelinerun_count` | Counter | `status`=<status> | deprecate |
+| `tekton_pipelines_controller_pipelinerun_total` | Counter | `status`=<status> | experimental |
+| `tekton_pipelines_controller_running_pipelineruns_count` | Gauge | | deprecate |
+| `tekton_pipelines_controller_running_pipelineruns` | Gauge | | experimental |
+| `tekton_pipelines_controller_taskrun_duration_seconds_[bucket, sum, count]` | Histogram/LastValue(Gauge) | `status`=<status> `*task`=<task_name> `*taskrun`=<taskrun_name> `namespace`=<pipelineruns-taskruns-namespace> | experimental |
+| `tekton_pipelines_controller_taskrun_count` | Counter | `status`=<status> | deprecate |
+| `tekton_pipelines_controller_taskrun_total` | Counter | `status`=<status> | experimental |
+| `tekton_pipelines_controller_running_taskruns_count` | Gauge | | deprecate |
+| `tekton_pipelines_controller_running_taskruns` | Gauge | | experimental |
+| `tekton_pipelines_controller_running_taskruns_throttled_by_quota_count` | Gauge | | deprecate |
+| `tekton_pipelines_controller_running_taskruns_throttled_by_node_count` | Gauge | | deprecate |
+| `tekton_pipelines_controller_running_taskruns_throttled_by_quota` | Gauge | | experimental |
+| `tekton_pipelines_controller_running_taskruns_throttled_by_node` | Gauge | | experimental |
+| `tekton_pipelines_controller_client_latency_[bucket, sum, count]` | Histogram | | experimental |
The Labels/Tag marked as "*" are optional. And there's a choice between Histogram and LastValue(Gauge) for pipelinerun and taskrun duration metrics.
@@ -41,31 +41,25 @@ A sample config-map has been provided as [config-observability](./../config/conf
metrics.taskrun.level: "task"
metrics.taskrun.duration-type: "histogram"
metrics.pipelinerun.level: "pipeline"
- metrics.running-pipelinerun.level: ""
metrics.pipelinerun.duration-type: "histogram"
metrics.count.enable-reason: "false"
```
Following values are available in the configmap:
-| configmap data | value | description |
-| -- | ----------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| metrics.taskrun.level | `taskrun` | Level of metrics is taskrun |
-| metrics.taskrun.level | `task` | Level of metrics is task and taskrun label isn't present in the metrics |
-| metrics.taskrun.level | `namespace` | Level of metrics is namespace, and task and taskrun label isn't present in the metrics
-| metrics.pipelinerun.level | `pipelinerun` | Level of metrics is pipelinerun |
-| metrics.pipelinerun.level | `pipeline` | Level of metrics is pipeline and pipelinerun label isn't present in the metrics |
-| metrics.pipelinerun.level | `namespace` | Level of metrics is namespace, pipeline and pipelinerun label isn't present in the metrics |
-| metrics.running-pipelinerun.level | `pipelinerun` | Level of running-pipelinerun metrics is pipelinerun |
-| metrics.running-pipelinerun.level | `pipeline` | Level of running-pipelinerun metrics is pipeline and pipelinerun label isn't present in the metrics |
-| metrics.running-pipelinerun.level | `namespace` | Level of running-pipelinerun metrics is namespace, pipeline and pipelinerun label isn't present in the metrics |
-| metrics.running-pipelinerun.level | `` | Level of running-pipelinerun metrics is cluster, namespace, pipeline and pipelinerun label isn't present in the metrics. |
-| metrics.taskrun.duration-type | `histogram` | `tekton_pipelines_controller_pipelinerun_taskrun_duration_seconds` and `tekton_pipelines_controller_taskrun_duration_seconds` is of type histogram |
+| configmap data | value | description |
+| ---------- | ----------- | ----------- |
+| metrics.taskrun.level | `taskrun` | Level of metrics is taskrun |
+| metrics.taskrun.level | `task` | Level of metrics is task and taskrun label isn't present in the metrics |
+| metrics.taskrun.level | `namespace` | Level of metrics is namespace, and task and taskrun label isn't present in the metrics
+| metrics.pipelinerun.level | `pipelinerun` | Level of metrics is pipelinerun |
+| metrics.pipelinerun.level | `pipeline` | Level of metrics is pipeline and pipelinerun label isn't present in the metrics |
+| metrics.pipelinerun.level | `namespace` | Level of metrics is namespace, pipeline and pipelinerun label isn't present in the metrics |
+| metrics.taskrun.duration-type | `histogram` | `tekton_pipelines_controller_pipelinerun_taskrun_duration_seconds` and `tekton_pipelines_controller_taskrun_duration_seconds` is of type histogram |
| metrics.taskrun.duration-type | `lastvalue` | `tekton_pipelines_controller_pipelinerun_taskrun_duration_seconds` and `tekton_pipelines_controller_taskrun_duration_seconds` is of type gauge or lastvalue |
-| metrics.pipelinerun.duration-type | `histogram` | `tekton_pipelines_controller_pipelinerun_duration_seconds` is of type histogram |
-| metrics.pipelinerun.duration-type | `lastvalue` | `tekton_pipelines_controller_pipelinerun_duration_seconds` is of type gauge or lastvalue |
-| metrics.count.enable-reason | `false` | Sets if the `reason` label should be included on count metrics |
-| metrics.taskrun.throttle.enable-namespace | `false` | Sets if the `namespace` label should be included on the `tekton_pipelines_controller_running_taskruns_throttled_by_quota` metric |
+| metrics.pipelinerun.duration-type | `histogram` | `tekton_pipelines_controller_pipelinerun_duration_seconds` is of type histogram |
+| metrics.pipelinerun.duration-type | `lastvalue` | `tekton_pipelines_controller_pipelinerun_duration_seconds` is of type gauge or lastvalue |
+| metrics.count.enable-reason | `false` | Sets if the `reason` label should be included on count metrics |
Histogram value isn't available when pipelinerun or taskrun labels are selected. The Lastvalue or Gauge will be provided. Histogram would serve no purpose because it would generate a single bar. TaskRun and PipelineRun level metrics aren't recommended because they lead to an unbounded cardinality which degrades the observability database.
diff --git a/upstream/docs/migrating-v1beta1-to-v1.md b/upstream/docs/migrating-v1beta1-to-v1.md
index 925488d9135..badb5503b2f 100644
--- a/upstream/docs/migrating-v1beta1-to-v1.md
+++ b/upstream/docs/migrating-v1beta1-to-v1.md
@@ -50,9 +50,6 @@ In Tekton `v1`, the following fields have been changed:
`PipelineResources` and the `resources` fields of Task, TaskRun, Pipeline and PipelineRun have been removed. Please use `Tasks` instead. For more information, see [Replacing PipelineResources](https://github.com/tektoncd/pipeline/blob/main/docs/pipelineresources.md)
## Replacing `taskRef.bundle` and `pipelineRef.bundle` with Bundle Resolver
-
-**Note: `taskRef.bundle` and `pipelineRef.bundle` are now removed from `v1beta1`. This is kept for "history" purposes**.
-
Bundle resolver in remote resolution should be used instead of `taskRun.spec.taskRef.bundle` and `pipelineRun.spec.pipelineRef.bundle`.
The [`enable-bundles-resolver`](https://github.com/tektoncd/pipeline/blob/main/docs/install.md#customizing-the-pipelines-controller-behavior) feature flag must be enabled to use this feature.
diff --git a/upstream/docs/pipeline-api.md b/upstream/docs/pipeline-api.md
index d013bca86cb..c8e57271de6 100644
--- a/upstream/docs/pipeline-api.md
+++ b/upstream/docs/pipeline-api.md
@@ -304,22 +304,6 @@ resource being requested. For example: repo URL, commit SHA,
path to file, the kind of authentication to leverage, etc.
-
-
-url
-
-string
-
-
-
-(Optional)
-
URL is the runtime url passed to the resolver
-to help it figure out how to resolver the resource being
-requested.
-This is currently at an ALPHA stability level and subject to
-alpha API compatibility policies.
-
-
@@ -374,22 +358,6 @@ resource being requested. For example: repo URL, commit SHA,
path to file, the kind of authentication to leverage, etc.
-
-
-url
-
-string
-
-
-
-(Optional)
-
URL is the runtime url passed to the resolver
-to help it figure out how to resolver the resource being
-requested.
-This is currently at an ALPHA stability level and subject to
-alpha API compatibility policies.
Artifacts represents the collection of input and output artifacts associated with
a task run or a similar process. Artifacts in this context are units of data or resources
@@ -1899,7 +1852,7 @@ If Enum is not set, no input validation is performed for the param.
Provenance contains metadata about resources used in the TaskRun/PipelineRun
@@ -4147,23 +4100,6 @@ other Step or Sidecar that does not also request this Workspace will
not have access to it.
RestartPolicy refers to kubernetes RestartPolicy. It can only be set for an
-initContainer and must have it’s policy set to “Always”. It is currently
-left optional to help support Kubernetes versions prior to 1.29 when this feature
-was introduced.
-
-
SidecarState
@@ -4679,20 +4615,6 @@ It cannot be used when referencing StepActions using [v1.Step.Ref].
The Results declared by the StepActions will be stored here instead.
-
StepAction represents the actionable components of Step.
-The Step can only reference it from the cluster or using remote resolution.
+
Task represents a collection of sequential steps that are run as part of a
+Pipeline using a set of inputs and producing a set of outputs. Tasks execute
+when TaskRuns are created that provide the input parameters and resources and
+output resources the Task requires.
Params is a list of input parameters required to run the task. Params
+must be supplied as inputs in TaskRuns unless they declare a default
+value.
-command
+displayName
-[]string
+string
(Optional)
-
Entrypoint array. Not executed within a shell.
-The image’s ENTRYPOINT is used if this is not provided.
-Variable references $(VAR_NAME) are expanded using the container’s environment. If a variable
-cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
-to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. “$$(VAR_NAME)” will
-produce the string literal “$(VAR_NAME)”. Escaped references will never be expanded, regardless
-of whether the variable exists or not. Cannot be updated.
-More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+
DisplayName is a user-facing name of the task that may be
+used to populate a UI.
-args
+description
-[]string
+string
(Optional)
-
Arguments to the entrypoint.
-The image’s CMD is used if this is not provided.
-Variable references $(VAR_NAME) are expanded using the container’s environment. If a variable
-cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
-to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. “$$(VAR_NAME)” will
-produce the string literal “$(VAR_NAME)”. Escaped references will never be expanded, regardless
-of whether the variable exists or not. Cannot be updated.
-More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+
Description is a user-facing description of the task that may be
+used to populate a UI.
Step’s working directory.
-If not specified, the container runtime’s default will be used, which
-might be configured in the container image.
-Cannot be updated.
+
Volumes is a collection of volumes that are available to mount into the
+steps of the build.
SecurityContext defines the security options the Step should be run with.
-If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
-More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-The value set in StepAction will take precedence over the value from Task.
+
Workspaces are the volumes that this Task requires.
Volumes to mount into the Step’s filesystem.
-Cannot be updated.
+
Results are values that this Task can output
@@ -9541,14 +9400,13 @@ Cannot be updated.
-
Task
+
TaskRun
-
Task represents a collection of sequential steps that are run as part of a
-Pipeline using a set of inputs and producing a set of outputs. Tasks execute
-when TaskRuns are created that provide the input parameters and resources and
-output resources the Task requires.
-
Deprecated: Please use v1.Task instead.
+
TaskRun represents a single execution of a Task. TaskRuns are how the steps
+specified in a Task are executed; they specify the parameters and resources
+used to run the steps in a Task.
TaskRun represents a single execution of a Task. TaskRuns are how the steps
-specified in a Task are executed; they specify the parameters and resources
-used to run the steps in a Task.
Indicate if the artifact is a build output or a by-product
+
The artifact’s identifying category name
@@ -11233,8 +10870,7 @@ string
(Optional)
Bundle url reference to a Tekton Bundle.
-
Deprecated: Please use ResolverRef with the bundles resolver instead.
-The field is staying there for go client backward compatibility, but is not used/allowed anymore.
+
Deprecated: Please use ResolverRef with the bundles resolver instead.
Provenance contains metadata about resources used in the TaskRun/PipelineRun
@@ -13523,23 +13159,6 @@ other Step or Sidecar that does not also request this Workspace will
not have access to it.
RestartPolicy refers to kubernetes RestartPolicy. It can only be set for an
-initContainer and must have it’s policy set to “Always”. It is currently
-left optional to help support Kubernetes versions prior to 1.29 when this feature
-was introduced.
-
-
SidecarState
@@ -14181,209 +13800,6 @@ It cannot be used when referencing StepActions using [v1beta1.Step.Ref].
The Results declared by the StepActions will be stored here instead.
-
Entrypoint array. Not executed within a shell.
-The image’s ENTRYPOINT is used if this is not provided.
-Variable references $(VAR_NAME) are expanded using the container’s environment. If a variable
-cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
-to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. “$$(VAR_NAME)” will
-produce the string literal “$(VAR_NAME)”. Escaped references will never be expanded, regardless
-of whether the variable exists or not. Cannot be updated.
-More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
-
-
-
-
-args
-
-[]string
-
-
-
-(Optional)
-
Arguments to the entrypoint.
-The image’s CMD is used if this is not provided.
-Variable references $(VAR_NAME) are expanded using the container’s environment. If a variable
-cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
-to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. “$$(VAR_NAME)” will
-produce the string literal “$(VAR_NAME)”. Escaped references will never be expanded, regardless
-of whether the variable exists or not. Cannot be updated.
-More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
List of environment variables to set in the container.
-Cannot be updated.
-
-
-
-
-script
-
-string
-
-
-
-(Optional)
-
Script is the contents of an executable file to execute.
-
If Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.
-
-
-
-
-workingDir
-
-string
-
-
-
-(Optional)
-
Step’s working directory.
-If not specified, the container runtime’s default will be used, which
-might be configured in the container image.
-Cannot be updated.
SecurityContext defines the security options the Step should be run with.
-If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
-More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-The value set in StepAction will take precedence over the value from Task.
Deprecated: Please use ResolverRef with the bundles resolver instead.
-The field is staying there for go client backward compatibility, but is not used/allowed anymore.
+
Deprecated: Please use ResolverRef with the bundles resolver instead.
WhenExpressions are used to specify whether a Task should be executed or skipped
diff --git a/upstream/docs/pipelineruns.md b/upstream/docs/pipelineruns.md
index 6507066cde7..716bbb040b3 100644
--- a/upstream/docs/pipelineruns.md
+++ b/upstream/docs/pipelineruns.md
@@ -158,6 +158,10 @@ A `Tekton Bundle` is an OCI artifact that contains Tekton resources like `Tasks`
You can reference a `Tekton bundle` in a `TaskRef` in both `v1` and `v1beta1` using [remote resolution](./bundle-resolver.md#pipeline-resolution). The example syntax shown below for `v1` uses remote resolution and requires enabling [beta features](./additional-configs.md#beta-features).
+In `v1beta1`, you can also reference a `Tekton bundle` using OCI bundle syntax, which has been deprecated in favor of remote resolution. The example shown below for `v1beta1` uses OCI bundle syntax, and requires enabling `enable-tekton-oci-bundles: "true"` feature flag.
+
+{{< tabs >}}
+{{% tab "v1 & v1beta1" %}}
```yaml
spec:
pipelineRef:
@@ -170,6 +174,17 @@ spec:
- name: kind
value: Pipeline
```
+{{% /tab %}}
+
+{{% tab "v1beta1" %}}
+ ```yaml
+ spec:
+ pipelineRef:
+ name: mypipeline
+ bundle: docker.io/myrepo/mycatalog:v1.0
+ ```
+{{% /tab %}}
+{{< /tabs >}}
The syntax and caveats are similar to using `Tekton Bundles` for `Task` references
in [Pipelines](pipelines.md#tekton-bundles) or [TaskRuns](taskruns.md#tekton-bundles).
diff --git a/upstream/docs/pipelines.md b/upstream/docs/pipelines.md
index a8fab09577b..ebc0b2f9b0b 100644
--- a/upstream/docs/pipelines.md
+++ b/upstream/docs/pipelines.md
@@ -369,45 +369,6 @@ any resolved `param` value against the `enum` specified in each `PipelineTask` b
See usage in this [example](../examples/v1/pipelineruns/alpha/param-enum.yaml)
-#### Propagated Params
-
-Like with embedded [pipelineruns](pipelineruns.md#propagated-parameters), you can propagate `params` declared in the `pipeline` down to the inlined `pipelineTasks` and its inlined `Steps`. Wherever a resource (e.g. a `pipelineTask`) or a `StepAction` is referenced, the parameters need to be passed explicitly.
-
-For example, the following is a valid yaml.
-
-```yaml
-apiVersion: tekton.dev/v1 # or tekton.dev/v1beta1
-kind: Pipeline
-metadata:
- name: pipelien-propagated-params
-spec:
- params:
- - name: HELLO
- default: "Hello World!"
- - name: BYE
- default: "Bye World!"
- tasks:
- - name: echo-hello
- taskSpec:
- steps:
- - name: echo
- image: ubuntu
- script: |
- #!/usr/bin/env bash
- echo "$(params.HELLO)"
- - name: echo-bye
- taskSpec:
- steps:
- - name: echo-action
- ref:
- name: step-action-echo
- params:
- - name: msg
- value: "$(params.BYE)"
-```
-The same rules defined in [pipelineruns](pipelineruns.md#propagated-parameters) apply here.
-
-
## Adding `Tasks` to the `Pipeline`
Your `Pipeline` definition must reference at least one [`Task`](tasks.md).
@@ -624,24 +585,45 @@ There is currently a hard limit of 20 objects in a bundle.
You can reference a `Tekton bundle` in a `TaskRef` in both `v1` and `v1beta1` using [remote resolution](./bundle-resolver.md#pipeline-resolution). The example syntax shown below for `v1` uses remote resolution and requires enabling [beta features](./additional-configs.md#beta-features).
+In `v1beta1`, you can also reference a `Tekton bundle` using OCI bundle syntax, which has been deprecated in favor of remote resolution. The example shown below for `v1beta1` uses OCI bundle syntax, and requires enabling `enable-tekton-oci-bundles: "true"` feature flag.
+
+
+{{< tabs >}}
+{{% tab "v1 & v1beta1" %}}
+```yaml
+spec:
+ taskRef:
+ resolver: bundles
+ params:
+ - name: bundle
+ value: docker.io/myrepo/mycatalog
+ - name: name
+ value: echo-task
+ - name: kind
+ value: Task
+```
+{{% /tab %}}
+
+{{% tab "v1beta1" %}}
```yaml
spec:
tasks:
- name: hello-world
taskRef:
- resolver: bundles
- params:
- - name: bundle
- value: docker.io/myrepo/mycatalog
- - name: name
- value: echo-task
- - name: kind
- value: Task
+ name: echo-task
+ bundle: docker.com/myrepo/mycatalog
```
+{{% /tab %}}
+{{< /tabs >}}
+
+Here, the `bundle` field is the full reference URL to the artifact. The name is the
+`metadata.name` field of the `Task`.
You may also specify a `tag` as you would with a Docker image which will give you a fixed,
repeatable reference to a `Task`.
+{{< tabs >}}
+{{% tab "v1 & v1beta1" %}}
```yaml
spec:
taskRef:
@@ -654,9 +636,24 @@ spec:
- name: kind
value: Task
```
+{{% /tab %}}
+
+{{% tab "v1beta1" %}}
+```yaml
+spec:
+ tasks:
+ - name: hello-world
+ taskRef:
+ name: echo-task
+ bundle: docker.com/myrepo/mycatalog:v1.0.1
+```
+{{% /tab %}}
+{{< /tabs >}}
You may also specify a fixed digest instead of a tag.
+{{< tabs >}}
+{{% tab "v1 & v1beta1" %}}
```yaml
spec:
taskRef:
@@ -669,6 +666,19 @@ spec:
- name: kind
value: Task
```
+{{% /tab %}}
+
+{{% tab "v1beta1" %}}
+```yaml
+spec:
+ tasks:
+ - name: hello-world
+ taskRef:
+ name: echo-task
+ bundle: docker.io/myrepo/mycatalog@sha256:abc123
+```
+{{% /tab %}}
+{{< /tabs >}}
Any of the above options will fetch the image using the `ImagePullSecrets` attached to the
`ServiceAccount` specified in the `PipelineRun`.
@@ -741,6 +751,8 @@ tasks:
### Using the `onError` field
+> :seedling: **Specifying `onError` in `PipelineTasks` is an [alpha](additional-configs.md#alpha-features) feature.** The `enable-api-fields` feature flag must be set to `"alpha"` to specify `onError` in a `PipelineTask`.
+
When a `PipelineTask` fails, the rest of the `PipelineTasks` are skipped and the `PipelineRun` is declared a failure. If you would like to
ignore such `PipelineTask` failure and continue executing the rest of the `PipelineTasks`, you can specify `onError` for such a `PipelineTask`.
@@ -1265,10 +1277,6 @@ Tasks can emit [`Results`](tasks.md#emitting-results) when they execute. A Pipel
1. A Pipeline can pass the `Result` of a `Task` into the `Parameters` or `when` expressions of another.
2. A Pipeline can itself emit `Results` and include data from the `Results` of its Tasks.
-> **Note** Tekton does not enforce that results are produced at Task level. If a pipeline attempts to
-> consume a result that was declared by a Task, but not produced, it will fail. [TEP-0048](https://github.com/tektoncd/community/blob/main/teps/0048-task-results-without-results.md)
-> propopses introducing default values for results to help Pipeline authors manage this case.
-
### Passing one Task's `Results` into the `Parameters` or `when` expressions of another
Sharing `Results` between `Tasks` in a `Pipeline` happens via
diff --git a/upstream/docs/podtemplates.md b/upstream/docs/podtemplates.md
index 53bb70ca6e3..af0888d4bb9 100644
--- a/upstream/docs/podtemplates.md
+++ b/upstream/docs/podtemplates.md
@@ -23,6 +23,20 @@ See the following for examples of specifying a Pod template:
- [Specifying a Pod template for a `TaskRun`](./taskruns.md#specifying-a-pod-template)
- [Specifying a Pod template for a `PipelineRun`](./pipelineruns.md#specifying-a-pod-template)
+## Affinity Assistant Pod templates
+
+The Pod templates specified in the `TaskRuns` and `PipelineRuns` also apply to
+the [affinity assistant Pods](./workspaces.md#specifying-workspace-order-in-a-pipeline-and-affinity-assistants)
+that are created when using Workspaces, but only on select fields.
+
+The supported fields are: `tolerations`, `nodeSelector`, and
+`imagePullSecrets` (see the table below for more details).
+
+Similarly to Pod templates, you have the option to define a global affinity
+assistant Pod template [in your Tekton config](./additional-configs.md#customizing-basic-execution-parameters)
+using the key `default-affinity-assistant-pod-template`. The merge strategy is
+the same as the one described above.
+
## Supported fields
Pod templates support fields listed in the table below.
@@ -142,20 +156,6 @@ roleRef:
apiGroup: rbac.authorization.k8s.io
```
-# Affinity Assistant Pod templates
-
-The Pod templates specified in the `TaskRuns` and `PipelineRuns `also apply to
-the [affinity assistant Pods](#./workspaces.md#specifying-workspace-order-in-a-pipeline-and-affinity-assistants)
-that are created when using Workspaces, but only on selected fields.
-
-The supported fields for affinity assistant pods are: `tolerations`, `nodeSelector`, `securityContext`,
-`priorityClassName` and `imagePullSecrets` (see the table above for more details about the fields).
-
-Similarly to global Pod Template, you have the option to define a global affinity
-assistant Pod template [in your Tekton config](./additional-configs.md#customizing-basic-execution-parameters)
-using the key `default-affinity-assistant-pod-template`. The merge strategy is
-the same as the one described above for the supported fields.
-
---
Except as otherwise noted, the content of this page is licensed under the
diff --git a/upstream/docs/resolution-getting-started.md b/upstream/docs/resolution-getting-started.md
index 590fa3d828e..f27ea7894a5 100644
--- a/upstream/docs/resolution-getting-started.md
+++ b/upstream/docs/resolution-getting-started.md
@@ -18,7 +18,7 @@ with a Pipeline stored in a git repo.
- A computer with
[`kubectl`](https://kubernetes.io/docs/tasks/tools/#kubectl).
-- A Kubernetes cluster running at least Kubernetes 1.28. A [`kind`
+- A Kubernetes cluster running at least Kubernetes 1.27. A [`kind`
cluster](https://kind.sigs.k8s.io/docs/user/quick-start/#installation)
should work fine for following the guide on your local machine.
- An image registry that you can push images to. If you're using `kind`
@@ -125,7 +125,7 @@ EOF
kubectl apply -f ./pipelinerun.yaml
```
-## Step 4: Monitor the PipelineRun
+## Step 6: Monitor the PipelineRun
First let's watch the PipelineRun to see if it succeeds:
diff --git a/upstream/docs/resolver-reference.md b/upstream/docs/resolver-reference.md
index 9d38bc87606..68fa6fc9273 100644
--- a/upstream/docs/resolver-reference.md
+++ b/upstream/docs/resolver-reference.md
@@ -21,33 +21,13 @@ a resolver](./how-to-write-a-resolver.md).
Implementing this interface is required. It provides just enough
configuration for the framework to get a resolver running.
-{{% tabs %}}
-
-{{% tab "Upgraded Framework" %}}
-
-| Method to Implement | Description |
-|----------------------|-------------|
-| Initialize | Use this method to perform any setup required before the resolver starts receiving requests. |
-| GetName | Use this method to return a name to refer to your Resolver by. e.g. `"Git"` |
-| GetSelector | Use this method to specify the labels that a resolution request must have to be routed to your resolver. |
-| Validate | Use this method to validate the resolution Spec given to your resolver. |
-| Resolve | Use this method to perform get the resource based on the ResolutionRequestSpec as input and return it, along with any metadata about it in annotations |
-
-{{% /tab %}}
-
-{{% tab "Previous Framework (Deprecated)" %}}
-
| Method to Implement | Description |
|----------------------|-------------|
| Initialize | Use this method to perform any setup required before the resolver starts receiving requests. |
| GetName | Use this method to return a name to refer to your Resolver by. e.g. `"Git"` |
| GetSelector | Use this method to specify the labels that a resolution request must have to be routed to your resolver. |
-| ValidateParams | Use this method to validate the params given to your resolver. |
-| Resolve | Use this method to perform get the resource based on params as input and return it, along with any metadata about it in annotations |
-
-{{% /tab %}}
-
-{{% /tabs %}}
+| ValidateParams | Use this method to validate the parameters given to your resolver. |
+| Resolve | Use this method to perform get the resource and return it, along with any metadata about it in annotations |
## The `ConfigWatcher` Interface
@@ -58,7 +38,7 @@ api endpoints or base urls, service account names to use, etc...
| Method to Implement | Description |
|---------------------|-------------|
-| GetConfigName | Use this method to return the name of the configmap admins will use to configure this resolver. Once this interface is implemented your `Validate` and `Resolve` methods will be able to access your latest resolver configuration by calling `framework.GetResolverConfigFromContext(ctx)`. Note that this configmap must exist when your resolver starts - put a default one in your resolver's `config/` directory. |
+| GetConfigName | Use this method to return the name of the configmap admins will use to configure this resolver. Once this interface is implemented your `ValidateParams` and `Resolve` methods will be able to access your latest resolver configuration by calling `framework.GetResolverConfigFromContext(ctx)`. Note that this configmap must exist when your resolver starts - put a default one in your resolver's `config/` directory. |
## The `TimedResolution` Interface
diff --git a/upstream/docs/resolver-template/README.md b/upstream/docs/resolver-template/README.md
index 1469403a20f..fbd6a8b5355 100644
--- a/upstream/docs/resolver-template/README.md
+++ b/upstream/docs/resolver-template/README.md
@@ -19,19 +19,11 @@ You can use this as a template to quickly get a new Resolver up and
running with your own preferred storage backend.
To reuse the template, simply copy this entire subdirectory to a new
-directory.
-
-The entire program for the `latest` framework is defined in
-[`./cmd/resolver/main.go`](./cmd/resolver/main.go) and provides stub
-implementations of all the methods defined by the [`framework.Resolver`
-interface](../../pkg/remoteresolution/resolver/framework/interface.go).
-
-If you choose to use the previous framework (deprecated) is defined in
+directory. The entire program is defined in
[`./cmd/demoresolver/main.go`](./cmd/demoresolver/main.go) and provides stub
implementations of all the methods defined by the [`framework.Resolver`
interface](../../pkg/resolution/resolver/framework/interface.go).
-
Once copied you'll need to run `go mod init` and `go mod tidy` at the root
of your project. We don't need this in `tektoncd/resolution` because this
submodule relies on the `go.mod` and `go.sum` defined at the root of the repo.
diff --git a/upstream/docs/resolver-template/cmd/demoresolver/main.go b/upstream/docs/resolver-template/cmd/demoresolver/main.go
index 881be8ba961..4c11d7164c2 100644
--- a/upstream/docs/resolver-template/cmd/demoresolver/main.go
+++ b/upstream/docs/resolver-template/cmd/demoresolver/main.go
@@ -32,7 +32,6 @@ func main() {
)
}
-// Deprecated
type resolver struct{}
// Initialize sets up any dependencies needed by the resolver. None atm.
diff --git a/upstream/docs/resolver-template/cmd/resolver/main.go b/upstream/docs/resolver-template/cmd/resolver/main.go
deleted file mode 100644
index 1484b62a0d7..00000000000
--- a/upstream/docs/resolver-template/cmd/resolver/main.go
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-import (
- "context"
- "errors"
- "fmt"
- neturl "net/url"
-
- pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
- "github.com/tektoncd/pipeline/pkg/resolution/common"
- frameworkV1 "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
- filteredinformerfactory "knative.dev/pkg/client/injection/kube/informers/factory/filtered"
- "knative.dev/pkg/injection/sharedmain"
-)
-
-func main() {
- ctx := filteredinformerfactory.WithSelectors(context.Background(), v1beta1.ManagedByLabelKey)
- sharedmain.MainWithContext(ctx, "controller",
- framework.NewController(ctx, &resolver{}),
- )
-}
-
-type resolver struct{}
-
-// Initialize sets up any dependencies needed by the resolver. None atm.
-func (r *resolver) Initialize(context.Context) error {
- return nil
-}
-
-// GetName returns a string name to refer to this resolver by.
-func (r *resolver) GetName(context.Context) string {
- return "Demo"
-}
-
-// GetSelector returns a map of labels to match requests to this resolver.
-func (r *resolver) GetSelector(context.Context) map[string]string {
- return map[string]string{
- common.LabelKeyResolverType: "demo",
- }
-}
-
-// Validate ensures resolution spec from a request is as expected.
-func (r *resolver) Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error {
- if len(req.Params) > 0 {
- return errors.New("no params allowed")
- }
- u, err := neturl.ParseRequestURI(req.URL)
- if err != nil {
- return err
- }
- if u.Scheme != "demoscheme" {
- return fmt.Errorf("Invalid Scheme. Want %s, Got %s", "demoscheme", u.Scheme)
- }
- return nil
-}
-
-// Resolve uses the given resolution spec to resolve the requested file or resource.
-func (r *resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (frameworkV1.ResolvedResource, error) {
- return &myResolvedResource{}, nil
-}
-
-// our hard-coded resolved file to return
-const pipeline = `
-apiVersion: tekton.dev/v1
-kind: Pipeline
-metadata:
- name: my-pipeline
-spec:
- tasks:
- - name: hello-world
- taskSpec:
- steps:
- - image: alpine:3.15.1
- script: |
- echo "hello world"
-`
-
-// myResolvedResource wraps the data we want to return to Pipelines
-type myResolvedResource struct{}
-
-// Data returns the bytes of our hard-coded Pipeline
-func (*myResolvedResource) Data() []byte {
- return []byte(pipeline)
-}
-
-// Annotations returns any metadata needed alongside the data. None atm.
-func (*myResolvedResource) Annotations() map[string]string {
- return nil
-}
-
-// RefSource is the source reference of the remote data that records where the remote
-// file came from including the url, digest and the entrypoint. None atm.
-func (*myResolvedResource) RefSource() *pipelinev1.RefSource {
- return nil
-}
diff --git a/upstream/docs/resolver-template/cmd/resolver/main_test.go b/upstream/docs/resolver-template/cmd/resolver/main_test.go
deleted file mode 100644
index 46c6eedb160..00000000000
--- a/upstream/docs/resolver-template/cmd/resolver/main_test.go
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- Copyright 2024 The Tekton Authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package main
-
-import (
- "encoding/base64"
- "errors"
- "testing"
- "time"
-
- pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
- frtesting "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/testing"
- resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
- "github.com/tektoncd/pipeline/test"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- v1 "knative.dev/pkg/apis/duck/v1"
- _ "knative.dev/pkg/system/testing"
-)
-
-func TestResolver(t *testing.T) {
- ctx, _ := ttesting.SetupFakeContext(t)
-
- r := &resolver{}
-
- request := &v1beta1.ResolutionRequest{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "resolution.tekton.dev/v1beta1",
- Kind: "ResolutionRequest",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "rr",
- Namespace: "foo",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{
- resolutioncommon.LabelKeyResolverType: "demo",
- },
- },
- Spec: v1beta1.ResolutionRequestSpec{
- URL: "demoscheme://foo/bar",
- },
- }
- d := test.Data{
- ResolutionRequests: []*v1beta1.ResolutionRequest{request},
- }
-
- expectedStatus := &v1beta1.ResolutionRequestStatus{
- ResolutionRequestStatusFields: v1beta1.ResolutionRequestStatusFields{
- Data: base64.StdEncoding.Strict().EncodeToString([]byte(pipeline)),
- },
- }
-
- // If you want to test scenarios where an error should occur, pass a non-nil error to RunResolverReconcileTest
- var expectedErr error
-
- frtesting.RunResolverReconcileTest(ctx, t, d, r, request, expectedStatus, expectedErr)
-}
-
-func TestResolver_Failure_Wrong_Scheme(t *testing.T) {
- ctx, _ := ttesting.SetupFakeContext(t)
-
- r := &resolver{}
-
- request := &v1beta1.ResolutionRequest{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "resolution.tekton.dev/v1beta1",
- Kind: "ResolutionRequest",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "rr",
- Namespace: "foo",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{
- resolutioncommon.LabelKeyResolverType: "demo",
- },
- },
- Spec: v1beta1.ResolutionRequestSpec{
- URL: "wrongscheme://foo/bar",
- },
- }
- d := test.Data{
- ResolutionRequests: []*v1beta1.ResolutionRequest{request},
- }
-
- expectedStatus := &v1beta1.ResolutionRequestStatus{
- Status: v1.Status{
- Conditions: v1.Conditions{
- {
- Type: "Succeeded",
- Status: "False",
- Reason: "ResolutionFailed",
- Message: `invalid resource request "foo/rr": Invalid Scheme. Want demoscheme, Got wrongscheme`,
- },
- },
- },
- }
-
- // If you want to test scenarios where an error should occur, pass a non-nil error to RunResolverReconcileTest
- expectedErr := errors.New(`invalid resource request "foo/rr": Invalid Scheme. Want demoscheme, Got wrongscheme`)
- frtesting.RunResolverReconcileTest(ctx, t, d, r, request, expectedStatus, expectedErr)
-}
-
-func TestResolver_Failure_InvalidUrl(t *testing.T) {
- ctx, _ := ttesting.SetupFakeContext(t)
-
- r := &resolver{}
-
- request := &v1beta1.ResolutionRequest{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "resolution.tekton.dev/v1beta1",
- Kind: "ResolutionRequest",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "rr",
- Namespace: "foo",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{
- resolutioncommon.LabelKeyResolverType: "demo",
- },
- },
- Spec: v1beta1.ResolutionRequestSpec{
- URL: "foo/bar",
- },
- }
- d := test.Data{
- ResolutionRequests: []*v1beta1.ResolutionRequest{request},
- }
-
- expectedStatus := &v1beta1.ResolutionRequestStatus{
- Status: v1.Status{
- Conditions: v1.Conditions{
- {
- Type: "Succeeded",
- Status: "False",
- Reason: "ResolutionFailed",
- Message: `invalid resource request "foo/rr": parse "foo/bar": invalid URI for request`,
- },
- },
- },
- }
-
- // If you want to test scenarios where an error should occur, pass a non-nil error to RunResolverReconcileTest
- expectedErr := errors.New(`invalid resource request "foo/rr": parse "foo/bar": invalid URI for request`)
- frtesting.RunResolverReconcileTest(ctx, t, d, r, request, expectedStatus, expectedErr)
-}
-
-func TestResolver_Failure_InvalidParams(t *testing.T) {
- ctx, _ := ttesting.SetupFakeContext(t)
-
- r := &resolver{}
-
- request := &v1beta1.ResolutionRequest{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "resolution.tekton.dev/v1beta1",
- Kind: "ResolutionRequest",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "rr",
- Namespace: "foo",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{
- resolutioncommon.LabelKeyResolverType: "demo",
- },
- },
- Spec: v1beta1.ResolutionRequestSpec{
- Params: []pipelinev1.Param{{
- Name: "foo",
- Value: *pipelinev1.NewStructuredValues("bar"),
- }},
- },
- }
- d := test.Data{
- ResolutionRequests: []*v1beta1.ResolutionRequest{request},
- }
-
- expectedStatus := &v1beta1.ResolutionRequestStatus{
- Status: v1.Status{
- Conditions: v1.Conditions{
- {
- Type: "Succeeded",
- Status: "False",
- Reason: "ResolutionFailed",
- Message: `invalid resource request "foo/rr": no params allowed`,
- },
- },
- },
- }
-
- // If you want to test scenarios where an error should occur, pass a non-nil error to RunResolverReconcileTest
- expectedErr := errors.New(`invalid resource request "foo/rr": no params allowed`)
- frtesting.RunResolverReconcileTest(ctx, t, d, r, request, expectedStatus, expectedErr)
-}
diff --git a/upstream/docs/stepactions.md b/upstream/docs/stepactions.md
index d78932fc283..afaa35ce003 100644
--- a/upstream/docs/stepactions.md
+++ b/upstream/docs/stepactions.md
@@ -18,12 +18,11 @@ weight: 201
- [Declaring VolumeMounts](#declaring-volumemounts)
- [Referencing a StepAction](#referencing-a-stepaction)
- [Specifying Remote StepActions](#specifying-remote-stepactions)
- - [Controlling Step Execution with when Expressions](#controlling-step-execution-with-when-expressions)
- [Known Limitations](#known-limitations)
- [Cannot pass Step Results between Steps](#cannot-pass-step-results-between-steps)
## Overview
-> :seedling: **`StepActions` is an [beta](additional-configs.md#beta-features) feature.**
+> :seedling: **`StepActions` is an [alpha](additional-configs.md#alpha-features) feature.**
> The `enable-step-actions` feature flag must be set to `"true"` to specify a `StepAction` in a `Step`.
A `StepAction` is the reusable and scriptable unit of work that is performed by a `Step`.
@@ -63,7 +62,7 @@ A `StepAction` definition supports the following fields:
The example below demonstrates the use of most of the above-mentioned fields:
```yaml
-apiVersion: tekton.dev/v1beta1
+apiVersion: tekton.dev/v1alpha1
kind: StepAction
metadata:
name: example-stepaction-name
@@ -83,7 +82,7 @@ Like with `Tasks`, a `StepAction` must declare all the parameters that it uses.
`Parameters` are passed to the `StepAction` from its corresponding `Step` referencing it.
```yaml
-apiVersion: tekton.dev/v1beta1
+apiVersion: tekton.dev/v1alpha1
kind: StepAction
metadata:
name: stepaction-using-params
@@ -522,104 +521,3 @@ spec:
```
The default resolver type can be configured by the `default-resolver-type` field in the `config-defaults` ConfigMap (`alpha` feature). See [additional-configs.md](./additional-configs.md) for details.
-
-### Controlling Step Execution with when Expressions
-
-You can define `when` in a `step` to control its execution.
-
-The components of `when` expressions are `input`, `operator`, `values`, `cel`:
-
-| Component | Description | Syntax |
-|------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `input` | Input for the `when` expression, defaults to an empty string if not provided. | * Static values e.g. `"ubuntu"` * Variables (parameters or results) e.g. `"$(params.image)"` or `"$(tasks.task1.results.image)"` or `"$(tasks.task1.results.array-results[1])"` |
-| `operator` | `operator` represents an `input`'s relationship to a set of `values`, a valid `operator` must be provided. | `in` or `notin` |
-| `values` | An array of string values, the `values` array must be provided and has to be non-empty. | * An array param e.g. `["$(params.images[*])"]` * An array result of a task `["$(tasks.task1.results.array-results[*])"]` * An array result of a step`["(steps.step1.results.array-results[*])"]` * `values` can contain static values e.g. `"ubuntu"` * `values` can contain variables (parameters or results) or a Workspaces's `bound` state e.g. `["$(params.image)"]` or `["$(steps.step1.results.image)"]` or `["$(tasks.task1.results.array-results[1])"]` or `["$(steps.step1.results.array-results[1])"]` |
-| `cel` | The Common Expression Language (CEL) implements common semantics for expression evaluation, enabling different applications to more easily interoperate. This is an `alpha` feature, `enable-cel-in-whenexpression` needs to be set to true to use this feature. | [cel-syntax](https://github.com/google/cel-spec/blob/master/doc/langdef.md#syntax)
-
-The below example shows how to use when expressions to control step executions:
-
-```yaml
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
- name: my-pvc-2
-spec:
- resources:
- requests:
- storage: 5Gi
- volumeMode: Filesystem
- accessModes:
- - ReadWriteOnce
----
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- generateName: step-when-example
-spec:
- workspaces:
- - name: custom
- persistentVolumeClaim:
- claimName: my-pvc-2
- taskSpec:
- description: |
- A simple task that shows how to use when determine if a step should be executed
- steps:
- - name: should-execute
- image: bash:latest
- script: |
- #!/usr/bin/env bash
- echo "executed..."
- when:
- - input: "$(workspaces.custom.bound)"
- operator: in
- values: [ "true" ]
- - name: should-skip
- image: bash:latest
- script: |
- #!/usr/bin/env bash
- echo skipskipskip
- when:
- - input: "$(workspaces.custom2.bound)"
- operator: in
- values: [ "true" ]
- - name: should-continue
- image: bash:latest
- script: |
- #!/usr/bin/env bash
- echo blabalbaba
- - name: produce-step
- image: alpine
- results:
- - name: result2
- type: string
- script: |
- echo -n "foo" | tee $(step.results.result2.path)
- - name: run-based-on-step-results
- image: alpine
- script: |
- echo "wooooooo"
- when:
- - input: "$(steps.produce-step.results.result2)"
- operator: in
- values: [ "bar" ]
- workspaces:
- - name: custom
-```
-
-The StepState for a skipped step looks like something similar to the below:
-```yaml
- {
- "container": "step-run-based-on-step-results",
- "imageID": "docker.io/library/alpine@sha256:c5b1261d6d3e43071626931fc004f70149baeba2c8ec672bd4f27761f8e1ad6b",
- "name": "run-based-on-step-results",
- "terminated": {
- "containerID": "containerd://bf81162e79cf66a2bbc03e3654942d3464db06ff368c0be263a8a70f363a899b",
- "exitCode": 0,
- "finishedAt": "2024-03-26T03:57:47Z",
- "reason": "Completed",
- "startedAt": "2024-03-26T03:57:47Z"
- },
- "terminationReason": "Skipped"
- }
-```
-Where `terminated.exitCode` is `0` and `terminationReason` is `Skipped` to indicate the Step exited successfully and was skipped.
\ No newline at end of file
diff --git a/upstream/docs/taskruns.md b/upstream/docs/taskruns.md
index 1dd4d040a6c..ae93416eb5e 100644
--- a/upstream/docs/taskruns.md
+++ b/upstream/docs/taskruns.md
@@ -120,6 +120,10 @@ A `Tekton Bundle` is an OCI artifact that contains Tekton resources like `Tasks`
You can reference a `Tekton bundle` in a `TaskRef` in both `v1` and `v1beta1` using [remote resolution](./bundle-resolver.md#pipeline-resolution). The example syntax shown below for `v1` uses remote resolution and requires enabling [beta features](./additional-configs.md#beta-features).
+In `v1beta1`, you can also reference a `Tekton bundle` using OCI bundle syntax, which has been deprecated in favor of remote resolution. The example shown below for `v1beta1` uses OCI bundle syntax, and requires enabling `enable-tekton-oci-bundles: "true"` feature flag.
+
+{{< tabs >}}
+{{% tab "v1 & v1beta1" %}}
```yaml
spec:
taskRef:
@@ -132,9 +136,25 @@ spec:
- name: kind
value: Task
```
+{{% /tab %}}
+
+{{% tab "v1beta1" %}}
+```yaml
+spec:
+  taskRef:
+    name: echo-task
+    bundle: docker.io/myrepo/mycatalog
+```
+{{% /tab %}}
+{{< /tabs >}}
+
+Here, the `bundle` field is the full reference URL to the artifact. The name is the
+`metadata.name` field of the `Task`.
You may also specify a `tag` as you would with a Docker image which will give you a repeatable reference to a `Task`.
+{{< tabs >}}
+{{% tab "v1 & v1beta1" %}}
```yaml
spec:
taskRef:
@@ -147,9 +167,22 @@ spec:
- name: kind
value: Task
```
+{{% /tab %}}
+
+{{% tab "v1beta1" %}}
+```yaml
+spec:
+  taskRef:
+    name: echo-task
+    bundle: docker.io/myrepo/mycatalog:v1.0.1
+```
+{{% /tab %}}
+{{< /tabs >}}
You may also specify a fixed digest instead of a tag which ensures the referenced task is constant.
+{{< tabs >}}
+{{% tab "v1 & v1beta1" %}}
```yaml
spec:
taskRef:
@@ -162,6 +195,17 @@ spec:
- name: kind
value: Task
```
+{{% /tab %}}
+
+{{% tab "v1beta1" %}}
+```yaml
+spec:
+  taskRef:
+    name: echo-task
+    bundle: docker.io/myrepo/mycatalog@sha256:abc123
+```
+{{% /tab %}}
+{{< /tabs >}}
A working example can be found [here](../examples/v1beta1/taskruns/no-ci/tekton-bundles.yaml).
@@ -374,7 +418,7 @@ may be overridden by a TaskRun's StepSpecs and SidecarSpecs.
### Specifying Task-level `ComputeResources`
-**([beta only](https://github.com/tektoncd/pipeline/blob/main/docs/additional-configs.md#beta-features))**
+**([alpha only](https://github.com/tektoncd/pipeline/blob/main/docs/additional-configs.md#alpha-features))**
Task-level compute resources can be configured in `TaskRun.ComputeResources`, or `PipelineRun.TaskRunSpecs.ComputeResources`.
@@ -599,7 +643,7 @@ and reasons.
### Configuring Task Steps and Sidecars in a TaskRun
-**([beta only](https://github.com/tektoncd/pipeline/blob/main/docs/additional-configs.md#beta-features))**
+**([alpha only](https://github.com/tektoncd/pipeline/blob/main/docs/additional-configs.md#alpha-features))**
A TaskRun can specify `StepSpecs` or `SidecarSpecs` to configure Step or Sidecar
specified in a Task. Only named Steps and Sidecars may be configured.
@@ -802,21 +846,20 @@ steps:
The following tables shows how to read the overall status of a `TaskRun`:
-| `status` | `reason` | `message` | `completionTime` is set | Description |
-|:---------|:-----------------------|:------------------------------------------------------------------|:-----------------------:|--------------------------------------------------------------------------------------------------:|
-| Unknown | Started | n/a | No | The TaskRun has just been picked up by the controller. |
-| Unknown | Pending | n/a | No | The TaskRun is waiting on a Pod in status Pending. |
-| Unknown | Running | n/a | No | The TaskRun has been validated and started to perform its work. |
-| Unknown | TaskRunCancelled | n/a | No | The user requested the TaskRun to be cancelled. Cancellation has not been done yet. |
-| True | Succeeded | n/a | Yes | The TaskRun completed successfully. |
-| False | Failed | n/a | Yes | The TaskRun failed because one of the steps failed. |
-| False | \[Error message\] | n/a | No | The TaskRun encountered a non-permanent error, and it's still running. It may ultimately succeed. |
-| False | \[Error message\] | n/a | Yes | The TaskRun failed with a permanent error (usually validation). |
-| False | TaskRunCancelled | n/a | Yes | The TaskRun was cancelled successfully. |
-| False | TaskRunCancelled | TaskRun cancelled as the PipelineRun it belongs to has timed out. | Yes | The TaskRun was cancelled because the PipelineRun timed out. |
-| False | TaskRunTimeout | n/a | Yes | The TaskRun timed out. |
-| False | TaskRunImagePullFailed | n/a | Yes | The TaskRun failed due to one of its steps not being able to pull the image. |
-| False | FailureIgnored | n/a | Yes | The TaskRun failed but the failure was ignored. |
+`status` | `reason` | `message` | `completionTime` is set | Description
+:--------|:-----------------------|:------------------------------------------------------------------|:-----------------------:|-------------------------------------------------------------------------------------------------:
+Unknown | Started | n/a | No | The TaskRun has just been picked up by the controller.
+Unknown | Pending | n/a | No | The TaskRun is waiting on a Pod in status Pending.
+Unknown | Running | n/a | No | The TaskRun has been validated and started to perform its work.
+Unknown | TaskRunCancelled | n/a | No | The user requested the TaskRun to be cancelled. Cancellation has not been done yet.
+True | Succeeded | n/a | Yes | The TaskRun completed successfully.
+False | Failed | n/a | Yes | The TaskRun failed because one of the steps failed.
+False | \[Error message\] | n/a | No | The TaskRun encountered a non-permanent error, and it's still running. It may ultimately succeed.
+False | \[Error message\] | n/a | Yes | The TaskRun failed with a permanent error (usually validation).
+False | TaskRunCancelled | n/a | Yes | The TaskRun was cancelled successfully.
+False | TaskRunCancelled | TaskRun cancelled as the PipelineRun it belongs to has timed out. | Yes | The TaskRun was cancelled because the PipelineRun timed out.
+False | TaskRunTimeout | n/a | Yes | The TaskRun timed out.
+False | TaskRunImagePullFailed | n/a | Yes | The TaskRun failed due to one of its steps not being able to pull the image.
When a `TaskRun` changes status, [events](events.md#taskruns) are triggered accordingly.
@@ -909,18 +952,6 @@ spec:
onFailure: "enabled"
```
-### Breakpoint before step
-
-If you want to set a breakpoint before the step is executed, you can add the step name to the `beforeSteps` field in the following way:
-
-```yaml
-spec:
- debug:
- breakpoints:
- beforeSteps:
- - {{ stepName }}
-```
-
Upon failure of a step, the TaskRun Pod execution is halted. If this TaskRun Pod continues to run without any lifecycle
change done by the user (running the debug-continue or debug-fail-continue script) the TaskRun would be subject to
[TaskRunTimeout](#configuring-the-failure-timeout).
@@ -943,10 +974,6 @@ perform :-
`debug-fail-continue`: Mark the step as a failure and exit the breakpoint.
-`debug-beforestep-continue`: Mark the step continue to execute
-
-`debug-beforestep-fail-continue`: Mark the step not continue to execute
-
*More information on the inner workings of debug can be found in the [Debug documentation](debug.md)*
## Code examples
diff --git a/upstream/docs/tasks.md b/upstream/docs/tasks.md
index d01c29dd915..0529e5ff8ee 100644
--- a/upstream/docs/tasks.md
+++ b/upstream/docs/tasks.md
@@ -858,12 +858,6 @@ precise string you want returned from your `Task` into the result files that you
The stored results can be used [at the `Task` level](./pipelines.md#passing-one-tasks-results-into-the-parameters-or-when-expressions-of-another)
or [at the `Pipeline` level](./pipelines.md#emitting-results-from-a-pipeline).
-> **Note** Tekton does not enforce Task results unless there is a consumer: when a Task declares a result,
-> it may complete successfully even if no result was actually produced. When a Task that declares results is
-> used in a Pipeline, and a component of the Pipeline attempts to consume the Task's result, if the result
-> was not produced the pipeline will fail. [TEP-0048](https://github.com/tektoncd/community/blob/main/teps/0048-task-results-without-results.md)
-> propopses introducing default values for results to help Pipeline authors manage this case.
-
#### Emitting Object `Results`
Emitting a task result of type `object` is implemented based on the
[TEP-0075](https://github.com/tektoncd/community/blob/main/teps/0075-object-param-and-result-types.md#emitting-object-results).
@@ -1023,7 +1017,7 @@ As a general rule-of-thumb, if a result needs to be larger than a kilobyte, you
#### Larger `Results` using sidecar logs
-This is a beta feature which is guarded behind its own feature flag. The `results-from` feature flag must be set to
+This is an alpha feature which is guarded behind its own feature flag. The `results-from` feature flag must be set to
[`"sidecar-logs"`](./install.md#enabling-larger-results-using-sidecar-logs) to enable larger results using sidecar logs.
Instead of using termination messages to store results, the taskrun controller injects a sidecar container which monitors
@@ -1113,11 +1107,6 @@ to run alongside the `Steps` in your `Task`. You can use `Sidecars` to provide a
`Sidecars` spin up before your `Task` executes and are deleted after the `Task` execution completes.
For further information, see [`Sidecars` in `TaskRuns`](taskruns.md#specifying-sidecars).
-**Note**: Starting in v0.62 you can enable native Kubernetes sidecar support using the `enable-kubernetes-sidecar` feature flag ([see instructions](./additional-configs.md#customizing-the-pipelines-controller-behavior)). If kubernetes does not wait for your sidecar application to be ready, use a `startupProbe` to help kubernetes identify when it is ready.
-
-Refer to the detailed instructions listed in [additional config](additional-configs.md#enabling-larger-results-using-sidecar-logs)
-to learn how to enable this feature.
-
In the example below, a `Step` uses a Docker-in-Docker `Sidecar` to build a Docker image:
```yaml
diff --git a/upstream/docs/tekton-controller-flags.md b/upstream/docs/tekton-controller-flags.md
deleted file mode 100644
index 34d62b3cd1d..00000000000
--- a/upstream/docs/tekton-controller-flags.md
+++ /dev/null
@@ -1,82 +0,0 @@
-
-
-# Tekton Controllers flags
-
-The different controllers `tektoncd/pipeline` ships comes with a set of flags
-that can be changed (in the `yaml` payloads) for advanced use cases. This page
-is documenting them.
-
-## Common set of flags
-
-The following flags are available on all "controllers", aka `controller`, `webhook`, `events` and `resolvers`.
-
-```
- -add_dir_header
- If true, adds the file directory to the header of the log messages
- -alsologtostderr
- log to standard error as well as files (no effect when -logtostderr=true)
- -cluster string
- Defaults to the current cluster in kubeconfig.
- -disable-ha
- Whether to disable high-availability functionality for this component. This flag will be deprecated and removed when we have promoted this feature to stable, so do not pass it without filing an issue upstream!
- -kube-api-burst int
- Maximum burst for throttle.
- -kube-api-qps float
- Maximum QPS to the server from the client.
- -kubeconfig string
- Path to a kubeconfig. Only required if out-of-cluster.
- -log_backtrace_at value
- when logging hits line file:N, emit a stack trace
- -log_dir string
- If non-empty, write log files in this directory (no effect when -logtostderr=true)
- -log_file string
- If non-empty, use this log file (no effect when -logtostderr=true)
- -log_file_max_size uint
- Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
- -logtostderr
- log to standard error instead of files (default true)
- -one_output
- If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)
- -server string
- The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.
- -skip_headers
- If true, avoid header prefixes in the log messages
- -skip_log_headers
- If true, avoid headers when opening log files (no effect when -logtostderr=true)
- -stderrthreshold value
- logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false) (default 2)
- -v value
- number for the log level verbosity
- -vmodule value
- comma-separated list of pattern=N settings for file-filtered logging
-```
-
-## `controller`
-
-The main controller binary has additional flags to configure its behavior.
-
-```
- -entrypoint-image string
- The container image containing our entrypoint binary.
- -namespace string
- Namespace to restrict informer to. Optional, defaults to all namespaces.
- -nop-image string
- The container image used to stop sidecars
- -resync-period duration
- The period between two resync run (going through all objects) (default 10h0m0s)
- -shell-image string
- The container image containing a shell
- -shell-image-win string
- The container image containing a windows shell
- -sidecarlogresults-image string
- The container image containing the binary for accessing results.
- -threads-per-controller int
- Threads (goroutines) to create per controller (default 2)
- -workingdirinit-image string
- The container image containing our working dir init binary.
-```
diff --git a/upstream/docs/variables.md b/upstream/docs/variables.md
index 8b3b9a77031..df3dadd818f 100644
--- a/upstream/docs/variables.md
+++ b/upstream/docs/variables.md
@@ -15,67 +15,63 @@ For instructions on using variable substitutions see the relevant section of [th
## Variables available in a `Pipeline`
-| Variable | Description |
-|----------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `params.` | The value of the parameter at runtime. |
-| `params['']` | (see above) |
-| `params[""]` | (see above) |
-| `params.[*]` | Get the whole param array or object. |
-| `params[''][*]` | (see above) |
-| `params[""][*]` | (see above) |
-| `params.[i]` | Get the i-th element of param array. This is alpha feature, set `enable-api-fields` to `alpha` to use it. |
-| `params[''][i]` | (see above) |
-| `params[""][i]` | (see above) |
-| `params.[*]` | Get the value of the whole object param. This is alpha feature, set `enable-api-fields` to `alpha` to use it. |
-| `params..` | Get the value of an individual child of an object param. This is alpha feature, set `enable-api-fields` to `alpha` to use it. |
-| `tasks..matrix.length` | The length of the `Matrix` combination count. |
-| `tasks..results.` | The value of the `Task's` result. Can alter `Task` execution order within a `Pipeline`.) |
-| `tasks..results.[i]` | The ith value of the `Task's` array result. Can alter `Task` execution order within a `Pipeline`.) |
-| `tasks..results.[*]` | The array value of the `Task's` result. Can alter `Task` execution order within a `Pipeline`. Cannot be used in `script`.) |
-| `tasks..results..key` | The `key` value of the `Task's` object result. Can alter `Task` execution order within a `Pipeline`.) |
-| `tasks..matrix..length` | The length of the matrixed `Task's` results. (Can alter `Task` execution order within a `Pipeline`.) |
-| `workspaces..bound` | Whether a `Workspace` has been bound or not. "false" if the `Workspace` declaration has `optional: true` and the Workspace binding was omitted by the PipelineRun. |
-| `context.pipelineRun.name` | The name of the `PipelineRun` that this `Pipeline` is running in. |
-| `context.pipelineRun.namespace` | The namespace of the `PipelineRun` that this `Pipeline` is running in. |
-| `context.pipelineRun.uid` | The uid of the `PipelineRun` that this `Pipeline` is running in. |
-| `context.pipeline.name` | The name of this `Pipeline` . |
-| `tasks..status` | The execution status of the specified `pipelineTask`, only available in `finally` tasks. The execution status can be set to any one of the values (`Succeeded`, `Failed`, or `None`) described [here](pipelines.md#using-execution-status-of-pipelinetask). |
-| `tasks..reason` | The execution reason of the specified `pipelineTask`, only available in `finally` tasks. The reason can be set to any one of the values (`Failed`, `TaskRunCancelled`, `TaskRunTimeout`, `FailureIgnored`, etc ) described [here](taskruns.md#monitoring-execution-status). |
-| `tasks.status` | An aggregate status of all the `pipelineTasks` under the `tasks` section (excluding the `finally` section). This variable is only available in the `finally` tasks and can have any one of the values (`Succeeded`, `Failed`, `Completed`, or `None`) described [here](pipelines.md#using-aggregate-execution-status-of-all-tasks). |
-| `context.pipelineTask.retries` | The retries of this `PipelineTask`. |
-| `tasks..outputs.` | The value of a specific output artifact of the `Task` |
-| `tasks..inputs.` | The value of a specific input artifact of the `Task` |
+| Variable | Description |
+| -------- | ----------- |
+| `params.` | The value of the parameter at runtime. |
+| `params['']` | (see above) |
+| `params[""]` | (see above) |
+| `params.[*]` | Get the whole param array or object.|
+| `params[''][*]` | (see above) |
+| `params[""][*]` | (see above) |
+| `params.[i]` | Get the i-th element of param array. This is alpha feature, set `enable-api-fields` to `alpha` to use it.|
+| `params[''][i]` | (see above) |
+| `params[""][i]` | (see above) |
+| `params.[*]` | Get the value of the whole object param. This is alpha feature, set `enable-api-fields` to `alpha` to use it.|
+| `params..` | Get the value of an individual child of an object param. This is alpha feature, set `enable-api-fields` to `alpha` to use it. |
+| `tasks..matrix.length` | The length of the `Matrix` combination count. |
+| `tasks..results.` | The value of the `Task's` result. Can alter `Task` execution order within a `Pipeline`.) |
+| `tasks..results.[i]` | The ith value of the `Task's` array result. Can alter `Task` execution order within a `Pipeline`.) |
+| `tasks..results.[*]` | The array value of the `Task's` result. Can alter `Task` execution order within a `Pipeline`. Cannot be used in `script`.) |
+| `tasks..results..key` | The `key` value of the `Task's` object result. Can alter `Task` execution order within a `Pipeline`.) |
+| `tasks..matrix..length` | The length of the matrixed `Task's` results. (Can alter `Task` execution order within a `Pipeline`.) |
+| `workspaces..bound` | Whether a `Workspace` has been bound or not. "false" if the `Workspace` declaration has `optional: true` and the Workspace binding was omitted by the PipelineRun. |
+| `context.pipelineRun.name` | The name of the `PipelineRun` that this `Pipeline` is running in. |
+| `context.pipelineRun.namespace` | The namespace of the `PipelineRun` that this `Pipeline` is running in. |
+| `context.pipelineRun.uid` | The uid of the `PipelineRun` that this `Pipeline` is running in. |
+| `context.pipeline.name` | The name of this `Pipeline` . |
+| `tasks..status` | The execution status of the specified `pipelineTask`, only available in `finally` tasks. The execution status can be set to any one of the values (`Succeeded`, `Failed`, or `None`) described [here](pipelines.md#using-execution-status-of-pipelinetask)|
+| `tasks.status` | An aggregate status of all the `pipelineTasks` under the `tasks` section (excluding the `finally` section). This variable is only available in the `finally` tasks and can have any one of the values (`Succeeded`, `Failed`, `Completed`, or `None`) described [here](pipelines.md#using-aggregate-execution-status-of-all-tasks). |
+| `context.pipelineTask.retries` | The retries of this `PipelineTask`. |
## Variables available in a `Task`
-| Variable | Description |
-|----------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------|
-| `params.` | The value of the parameter at runtime. |
-| `params['']` | (see above) |
-| `params[""]` | (see above) |
-| `params.[*]` | Get the whole param array or object. |
-| `params[''][*]` | (see above) |
-| `params[""][*]` | (see above) |
-| `params.[i]` | Get the i-th element of param array. This is alpha feature, set `enable-api-fields` to `alpha` to use it. |
-| `params[''][i]` | (see above) |
-| `params[""][i]` | (see above) |
+| Variable | Description |
+| -------- | ----------- |
+| `params.` | The value of the parameter at runtime. |
+| `params['']` | (see above) |
+| `params[""]` | (see above) |
+| `params.[*]` | Get the whole param array or object.|
+| `params[''][*]` | (see above) |
+| `params[""][*]` | (see above) |
+| `params.[i]` | Get the i-th element of param array. This is alpha feature, set `enable-api-fields` to `alpha` to use it.|
+| `params[''][i]` | (see above) |
+| `params[""][i]` | (see above) |
| `params..` | Get the value of an individual child of an object param. This is alpha feature, set `enable-api-fields` to `alpha` to use it. |
-| `results..path` | The path to the file where the `Task` writes its results data. |
-| `results[''].path` | (see above) |
-| `results[""].path` | (see above) |
-| `workspaces..path` | The path to the mounted `Workspace`. Empty string if an optional `Workspace` has not been provided by the TaskRun. |
-| `workspaces..bound` | Whether a `Workspace` has been bound or not. "false" if an optional`Workspace` has not been provided by the TaskRun. |
-| `workspaces..claim` | The name of the `PersistentVolumeClaim` specified as a volume source for the `Workspace`. Empty string for other volume types. |
-| `workspaces..volume` | The name of the volume populating the `Workspace`. |
-| `credentials.path` | The path to credentials injected from Secrets with matching annotations. |
-| `context.taskRun.name` | The name of the `TaskRun` that this `Task` is running in. |
-| `context.taskRun.namespace` | The namespace of the `TaskRun` that this `Task` is running in. |
-| `context.taskRun.uid` | The uid of the `TaskRun` that this `Task` is running in. |
-| `context.task.name` | The name of this `Task`. |
-| `context.task.retry-count` | The current retry number of this `Task`. |
-| `steps.step-.exitCode.path` | The path to the file where a Step's exit code is stored. |
-| `steps.step-unnamed-.exitCode.path` | The path to the file where a Step's exit code is stored for a step without any name. |
-| `artifacts.path` | The path to the file where the `Task` writes its artifacts data. |
+| `results..path` | The path to the file where the `Task` writes its results data. |
+| `results[''].path` | (see above) |
+| `results[""].path` | (see above) |
+| `workspaces..path` | The path to the mounted `Workspace`. Empty string if an optional `Workspace` has not been provided by the TaskRun. |
+| `workspaces..bound` | Whether a `Workspace` has been bound or not. "false" if an optional`Workspace` has not been provided by the TaskRun. |
+| `workspaces..claim` | The name of the `PersistentVolumeClaim` specified as a volume source for the `Workspace`. Empty string for other volume types. |
+| `workspaces..volume` | The name of the volume populating the `Workspace`. |
+| `credentials.path` | The path to credentials injected from Secrets with matching annotations. |
+| `context.taskRun.name` | The name of the `TaskRun` that this `Task` is running in. |
+| `context.taskRun.namespace` | The namespace of the `TaskRun` that this `Task` is running in. |
+| `context.taskRun.uid` | The uid of the `TaskRun` that this `Task` is running in. |
+| `context.task.name` | The name of this `Task`. |
+| `context.task.retry-count` | The current retry number of this `Task`. |
+| `steps.step-.exitCode.path` | The path to the file where a Step's exit code is stored. |
+| `steps.step-unnamed-.exitCode.path` | The path to the file where a Step's exit code is stored for a step without any name. |
## Fields that accept variable substitutions
diff --git a/upstream/docs/windows.md b/upstream/docs/windows.md
index 93c95f6b7b4..fc3b39bd1dc 100644
--- a/upstream/docs/windows.md
+++ b/upstream/docs/windows.md
@@ -36,12 +36,12 @@ In order to ensure that Tasks are scheduled to a node with the correct host OS,
### Node Selectors
-Node selectors are the simplest way to schedule pods to a Windows or Linux node. By default, Kubernetes nodes include a label `kubernetes.io/os` to identify the host OS. The Kubelet populates this with `runtime.GOOS` as defined by Go. Use `spec.podTemplate.nodeSelector` (or `spec.taskRunSpecs[i].podTemplate.nodeSelector` in a PipelineRun) to schedule Tasks to a node with a specific label and value.
+Node selectors are the simplest way to schedule pods to a Windows or Linux node. By default, Kubernetes nodes include a label `kubernetes.io/os` to identify the host OS. The Kubelet populates this with `runtime.GOOS` as defined by Go. Use `spec.podTemplate.nodeSelector` (or `spec.taskRunSpecs[i].taskPodTemplate.nodeSelector` in a PipelineRun) to schedule Tasks to a node with a specific label and value.
For example:
``` yaml
-apiVersion: tekton.dev/v1
+apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: windows-taskrun
@@ -52,7 +52,7 @@ spec:
nodeSelector:
kubernetes.io/os: windows
---
-apiVersion: tekton.dev/v1
+apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: linux-taskrun
@@ -71,7 +71,7 @@ Node affinity can be used as an alternative method of defining the OS requiremen
For example:
```yaml
-apiVersion: tekton.dev/v1
+apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: windows-taskrun
@@ -89,7 +89,7 @@ spec:
values:
- windows
---
-apiVersion: tekton.dev/v1
+apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: linux-taskrun
diff --git a/upstream/examples/v1/pipelineruns/4808-regression.yaml b/upstream/examples/v1/pipelineruns/4808-regression.yaml
index f1258cba7ce..375f3f3389d 100644
--- a/upstream/examples/v1/pipelineruns/4808-regression.yaml
+++ b/upstream/examples/v1/pipelineruns/4808-regression.yaml
@@ -11,7 +11,7 @@ spec:
type: string
steps:
- name: print-result
- image: mirror.gcr.io/bash
+ image: bash:latest
env:
- name: PARAM_TO_PRINT
value: $(params.TO_PRINT)
@@ -39,7 +39,7 @@ spec:
description: A result string
steps:
- name: gen-result
- image: mirror.gcr.io/bash
+ image: bash:latest
env:
- name: PARAM_STRING_LENGTH
value: $(params.STRING_LENGTH)
diff --git a/upstream/examples/v1/pipelineruns/6139-regression.yaml b/upstream/examples/v1/pipelineruns/6139-regression.yaml
index 3fef69f8a97..4fc16c0ed1a 100644
--- a/upstream/examples/v1/pipelineruns/6139-regression.yaml
+++ b/upstream/examples/v1/pipelineruns/6139-regression.yaml
@@ -15,7 +15,7 @@ spec:
results:
- name: result-one
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
script: |
#!/bin/sh
echo "Hello world!"
@@ -28,7 +28,7 @@ spec:
results:
- name: result-two
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
script: |
#!/bin/sh
echo "Goodbye world!"
@@ -43,7 +43,7 @@ spec:
results:
- name: result-three
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
script: |
#!/bin/sh
echo "Shutdown world!"
diff --git a/upstream/examples/v1/pipelineruns/alpha/consume-artifacts-from-task.yaml b/upstream/examples/v1/pipelineruns/alpha/consume-artifacts-from-task.yaml
deleted file mode 100644
index 6ca60999c7f..00000000000
--- a/upstream/examples/v1/pipelineruns/alpha/consume-artifacts-from-task.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-apiVersion: tekton.dev/v1
-kind: PipelineRun
-metadata:
- generateName: pipelinerun-consume-tasks-artifacts
-spec:
- pipelineSpec:
- tasks:
- - name: produce-artifacts-task
- taskSpec:
- description: |
- A simple task that produces artifacts
- steps:
- - name: produce-artifacts
- image: bash:latest
- script: |
- #!/usr/bin/env bash
- cat > $(artifacts.path) << EOF
- {
- "inputs":[
- {
- "name":"input-artifacts",
- "values":[
- {
- "uri":"pkg:example.github.com/inputs",
- "digest":{
- "sha256":"b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"
- }
- }
- ]
- }
- ],
- "outputs":[
- {
- "name":"image",
- "values":[
- {
- "uri":"pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c",
- "digest":{
- "sha256":"df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48",
- "sha1":"95588b8f34c31eb7d62c92aaa4e6506639b06ef2"
- }
- }
- ]
- }
- ]
- }
- EOF
- - name: consume-artifacts
- runAfter:
- - produce-artifacts-task
- taskSpec:
- steps:
- - name: artifacts-consumer-python
- image: python:latest
- script: |
- #!/usr/bin/env python3
- import json
- data = json.loads('$(tasks.produce-artifacts-task.outputs.image)')
- if data[0]['uri'] != "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c":
- exit(1)
\ No newline at end of file
diff --git a/upstream/examples/v1/pipelineruns/alpha/ignore-task-error.yaml b/upstream/examples/v1/pipelineruns/alpha/ignore-task-error.yaml
new file mode 100644
index 00000000000..a7ee3656c3e
--- /dev/null
+++ b/upstream/examples/v1/pipelineruns/alpha/ignore-task-error.yaml
@@ -0,0 +1,25 @@
+apiVersion: tekton.dev/v1
+kind: PipelineRun
+metadata:
+ generateName: pipelinerun-with-failing-task-
+spec:
+ pipelineSpec:
+ tasks:
+ - name: echo-continue
+ onError: continue
+ taskSpec:
+ steps:
+ - name: write
+ image: alpine
+ script: |
+ echo "this is a failing task"
+ exit 1
+ - name: echo
+ runAfter:
+ - echo-continue
+ taskSpec:
+ steps:
+ - name: write
+ image: alpine
+ script: |
+ echo "this is a success task"
diff --git a/upstream/examples/v1/pipelineruns/alpha/param-enum.yaml b/upstream/examples/v1/pipelineruns/alpha/param-enum.yaml
index af35cb5acb2..9f6758bc891 100644
--- a/upstream/examples/v1/pipelineruns/alpha/param-enum.yaml
+++ b/upstream/examples/v1/pipelineruns/alpha/param-enum.yaml
@@ -9,7 +9,7 @@ spec:
enum: ["v1", "v2", "v3"]
steps:
- name: build
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
echo "$(params.message)"
---
diff --git a/upstream/examples/v1/pipelineruns/alpha/pipelinerun-large-results.yaml b/upstream/examples/v1/pipelineruns/alpha/pipelinerun-large-results.yaml
index 6f81715dd77..be1da100bf7 100644
--- a/upstream/examples/v1/pipelineruns/alpha/pipelinerun-large-results.yaml
+++ b/upstream/examples/v1/pipelineruns/alpha/pipelinerun-large-results.yaml
@@ -20,7 +20,7 @@ spec:
type: string
steps:
- name: step1
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
# produce a result - a random string with 2,500 characters - result1
tr -dc A-Za-z0-9 $(results.suffix.path)
- name: do-something
@@ -25,7 +25,7 @@ spec:
- name: arg
steps:
- name: do-something
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo "$(params.arg)" | grep "prefix:suffix"
params:
diff --git a/upstream/examples/v1/pipelineruns/pipelinerun-results.yaml b/upstream/examples/v1/pipelineruns/pipelinerun-results.yaml
index 3da5ae4cc86..ceae7eb5de3 100644
--- a/upstream/examples/v1/pipelineruns/pipelinerun-results.yaml
+++ b/upstream/examples/v1/pipelineruns/pipelinerun-results.yaml
@@ -13,7 +13,7 @@ spec:
description: the sum of the first and second operand
steps:
- name: add
- image: mirror.gcr.io/alpine
+ image: alpine
env:
- name: OP1
value: $(params.first)
diff --git a/upstream/examples/v1/pipelineruns/pipelinerun-task-execution-status.yaml b/upstream/examples/v1/pipelineruns/pipelinerun-task-execution-status.yaml
index 73c7022724e..03374bb7206 100644
--- a/upstream/examples/v1/pipelineruns/pipelinerun-task-execution-status.yaml
+++ b/upstream/examples/v1/pipelineruns/pipelinerun-task-execution-status.yaml
@@ -10,7 +10,7 @@ spec:
- name: task1 # successful task
taskSpec:
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
name: hello
script: |
echo "Hello World!"
@@ -21,7 +21,7 @@ spec:
values: ["true"]
taskSpec:
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
name: success
script: |
exit 0
@@ -37,7 +37,7 @@ spec:
- name: task1Status
- name: task2Status
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
name: verify-dag-task-status
script: |
if [[ $(params.task1Status) != "Succeeded" || $(params.task2Status) != "None" ]]; then
@@ -51,7 +51,7 @@ spec:
params:
- name: aggregateStatus
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
name: verify-aggregate-tasks-status
script: |
if [[ $(params.aggregateStatus) != "Completed" ]]; then
diff --git a/upstream/examples/v1/pipelineruns/pipelinerun-using-different-subpaths-of-workspace.yaml b/upstream/examples/v1/pipelineruns/pipelinerun-using-different-subpaths-of-workspace.yaml
index 183a8cac004..e70184d1a71 100644
--- a/upstream/examples/v1/pipelineruns/pipelinerun-using-different-subpaths-of-workspace.yaml
+++ b/upstream/examples/v1/pipelineruns/pipelinerun-using-different-subpaths-of-workspace.yaml
@@ -5,7 +5,7 @@ metadata:
spec:
steps:
- name: write
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo bar > $(workspaces.task-ws.path)/foo
workspaces:
- name: task-ws
@@ -24,10 +24,10 @@ spec:
- name: local-ws
steps:
- name: read-1
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat $(workspaces.local-ws.path)/$(params.directory1)/foo | grep bar
- name: read-2
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat $(workspaces.local-ws.path)/$(params.directory2)/foo | grep bar
---
apiVersion: tekton.dev/v1
diff --git a/upstream/examples/v1/pipelineruns/pipelinerun-using-parameterized-subpath-of-workspace.yaml b/upstream/examples/v1/pipelineruns/pipelinerun-using-parameterized-subpath-of-workspace.yaml
index e0dd0c66f55..6028a16e959 100644
--- a/upstream/examples/v1/pipelineruns/pipelinerun-using-parameterized-subpath-of-workspace.yaml
+++ b/upstream/examples/v1/pipelineruns/pipelinerun-using-parameterized-subpath-of-workspace.yaml
@@ -5,7 +5,7 @@ metadata:
spec:
steps:
- name: write
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo bar > $(workspaces.task-ws.path)/foo
workspaces:
- name: task-ws
@@ -24,10 +24,10 @@ spec:
- name: local-ws
steps:
- name: read-1
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat $(workspaces.local-ws.path)/$(params.directory1)/foo | grep bar
- name: read-2
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat $(workspaces.local-ws.path)/$(params.directory2)/foo | grep bar
---
apiVersion: tekton.dev/v1
diff --git a/upstream/examples/v1/pipelineruns/pipelinerun-with-extra-params.yaml b/upstream/examples/v1/pipelineruns/pipelinerun-with-extra-params.yaml
index 4cc2b08d372..7e2472700a9 100644
--- a/upstream/examples/v1/pipelineruns/pipelinerun-with-extra-params.yaml
+++ b/upstream/examples/v1/pipelineruns/pipelinerun-with-extra-params.yaml
@@ -35,7 +35,7 @@ spec:
description: The second integer
steps:
- name: sum
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
#!/usr/bin/env bash
echo -n $(( "$(inputs.params.a)" + "$(inputs.params.b)" ))
diff --git a/upstream/examples/v1/pipelineruns/pipelinerun-with-final-results.yaml b/upstream/examples/v1/pipelineruns/pipelinerun-with-final-results.yaml
index e8e9d3d3004..e3226a82795 100644
--- a/upstream/examples/v1/pipelineruns/pipelinerun-with-final-results.yaml
+++ b/upstream/examples/v1/pipelineruns/pipelinerun-with-final-results.yaml
@@ -58,7 +58,7 @@ spec:
description: The product of the two provided integers
steps:
- name: product
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
#!/usr/bin/env bash
echo -n $(( "$(params.a)" * "$(params.b)" )) | tee $(results.product.path)
diff --git a/upstream/examples/v1/pipelineruns/pipelinerun-with-final-tasks.yaml b/upstream/examples/v1/pipelineruns/pipelinerun-with-final-tasks.yaml
index c89852f0fb5..f410943e7ef 100644
--- a/upstream/examples/v1/pipelineruns/pipelinerun-with-final-tasks.yaml
+++ b/upstream/examples/v1/pipelineruns/pipelinerun-with-final-tasks.yaml
@@ -116,18 +116,18 @@ spec:
- name: source
steps:
- name: check-application-dir-has-source
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
if [ ! -d "$(workspaces.source.path)/application/" ]; then
echo "Something went wrong and could not find application source under $(workspaces.source.path)/application/"
exit 1
fi
- name: cleanup-workspace
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
rm -rf $(workspaces.source.path)/application/
- name: verify-application-dir-has-gone
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
if [ -d "$(workspaces.source.path)/application/" ]; then
echo "Something went wrong cleaning up and the application source still exists under $(workspaces.source.path)/application/"
@@ -174,7 +174,7 @@ spec:
- name: commit
steps:
- name: check-commit-initialized
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
if [[ ! $(params.commit) ]]; then
exit 1
diff --git a/upstream/examples/v1/pipelineruns/pipelinerun-with-parallel-tasks-using-pvc.yaml b/upstream/examples/v1/pipelineruns/pipelinerun-with-parallel-tasks-using-pvc.yaml
index d1d227f7430..c4668041b2c 100644
--- a/upstream/examples/v1/pipelineruns/pipelinerun-with-parallel-tasks-using-pvc.yaml
+++ b/upstream/examples/v1/pipelineruns/pipelinerun-with-parallel-tasks-using-pvc.yaml
@@ -106,7 +106,7 @@ spec:
description: A result message
steps:
- name: write
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo $(params.message) | tee $(workspaces.task-ws.path)/message $(results.message.path)
workspaces:
- name: task-ws
@@ -127,7 +127,7 @@ spec:
description: Input message in upper case
steps:
- name: to-upper
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat $(workspaces.w.path)/$(params.input-path) | tr '[:lower:]' '[:upper:]' | tee $(workspaces.w.path)/upper $(results.message.path)
workspaces:
- name: w
@@ -148,7 +148,7 @@ spec:
description: Input message in lower case
steps:
- name: to-lower
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat $(workspaces.w.path)/$(params.input-path) | tr '[:upper:]' '[:lower:]' | tee $(workspaces.w.path)/lower $(results.message.path)
workspaces:
- name: w
@@ -166,7 +166,7 @@ spec:
type: string
steps:
- name: report-result
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo $(params.result-to-report)
---
apiVersion: tekton.dev/v1
@@ -176,10 +176,10 @@ metadata:
spec:
steps:
- name: validate-upper
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat $(workspaces.files.path)/upper | grep HELLO\ TEKTON
- name: validate-lower
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat $(workspaces.files.path)/lower | grep hello\ tekton
workspaces:
- name: files
diff --git a/upstream/examples/v1/pipelineruns/pipelinerun-with-params.yaml b/upstream/examples/v1/pipelineruns/pipelinerun-with-params.yaml
index 232965eae9a..5a86ef4841b 100644
--- a/upstream/examples/v1/pipelineruns/pipelinerun-with-params.yaml
+++ b/upstream/examples/v1/pipelineruns/pipelinerun-with-params.yaml
@@ -47,7 +47,7 @@ spec:
description: The second integer
steps:
- name: sum
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
#!/usr/bin/env bash
echo -n $(( "$(inputs.params.a)" + "$(inputs.params.b)" ))
@@ -71,7 +71,7 @@ spec:
description: The second integer
steps:
- name: product
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
#!/usr/bin/env bash
echo -n $(( "$(inputs.params.a)" * "$(inputs.params.b)" ))
diff --git a/upstream/examples/v1/pipelineruns/pipelinerun-with-pipelinespec-and-taskspec.yaml b/upstream/examples/v1/pipelineruns/pipelinerun-with-pipelinespec-and-taskspec.yaml
index 45431740245..60316328506 100644
--- a/upstream/examples/v1/pipelineruns/pipelinerun-with-pipelinespec-and-taskspec.yaml
+++ b/upstream/examples/v1/pipelineruns/pipelinerun-with-pipelinespec-and-taskspec.yaml
@@ -12,7 +12,7 @@ spec:
app: "example"
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
echo "Good Morning!"
@@ -38,7 +38,7 @@ spec:
default: "Hello World!"
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
echo "$(params.MESSAGE)"
@@ -70,7 +70,7 @@ spec:
default: "Hello World!"
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
echo "$(params.MESSAGE)"
diff --git a/upstream/examples/v1/pipelineruns/pipelinerun-with-pipelinespec.yaml b/upstream/examples/v1/pipelineruns/pipelinerun-with-pipelinespec.yaml
index d97f93f01dd..c7dd4455244 100644
--- a/upstream/examples/v1/pipelineruns/pipelinerun-with-pipelinespec.yaml
+++ b/upstream/examples/v1/pipelineruns/pipelinerun-with-pipelinespec.yaml
@@ -9,7 +9,7 @@ spec:
default: "Hello World"
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
command:
- echo
args:
diff --git a/upstream/examples/v1/pipelineruns/pipelinerun-with-when-expressions.yaml b/upstream/examples/v1/pipelineruns/pipelinerun-with-when-expressions.yaml
index fd0768a7d78..1033bd5d99f 100644
--- a/upstream/examples/v1/pipelineruns/pipelinerun-with-when-expressions.yaml
+++ b/upstream/examples/v1/pipelineruns/pipelinerun-with-when-expressions.yaml
@@ -32,7 +32,7 @@ spec:
description: The workspace to create the readme file in
steps:
- name: write-new-stuff
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: 'touch $(workspaces.source.path)/README.md'
- name: check-file
params:
@@ -54,7 +54,7 @@ spec:
description: indicates whether the file exists or is missing
steps:
- name: check-file
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
if test -f $(workspaces.source.path)/$(params.path); then
printf yes | tee $(results.exists.path)
@@ -69,7 +69,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: 'echo file exists'
- name: sample-task-with-array-values
when:
@@ -79,7 +79,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/alpine
+ image: alpine
script: 'echo hello'
- name: task-should-be-skipped-1
when:
@@ -89,7 +89,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: exit 1
- name: task-should-be-skipped-2 # when expression using parameter, evaluates to false
when:
@@ -99,7 +99,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: exit 1
- name: task-should-be-skipped-3 # task with when expression and run after
runAfter:
@@ -111,7 +111,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: exit 1
- name: task-should-be-skipped-4 # task with when expression using array parameter, evaluates to false
when:
@@ -121,7 +121,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/alpine
+ image: alpine
script: exit 1
finally:
- name: finally-task-should-be-skipped-1 # when expression using execution status, evaluates to false
@@ -132,7 +132,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: exit 1
- name: finally-task-should-be-skipped-2 # when expression using task result, evaluates to false
when:
@@ -142,7 +142,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: exit 1
- name: finally-task-should-be-skipped-3 # when expression using parameter, evaluates to false
when:
@@ -152,7 +152,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: exit 1
- name: finally-task-should-be-skipped-4 # when expression using tasks execution status, evaluates to false
when:
@@ -162,7 +162,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: exit 1
- name: finally-task-should-be-skipped-5 # when expression using tasks execution status, evaluates to false
when:
@@ -172,7 +172,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: exit 1
- name: finally-task-should-be-executed # when expression using execution status, tasks execution status, param, and results
when:
@@ -191,7 +191,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: 'echo finally done'
params:
- name: path
diff --git a/upstream/examples/v1/pipelineruns/pipelinerun.yaml b/upstream/examples/v1/pipelineruns/pipelinerun.yaml
index fb7c43fcd33..6c4a6617364 100644
--- a/upstream/examples/v1/pipelineruns/pipelinerun.yaml
+++ b/upstream/examples/v1/pipelineruns/pipelinerun.yaml
@@ -161,7 +161,7 @@ spec:
securityContext:
runAsUser: 0
- name: write-url
- image: mirror.gcr.io/bash:5.1.4@sha256:c523c636b722339f41b6a431b44588ab2f762c5de5ec3bd7964420ff982fb1d9
+ image: docker.io/library/bash:5.1.4@sha256:c523c636b722339f41b6a431b44588ab2f762c5de5ec3bd7964420ff982fb1d9
script: |
set -e
image="$(params.IMAGE)"
diff --git a/upstream/examples/v1/pipelineruns/propagating-workspaces-in-pipelines.yaml b/upstream/examples/v1/pipelineruns/propagating-workspaces-in-pipelines.yaml
deleted file mode 100644
index c59740378ba..00000000000
--- a/upstream/examples/v1/pipelineruns/propagating-workspaces-in-pipelines.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-apiVersion: tekton.dev/v1
-kind: Pipeline
-metadata:
- name: workspace-example
-spec:
- workspaces:
- - name: shared-data
- tasks:
- - name: t1
- taskSpec:
- steps:
- - image: mirror.gcr.io/ubuntu
- command: ["ls"]
- args: ["$(workspaces.shared-data.path)"]
----
-apiVersion: tekton.dev/v1
-kind: PipelineRun
-metadata:
- name: workspace-example-run
-spec:
- pipelineRef:
- name: workspace-example
- workspaces:
- - name: shared-data
- volumeClaimTemplate:
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 16Mi
- volumeMode: Filesystem
diff --git a/upstream/examples/v1/pipelineruns/propagating-workspaces.yaml b/upstream/examples/v1/pipelineruns/propagating-workspaces.yaml
index aa2c64ad47c..3b7474f9514 100644
--- a/upstream/examples/v1/pipelineruns/propagating-workspaces.yaml
+++ b/upstream/examples/v1/pipelineruns/propagating-workspaces.yaml
@@ -57,7 +57,7 @@ spec:
taskSpec:
steps:
- name: fetch-and-write-secure
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
if [ "hunter2" = "$(cat $(workspaces.password-vault.path)/password)" ]; then
cp $(workspaces.recipe-store.path)/recipe.txt $(workspaces.shared-data.path)
@@ -72,5 +72,5 @@ spec:
taskSpec:
steps:
- name: print-secrets
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat $(workspaces.shared-data.path)/$(params.filename)
diff --git a/upstream/examples/v1/pipelineruns/propagating_params_implicit_parameters.yaml b/upstream/examples/v1/pipelineruns/propagating_params_implicit_parameters.yaml
index dde58ab75c4..e1d8fc147ce 100644
--- a/upstream/examples/v1/pipelineruns/propagating_params_implicit_parameters.yaml
+++ b/upstream/examples/v1/pipelineruns/propagating_params_implicit_parameters.yaml
@@ -12,7 +12,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
echo "$(params.HELLO)"
@@ -21,7 +21,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
echo "And Finally ... $(params.HELLO)"
diff --git a/upstream/examples/v1/pipelineruns/propagating_params_in_pipeline.yaml b/upstream/examples/v1/pipelineruns/propagating_params_in_pipeline.yaml
deleted file mode 100644
index e91b5d04cd8..00000000000
--- a/upstream/examples/v1/pipelineruns/propagating_params_in_pipeline.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-apiVersion: tekton.dev/v1
-kind: Pipeline
-metadata:
- name: propagating-params-in-pipeline
-spec:
- params:
- - name: HELLO
- default: "Pipeline Hello World!"
- tasks:
- - name: echo-hello
- taskSpec:
- steps:
- - name: echo
- image: mirror.gcr.io/ubuntu
- script: |
- #!/usr/bin/env bash
- echo "$(params.HELLO)"
- finally:
- - name: echo-hello-finally
- taskSpec:
- steps:
- - name: echo
- image: mirror.gcr.io/ubuntu
- script: |
- #!/usr/bin/env bash
- echo "And Finally ... $(params.HELLO)"
----
-apiVersion: tekton.dev/v1
-kind: PipelineRun
-metadata:
- generateName: propagating-params-in-pipeline-
-spec:
- params:
- - name: HELLO
- value: "Hello from pipeline run"
- pipelineRef:
- name: propagating-params-in-pipeline
\ No newline at end of file
diff --git a/upstream/examples/v1/pipelineruns/propagating_params_with_scope_precedence.yaml b/upstream/examples/v1/pipelineruns/propagating_params_with_scope_precedence.yaml
index 3adc9e6528e..273397e8d7a 100644
--- a/upstream/examples/v1/pipelineruns/propagating_params_with_scope_precedence.yaml
+++ b/upstream/examples/v1/pipelineruns/propagating_params_with_scope_precedence.yaml
@@ -15,7 +15,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
echo "$(params.HELLO)"
@@ -27,7 +27,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
echo "And finally.. $(params.HELLO)"
diff --git a/upstream/examples/v1/pipelineruns/propagating_params_with_scope_precedence_default_only.yaml b/upstream/examples/v1/pipelineruns/propagating_params_with_scope_precedence_default_only.yaml
index 835c48c30ec..697c5793a3f 100644
--- a/upstream/examples/v1/pipelineruns/propagating_params_with_scope_precedence_default_only.yaml
+++ b/upstream/examples/v1/pipelineruns/propagating_params_with_scope_precedence_default_only.yaml
@@ -15,7 +15,7 @@ spec:
default: "Default Hello World!"
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
echo "$(params.HELLO)"
@@ -27,7 +27,7 @@ spec:
default: "Default Hello World!"
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
echo "And finally... $(params.HELLO)"
diff --git a/upstream/examples/v1/pipelineruns/propagating_results_implicit_resultref.yaml b/upstream/examples/v1/pipelineruns/propagating_results_implicit_resultref.yaml
index a8b4fc2e30a..d0a16eae5ab 100644
--- a/upstream/examples/v1/pipelineruns/propagating_results_implicit_resultref.yaml
+++ b/upstream/examples/v1/pipelineruns/propagating_results_implicit_resultref.yaml
@@ -8,7 +8,7 @@ spec:
type: string
steps:
- name: uid
- image: mirror.gcr.io/busybox
+ image: busybox
command: ["/bin/sh", "-c"]
args:
- echo "1001" | tee $(results.uid.path)
@@ -27,7 +27,7 @@ spec:
taskSpec:
steps:
- name: show-uid
- image: mirror.gcr.io/busybox
+ image: busybox
command: ["/bin/sh", "-c"]
args:
- echo $(tasks.add-uid.results.uid)
diff --git a/upstream/examples/v1/pipelineruns/propagating_workspaces_with_referenced_resources.yaml b/upstream/examples/v1/pipelineruns/propagating_workspaces_with_referenced_resources.yaml
index acf31687a53..01bdc63e23f 100644
--- a/upstream/examples/v1/pipelineruns/propagating_workspaces_with_referenced_resources.yaml
+++ b/upstream/examples/v1/pipelineruns/propagating_workspaces_with_referenced_resources.yaml
@@ -8,7 +8,7 @@ spec:
- name: shared-data
steps:
- name: fetch-and-write
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
echo hi >> $(workspaces.shared-data.path)/recipe.txt
---
@@ -38,7 +38,7 @@ spec:
taskSpec:
steps:
- name: print-secrets
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat $(workspaces.shared-data.path)/recipe.txt
runAfter:
- fetch-the-recipe
diff --git a/upstream/examples/v1/pipelineruns/task_results_example.yaml b/upstream/examples/v1/pipelineruns/task_results_example.yaml
index b0c67e5d2f1..17a35e159d4 100644
--- a/upstream/examples/v1/pipelineruns/task_results_example.yaml
+++ b/upstream/examples/v1/pipelineruns/task_results_example.yaml
@@ -58,7 +58,7 @@ spec:
description: The sum of the two provided integers
steps:
- name: sum
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
#!/usr/bin/env bash
echo -n $(( "$(params.a)" + "$(params.b)" )) | tee $(results.sum.path)
@@ -85,7 +85,7 @@ spec:
description: The product of the two provided integers
steps:
- name: product
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
#!/usr/bin/env bash
echo -n $(( "$(params.a)" * "$(params.b)" )) | tee $(results.product.path)
diff --git a/upstream/examples/v1/pipelineruns/using-optional-workspaces-in-when-expressions.yaml b/upstream/examples/v1/pipelineruns/using-optional-workspaces-in-when-expressions.yaml
index 596067f5daa..6fca152a139 100644
--- a/upstream/examples/v1/pipelineruns/using-optional-workspaces-in-when-expressions.yaml
+++ b/upstream/examples/v1/pipelineruns/using-optional-workspaces-in-when-expressions.yaml
@@ -47,7 +47,7 @@ spec:
- name: message-of-the-day
optional: true
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
script: |
#!/usr/bin/env ash
for f in "$(workspaces.message-of-the-day.path)"/* ; do
@@ -63,6 +63,6 @@ spec:
taskSpec:
steps:
- name: print-default
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo "No message-of-the-day workspace was provided. This is the default MOTD instead!"
diff --git a/upstream/examples/v1/pipelineruns/using-retries-and-retry-count-variables.yaml b/upstream/examples/v1/pipelineruns/using-retries-and-retry-count-variables.yaml
index 67eb21388d8..cbaa64facc3 100644
--- a/upstream/examples/v1/pipelineruns/using-retries-and-retry-count-variables.yaml
+++ b/upstream/examples/v1/pipelineruns/using-retries-and-retry-count-variables.yaml
@@ -19,7 +19,7 @@ spec:
- name: pipelineTask-retries
- name: pipelineTask-retry-count
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine:3.12.0
script: |
#!/usr/bin/env sh
if [ "$(params.pipelineTask-retry-count)" == "$(params.pipelineTask-retries)" ]; then
@@ -33,6 +33,6 @@ spec:
- retry-me
taskSpec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: |
echo "hello world"
diff --git a/upstream/examples/v1/pipelineruns/using_context_variables.yaml b/upstream/examples/v1/pipelineruns/using_context_variables.yaml
index 151909d11da..7d7de6661ff 100644
--- a/upstream/examples/v1/pipelineruns/using_context_variables.yaml
+++ b/upstream/examples/v1/pipelineruns/using_context_variables.yaml
@@ -28,12 +28,12 @@ spec:
- name: pipelineRun-namespace
- name: pipelineTask-retries
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
name: print-uid
script: |
echo "TaskRun UID: $(context.taskRun.uid)"
echo "PipelineRun UID from params: $(params.pipeline-uid)"
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
name: print-names
script: |
echo "Task name: $(context.task.name)"
@@ -41,7 +41,7 @@ spec:
echo "Pipeline name from params: $(params.pipeline-name)"
echo "PipelineRun name from params: $(params.pipelineRun-name)"
echo "PipelineRun namespace from params: $(params.pipelineRun-namespace)"
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
name: print-retries
script: |
echo "PipelineTask retries from params: $(params.pipelineTask-retries)"
diff --git a/upstream/examples/v1/pipelineruns/workspace-from-volumeclaimtemplate.yaml b/upstream/examples/v1/pipelineruns/workspace-from-volumeclaimtemplate.yaml
index 9b37da6523d..a789709ee6f 100644
--- a/upstream/examples/v1/pipelineruns/workspace-from-volumeclaimtemplate.yaml
+++ b/upstream/examples/v1/pipelineruns/workspace-from-volumeclaimtemplate.yaml
@@ -8,7 +8,7 @@ spec:
taskSpec:
steps:
- name: write
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo bar > $(workspaces.task-ws.path)/foo
workspaces:
- name: task-ws
@@ -21,7 +21,7 @@ spec:
taskSpec:
steps:
- name: read
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat $(workspaces.myws.path)/foo | grep bar
workspaces:
- name: myws
diff --git a/upstream/examples/v1/pipelineruns/workspaces-projected.yaml b/upstream/examples/v1/pipelineruns/workspaces-projected.yaml
index d6c71f04aa1..56b3c9b871a 100644
--- a/upstream/examples/v1/pipelineruns/workspaces-projected.yaml
+++ b/upstream/examples/v1/pipelineruns/workspaces-projected.yaml
@@ -56,7 +56,7 @@ spec:
- name: filedrop
steps:
- name: fetch-and-write
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
if [ "hunter2" = "$(cat $(workspaces.secure-store.path)/password)" ]; then
cp $(workspaces.secure-store.path)/recipe.txt $(workspaces.filedrop.path)
@@ -77,7 +77,7 @@ spec:
- name: filename
steps:
- name: print-secrets
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat $(workspaces.storage.path)/$(params.filename)
---
apiVersion: tekton.dev/v1
diff --git a/upstream/examples/v1/pipelineruns/workspaces.yaml b/upstream/examples/v1/pipelineruns/workspaces.yaml
index 7c5b8b40f45..38d6116de39 100644
--- a/upstream/examples/v1/pipelineruns/workspaces.yaml
+++ b/upstream/examples/v1/pipelineruns/workspaces.yaml
@@ -56,7 +56,7 @@ spec:
- name: filedrop
steps:
- name: fetch-and-write
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
if [ "hunter2" = "$(cat $(workspaces.super-secret-password.path)/password)" ]; then
cp $(workspaces.secure-store.path)/recipe.txt $(workspaces.filedrop.path)
@@ -77,7 +77,7 @@ spec:
- name: filename
steps:
- name: print-secrets
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat $(workspaces.storage.path)/$(params.filename)
---
apiVersion: tekton.dev/v1
diff --git a/upstream/examples/v1/taskruns/beta/large-task-result.yaml b/upstream/examples/v1/taskruns/alpha/large-task-result.yaml
similarity index 93%
rename from upstream/examples/v1/taskruns/beta/large-task-result.yaml
rename to upstream/examples/v1/taskruns/alpha/large-task-result.yaml
index 9ff63507da7..4401fc10208 100644
--- a/upstream/examples/v1/taskruns/beta/large-task-result.yaml
+++ b/upstream/examples/v1/taskruns/alpha/large-task-result.yaml
@@ -14,13 +14,13 @@ spec:
- name: result5
steps:
- name: step1
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
#!/usr/bin/env bash
cat /dev/urandom | head -c 2500 | base64 | tee $(results.result1.path) #about 1 K result
cat /dev/urandom | head -c 2500 | base64 | tee $(results.result2.path) #about 4 K result
- name: step2
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
#!/usr/bin/env bash
cat /dev/urandom | head -c 2500 | base64 | tee $(results.result3.path) #about 1 K result
diff --git a/upstream/examples/v1/taskruns/alpha/param-enum.yaml b/upstream/examples/v1/taskruns/alpha/param-enum.yaml
index 7aac281d218..77deb40549f 100644
--- a/upstream/examples/v1/taskruns/alpha/param-enum.yaml
+++ b/upstream/examples/v1/taskruns/alpha/param-enum.yaml
@@ -9,7 +9,7 @@ spec:
default: "v1"
steps:
- name: build
- image: mirror.gcr.io/bash
+ image: bash:3.2
script: |
echo "$(params.message)"
---
diff --git a/upstream/examples/v1/taskruns/alpha/produce-consume-artifacts.yaml b/upstream/examples/v1/taskruns/alpha/produce-consume-artifacts.yaml
index 3238649b2ea..e270b1b85ff 100644
--- a/upstream/examples/v1/taskruns/alpha/produce-consume-artifacts.yaml
+++ b/upstream/examples/v1/taskruns/alpha/produce-consume-artifacts.yaml
@@ -6,12 +6,9 @@ spec:
taskSpec:
description: |
A simple task that populates artifacts to TaskRun stepState
- params:
- - name: buildOutput
- default: true
steps:
- name: artifacts-producer
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
cat > $(step.artifacts.path) << EOF
{
@@ -20,7 +17,7 @@ spec:
"name":"input-artifacts",
"values":[
{
- "uri":"pkg:example.github.com/inputs",
+ "uri":"git:jjjsss",
"digest":{
"sha256":"b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"
}
@@ -31,10 +28,9 @@ spec:
"outputs":[
{
"name":"image",
- "buildOutput": $(params.buildOutput),
"values":[
{
- "uri":"pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c",
+ "uri":"pkg:balba",
"digest":{
"sha256":"df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48",
"sha1":"95588b8f34c31eb7d62c92aaa4e6506639b06ef2"
@@ -46,13 +42,14 @@ spec:
}
EOF
- name: artifacts-consumer
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
+ echo $(steps.artifacts-producer.outputs)
echo $(steps.artifacts-producer.inputs.input-artifacts)
- name: artifacts-consumer-python
- image: mirror.gcr.io/python:3.12.4
+ image: python:latest
script: |
#!/usr/bin/env python3
import json
- data = json.loads('$(steps.artifacts-producer.outputs.image)')
+ data = json.loads('$(steps.artifacts-producer.outputs)')
print(data[0]['uri'])
diff --git a/upstream/examples/v1/taskruns/alpha/step-stream-results.yaml b/upstream/examples/v1/taskruns/alpha/step-stream-results.yaml
index 2e315f9c4a6..c94d7d9d9ed 100644
--- a/upstream/examples/v1/taskruns/alpha/step-stream-results.yaml
+++ b/upstream/examples/v1/taskruns/alpha/step-stream-results.yaml
@@ -20,7 +20,7 @@ spec:
image: gcr.io/go-containerregistry/crane
args:
- digest
- - mirror.gcr.io/bash
+ - docker.io/library/bash
stdoutConfig:
path: $(results.digest.path)
stderrConfig:
@@ -28,7 +28,7 @@ spec:
# This step redirects stdout and stderr to the path for the Task
# result `combined`.
- name: error
- image: mirror.gcr.io/bash
+ image: docker.io/library/bash
onError: continue
script: echo "combined message" && nonsense
stdoutConfig:
@@ -38,7 +38,7 @@ spec:
# This step prints out the contents of the task results
# if they exist
- name: log-results
- image: mirror.gcr.io/bash
+ image: docker.io/library/bash
script: |
if [[ -f $(results.digest.path) ]]
then
diff --git a/upstream/examples/v1/taskruns/alpha/step-stream-volumes.yaml b/upstream/examples/v1/taskruns/alpha/step-stream-volumes.yaml
index 0924e6ad2c6..2505c7855fe 100644
--- a/upstream/examples/v1/taskruns/alpha/step-stream-volumes.yaml
+++ b/upstream/examples/v1/taskruns/alpha/step-stream-volumes.yaml
@@ -10,7 +10,7 @@ spec:
- name: data
steps:
- name: echo
- image: mirror.gcr.io/bash
+ image: docker.io/library/bash
volumeMounts:
- name: data
mountPath: /data
@@ -18,7 +18,7 @@ spec:
stdoutConfig:
path: /data/step-echo-stdout
- name: error
- image: mirror.gcr.io/bash
+ image: docker.io/library/bash
onError: continue
volumeMounts:
- name: data
@@ -29,7 +29,7 @@ spec:
stderrConfig:
path: /data/step-error-stderr
- name: combined
- image: mirror.gcr.io/bash
+ image: docker.io/library/bash
onError: continue
volumeMounts:
- name: data
@@ -40,7 +40,7 @@ spec:
stderrConfig:
path: /data/step-combined
- name: cat
- image: mirror.gcr.io/bash
+ image: docker.io/library/bash
volumeMounts:
- name: data
mountPath: /data
@@ -48,7 +48,7 @@ spec:
stdoutConfig:
path: /data/step-cat-stdout
- name: log-data
- image: mirror.gcr.io/bash
+ image: docker.io/library/bash
volumeMounts:
- name: data
mountPath: /data
diff --git a/upstream/examples/v1/taskruns/alpha/step-stream-workspace.yaml b/upstream/examples/v1/taskruns/alpha/step-stream-workspace.yaml
index 24d055c0204..d8f632d115e 100644
--- a/upstream/examples/v1/taskruns/alpha/step-stream-workspace.yaml
+++ b/upstream/examples/v1/taskruns/alpha/step-stream-workspace.yaml
@@ -18,7 +18,7 @@ spec:
- name: data
args:
- digest
- - mirror.gcr.io/bash
+ - docker.io/library/bash
stdoutConfig:
path: $(results.digest.path)
- image: gcr.io/go-containerregistry/crane
@@ -26,7 +26,7 @@ spec:
- name: data
args:
- digest
- - mirror.gcr.io/bash
+ - docker.io/library/bash
stdoutConfig:
path: $(workspaces.data.path)/stdout.txt
- image: gcr.io/go-containerregistry/crane
@@ -43,26 +43,26 @@ spec:
- wrong
stderrConfig:
path: $(results.error2.path)
- - image: mirror.gcr.io/bash
+ - image: docker.io/library/bash
workspaces:
- name: data
onError: continue
args:
- -c
- "echo foobar >$(workspaces.data.path)/foobar.txt"
- - image: mirror.gcr.io/bash
+ - image: docker.io/library/bash
onError: continue
args:
- -c
- "2>$(results.error.path) >&2 echo -n fooerr"
- - image: mirror.gcr.io/bash
+ - image: docker.io/library/bash
workspaces:
- name: data
workingDir: $(workspaces.data.path)
onError: continue
script: |
echo local >out.txt
- - image: mirror.gcr.io/bash
+ - image: docker.io/library/bash
workspaces:
- name: data
onError: continue
diff --git a/upstream/examples/v1/taskruns/beta/stepaction-git-resolver.yaml b/upstream/examples/v1/taskruns/alpha/stepaction-git-resolver.yaml
similarity index 62%
rename from upstream/examples/v1/taskruns/beta/stepaction-git-resolver.yaml
rename to upstream/examples/v1/taskruns/alpha/stepaction-git-resolver.yaml
index 3bdaf7df327..346f515be67 100644
--- a/upstream/examples/v1/taskruns/beta/stepaction-git-resolver.yaml
+++ b/upstream/examples/v1/taskruns/alpha/stepaction-git-resolver.yaml
@@ -6,11 +6,11 @@ metadata:
spec:
params:
- name: pathInRepo
- value: stepaction/git-clone/0.1/git-clone.yaml
+ value: basic_step.yaml
- name: revision
value: main
- name: repoUrl
- value: https://github.com/tektoncd/catalog.git
+ value: https://github.com/chitrangpatel/repo1M.git
TaskSpec:
steps:
- name: action-runner
@@ -23,10 +23,3 @@ spec:
value: $(params.revision)
- name: pathInRepo
value: $(params.pathInRepo)
- params:
- - name: url
- value: https://github.com/kelseyhightower/nocode
- - name: revision
- value: master
- - name: output-path
- value: /workspace
diff --git a/upstream/examples/v1/taskruns/beta/stepaction-params.yaml b/upstream/examples/v1/taskruns/alpha/stepaction-params.yaml
similarity index 97%
rename from upstream/examples/v1/taskruns/beta/stepaction-params.yaml
rename to upstream/examples/v1/taskruns/alpha/stepaction-params.yaml
index c40c89cf6c6..52402bbdfe6 100644
--- a/upstream/examples/v1/taskruns/beta/stepaction-params.yaml
+++ b/upstream/examples/v1/taskruns/alpha/stepaction-params.yaml
@@ -1,4 +1,4 @@
-apiVersion: tekton.dev/v1beta1
+apiVersion: tekton.dev/v1alpha1
kind: StepAction
metadata:
name: step-action
@@ -40,7 +40,7 @@ spec:
value: $(params.object-param.key2)
- name: objectparamkey3
value: $(params.object-param.key3)
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/bin/bash
ARRAYVALUE=("${arrayparam0}" "${arrayparam1}" "${arrayparam2}" "${stringparam}" "${objectparamkey1}" "${objectparamkey2}" "${objectparamkey3}")
diff --git a/upstream/examples/v1/taskruns/beta/stepaction-passing-results.yaml b/upstream/examples/v1/taskruns/alpha/stepaction-passing-results.yaml
similarity index 95%
rename from upstream/examples/v1/taskruns/beta/stepaction-passing-results.yaml
rename to upstream/examples/v1/taskruns/alpha/stepaction-passing-results.yaml
index 56857a4a1bc..e02a771c746 100644
--- a/upstream/examples/v1/taskruns/beta/stepaction-passing-results.yaml
+++ b/upstream/examples/v1/taskruns/alpha/stepaction-passing-results.yaml
@@ -1,4 +1,4 @@
-apiVersion: tekton.dev/v1beta1
+apiVersion: tekton.dev/v1alpha1
kind: StepAction
metadata:
name: step-action
@@ -15,7 +15,7 @@ spec:
type: string
IMAGE_DIGEST:
type: string
- image: mirror.gcr.io/bash
+ image: bash:3.2
env:
- name: STRINGPARAM
value: $(params.param2)
@@ -75,7 +75,7 @@ spec:
type: string
IMAGE_DIGEST:
type: string
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo -n "[\"image1\", \"image2\", \"image3\"]" | tee $(step.results.result1.path)
echo -n "foo" | tee $(step.results.result2.path)
diff --git a/upstream/examples/v1/taskruns/beta/stepaction-results.yaml b/upstream/examples/v1/taskruns/alpha/stepaction-results.yaml
similarity index 89%
rename from upstream/examples/v1/taskruns/beta/stepaction-results.yaml
rename to upstream/examples/v1/taskruns/alpha/stepaction-results.yaml
index a12336b3023..7ea048830a7 100644
--- a/upstream/examples/v1/taskruns/beta/stepaction-results.yaml
+++ b/upstream/examples/v1/taskruns/alpha/stepaction-results.yaml
@@ -1,9 +1,9 @@
-apiVersion: tekton.dev/v1beta1
+apiVersion: tekton.dev/v1alpha1
kind: StepAction
metadata:
name: step-action
spec:
- image: mirror.gcr.io/alpine
+ image: alpine
results:
- name: result1
- name: result2
diff --git a/upstream/examples/v1/taskruns/alpha/stepaction-when.yaml b/upstream/examples/v1/taskruns/alpha/stepaction-when.yaml
deleted file mode 100644
index 17ce0508adb..00000000000
--- a/upstream/examples/v1/taskruns/alpha/stepaction-when.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
- name: my-pvc-2
-spec:
- resources:
- requests:
- storage: 5Gi
- volumeMode: Filesystem
- accessModes:
- - ReadWriteOnce
----
-apiVersion: tekton.dev/v1alpha1
-kind: StepAction
-metadata:
- name: step-action-when
-spec:
- image: alpine
- script: |
- echo "I am a Step Action!!!"
----
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- generateName: when-in-steps-
-spec:
- workspaces:
- - name: custom
- persistentVolumeClaim:
- claimName: my-pvc-2
- taskSpec:
- description: |
- A simple task to demotrate how when expressions work in steps.
- steps:
- - name: should-execute
- ref:
- name: "step-action-when"
- when:
- - input: "$(workspaces.custom.bound)"
- operator: in
- values: ["true"]
- - name: should-skip
- image: bash:latest
- script: |
- #!/usr/bin/env bash
- echo skipskipskip
- when:
- - input: "$(workspaces.custom2.bound)"
- operator: in
- values: ["true"]
- - name: should-continue
- image: bash:latest
- script: |
- #!/usr/bin/env bash
- echo blabalbaba
- - name: produce-step
- image: alpine
- results:
- - name: result2
- type: string
- script: |
- echo -n "foo" | tee $(step.results.result2.path)
- - name: run-based-on-step-results
- image: alpine
- script: |
- echo "wooooooo"
- when:
- - input: "$(steps.produce-step.results.result2)"
- operator: in
- values: ["bar"]
- workspaces:
- - name: custom
\ No newline at end of file
diff --git a/upstream/examples/v1/taskruns/beta/stepaction.yaml b/upstream/examples/v1/taskruns/alpha/stepaction.yaml
similarity index 81%
rename from upstream/examples/v1/taskruns/beta/stepaction.yaml
rename to upstream/examples/v1/taskruns/alpha/stepaction.yaml
index 9cb5aff50c9..3acc1e263f4 100644
--- a/upstream/examples/v1/taskruns/beta/stepaction.yaml
+++ b/upstream/examples/v1/taskruns/alpha/stepaction.yaml
@@ -1,9 +1,9 @@
-apiVersion: tekton.dev/v1beta1
+apiVersion: tekton.dev/v1alpha1
kind: StepAction
metadata:
name: step-action
spec:
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo "I am a Step Action!!!"
---
diff --git a/upstream/examples/v1/taskruns/alpha/task-artifacts.yaml b/upstream/examples/v1/taskruns/alpha/task-artifacts.yaml
deleted file mode 100644
index 7357bdf245f..00000000000
--- a/upstream/examples/v1/taskruns/alpha/task-artifacts.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- generateName: task-run-artifacts
-spec:
- taskSpec:
- description: |
- A simple task that produces artifacts
- steps:
- - name: produce-artifacts
- image: bash:latest
- script: |
- #!/usr/bin/env bash
- cat > $(artifacts.path) << EOF
- {
- "inputs":[
- {
- "name":"input-artifacts",
- "values":[
- {
- "uri":"pkg:example.github.com/inputs",
- "digest":{
- "sha256":"b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"
- }
- }
- ]
- }
- ],
- "outputs":[
- {
- "name":"image",
- "values":[
- {
- "uri":"pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c",
- "digest":{
- "sha256":"df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48",
- "sha1":"95588b8f34c31eb7d62c92aaa4e6506639b06ef2"
- }
- }
- ]
- }
- ]
- }
- EOF
\ No newline at end of file
diff --git a/upstream/examples/v1/taskruns/array-default.yaml b/upstream/examples/v1/taskruns/array-default.yaml
index 45f6fc8cd90..a6cb4216c1c 100644
--- a/upstream/examples/v1/taskruns/array-default.yaml
+++ b/upstream/examples/v1/taskruns/array-default.yaml
@@ -23,7 +23,7 @@ spec:
steps:
# this step should echo "foo bar foo-default bar-default baz"
- name: echo-params
- image: mirror.gcr.io/bash
+ image: bash:3.2
args: [
"echo",
"$(params.array-to-echo[*])",
diff --git a/upstream/examples/v1/taskruns/authenticating-git-commands.yaml b/upstream/examples/v1/taskruns/authenticating-git-commands.yaml
index c8d01221a33..4752c0f0b83 100644
--- a/upstream/examples/v1/taskruns/authenticating-git-commands.yaml
+++ b/upstream/examples/v1/taskruns/authenticating-git-commands.yaml
@@ -63,7 +63,7 @@ spec:
emptyDir: {}
sidecars:
- name: server
- image: docker.io/alpine/git:v2.26.2
+ image: alpine/git:v2.26.2
securityContext:
runAsUser: 0
volumeMounts:
@@ -106,7 +106,7 @@ spec:
- name: setup
# This Step is only necessary as part of the test, it's not something you'll
# ever need in a real-world scenario involving an external git repo.
- image: docker.io/alpine/git:v2.26.2
+ image: alpine/git:v2.26.2
securityContext:
runAsUser: 0
volumeMounts:
@@ -125,7 +125,7 @@ spec:
sleep 1
done
- name: git-clone-and-push
- image: docker.io/alpine/git:v2.26.2
+ image: alpine/git:v2.26.2
securityContext:
runAsUser: 0
workingDir: /root
diff --git a/upstream/examples/v1/taskruns/beta/authenticating-git-commands.yaml b/upstream/examples/v1/taskruns/beta/authenticating-git-commands.yaml
index 6e732b0bec9..b059545b8a1 100644
--- a/upstream/examples/v1/taskruns/beta/authenticating-git-commands.yaml
+++ b/upstream/examples/v1/taskruns/beta/authenticating-git-commands.yaml
@@ -67,7 +67,7 @@ spec:
mountPath: /messages
sidecars:
- name: server
- image: docker.io/alpine/git:v2.26.2
+ image: alpine/git:v2.26.2
securityContext:
runAsUser: 0
script: |
@@ -107,7 +107,7 @@ spec:
- name: setup
# This Step is only necessary as part of the test, it's not something you'll
# ever need in a real-world scenario involving an external git repo.
- image: docker.io/alpine/git:v2.26.2
+ image: alpine/git:v2.26.2
securityContext:
runAsUser: 0
script: |
@@ -123,7 +123,7 @@ spec:
sleep 1
done
- name: git-clone-and-push
- image: docker.io/alpine/git:v2.26.2
+ image: alpine/git:v2.26.2
securityContext:
runAsUser: 0
workingDir: /root
diff --git a/upstream/examples/v1/taskruns/beta/emit-array-results.yaml b/upstream/examples/v1/taskruns/beta/emit-array-results.yaml
index a55c1cd49f8..ac384cd8bf2 100644
--- a/upstream/examples/v1/taskruns/beta/emit-array-results.yaml
+++ b/upstream/examples/v1/taskruns/beta/emit-array-results.yaml
@@ -12,12 +12,12 @@ spec:
description: The array results
steps:
- name: write-array
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
#!/usr/bin/env bash
echo -n "[\"hello\",\"world\"]" | tee $(results.array-results.path)
- name: check-results-array
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/bin/bash
VALUE=$(cat $(results.array-results.path))
diff --git a/upstream/examples/v1/taskruns/beta/param_array_indexing.yaml b/upstream/examples/v1/taskruns/beta/param_array_indexing.yaml
index ec49c4e50b9..c654fbeb260 100644
--- a/upstream/examples/v1/taskruns/beta/param_array_indexing.yaml
+++ b/upstream/examples/v1/taskruns/beta/param_array_indexing.yaml
@@ -15,14 +15,14 @@ spec:
steps:
# this step should echo "foo"
- name: echo-params-1
- image: mirror.gcr.io/bash
+ image: bash:3.2
args: [
"echo",
"$(params.array-to-echo[0])",
]
# this step should echo "bar"
- name: echo-params-2
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/bin/bash
VALUE=$(params.array-to-echo[1])
diff --git a/upstream/examples/v1/taskruns/beta/propagated-object-parameters.yaml b/upstream/examples/v1/taskruns/beta/propagated-object-parameters.yaml
index 7bf2531406f..ecb1c0b00b8 100644
--- a/upstream/examples/v1/taskruns/beta/propagated-object-parameters.yaml
+++ b/upstream/examples/v1/taskruns/beta/propagated-object-parameters.yaml
@@ -11,7 +11,7 @@ spec:
taskSpec:
steps:
- name: echo-object-params
- image: mirror.gcr.io/bash
+ image: bash
args: [
"echo",
"--url=$(params.gitrepo.url)",
diff --git a/upstream/examples/v1/taskruns/beta/stepactions-steptemplate.yaml b/upstream/examples/v1/taskruns/beta/stepactions-steptemplate.yaml
deleted file mode 100644
index 98f9420f4b9..00000000000
--- a/upstream/examples/v1/taskruns/beta/stepactions-steptemplate.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-apiVersion: tekton.dev/v1beta1
-kind: StepAction
-metadata:
- name: step-action
-spec:
- image: mirror.gcr.io/alpine
- command: ["env"]
----
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: task-run
-spec:
- taskSpec:
- steps:
- - ref:
- name: step-action
- stepTemplate:
- env:
- - name: foo
- value: bar
\ No newline at end of file
diff --git a/upstream/examples/v1/taskruns/beta/workspace-in-sidecar.yaml b/upstream/examples/v1/taskruns/beta/workspace-in-sidecar.yaml
index 4fdae720aa3..47b4236369d 100644
--- a/upstream/examples/v1/taskruns/beta/workspace-in-sidecar.yaml
+++ b/upstream/examples/v1/taskruns/beta/workspace-in-sidecar.yaml
@@ -17,7 +17,7 @@ spec:
workspaces:
- name: signals
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine:3.12.0
computeResources:
requests:
memory: "16Mi"
@@ -33,7 +33,7 @@ spec:
echo "Sidecar responded: ${response}"
echo "Step Done."
sidecars:
- - image: mirror.gcr.io/alpine
+ - image: alpine:3.12.0
computeResources:
requests:
memory: "16Mi"
diff --git a/upstream/examples/v1/taskruns/beta/workspace-isolation.yaml b/upstream/examples/v1/taskruns/beta/workspace-isolation.yaml
index 6e9cc990953..37e6f891537 100644
--- a/upstream/examples/v1/taskruns/beta/workspace-isolation.yaml
+++ b/upstream/examples/v1/taskruns/beta/workspace-isolation.yaml
@@ -18,7 +18,7 @@ spec:
- name: signals
steps:
- name: await-sidecar-signal
- image: mirror.gcr.io/alpine
+ image: alpine:3.12.0
workspaces:
- name: signals
script: |
@@ -32,7 +32,7 @@ spec:
done
echo "Saw ready file"
- name: check-signals-access
- image: mirror.gcr.io/alpine
+ image: alpine:3.12.0
script: |
#!/usr/bin/env ash
if [ -f "$(workspaces.signals.path)"/start ] ; then
@@ -41,7 +41,7 @@ spec:
fi
sidecars:
- name: await-step-signal
- image: mirror.gcr.io/alpine
+ image: alpine:3.12.0
workspaces:
- name: signals
script: |
diff --git a/upstream/examples/v1/taskruns/configmap.yaml b/upstream/examples/v1/taskruns/configmap.yaml
index 44ee9e4227f..df5a4bd12a1 100644
--- a/upstream/examples/v1/taskruns/configmap.yaml
+++ b/upstream/examples/v1/taskruns/configmap.yaml
@@ -13,7 +13,7 @@ spec:
taskSpec:
steps:
- name: secret
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
[[ $(cat /config/test.data) == $TEST_DATA ]]
diff --git a/upstream/examples/v1/taskruns/creds-init-only-mounts-provided-credentials.yaml b/upstream/examples/v1/taskruns/creds-init-only-mounts-provided-credentials.yaml
index b29958bbdbf..f2c324000d1 100644
--- a/upstream/examples/v1/taskruns/creds-init-only-mounts-provided-credentials.yaml
+++ b/upstream/examples/v1/taskruns/creds-init-only-mounts-provided-credentials.yaml
@@ -30,7 +30,7 @@ spec:
taskSpec:
steps:
- name: check-credentials
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
#!/usr/bin/env ash
set -xe
diff --git a/upstream/examples/v1/taskruns/custom-env.yaml b/upstream/examples/v1/taskruns/custom-env.yaml
index 0d8c5d13ad9..93bc0961da8 100644
--- a/upstream/examples/v1/taskruns/custom-env.yaml
+++ b/upstream/examples/v1/taskruns/custom-env.yaml
@@ -5,7 +5,7 @@ metadata:
spec:
taskSpec:
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
script: |
#!/usr/bin/env bash
[[ $MY_VAR1 == foo ]]
diff --git a/upstream/examples/v1/taskruns/custom-volume.yaml b/upstream/examples/v1/taskruns/custom-volume.yaml
index f40af276ea8..5d62cb1daa2 100644
--- a/upstream/examples/v1/taskruns/custom-volume.yaml
+++ b/upstream/examples/v1/taskruns/custom-volume.yaml
@@ -6,7 +6,7 @@ spec:
taskSpec:
steps:
- name: write
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
echo some stuff > /im/a/custom/mount/path/file
@@ -14,7 +14,7 @@ spec:
- name: custom
mountPath: /im/a/custom/mount/path
- name: read
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
cat /short/and/stout/file | grep stuff
diff --git a/upstream/examples/v1/taskruns/default_task_params.yaml b/upstream/examples/v1/taskruns/default_task_params.yaml
deleted file mode 100644
index 0a8f426e5f6..00000000000
--- a/upstream/examples/v1/taskruns/default_task_params.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-apiVersion: tekton.dev/v1
-kind: Task
-metadata:
- # This has to be explicit instead of `generateName`, since it will be referenced
- # by the TaskRun
- name: example-default-task-param
-spec:
- params:
- - name: input
- default: "No input provided, but that's okay!"
- steps:
- - name: echo-input
- image: mirror.gcr.io/ubuntu
- script: |
- echo "$(params.input)"
----
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- generateName: default-task-params-run-
-spec:
- taskRef:
- name: example-default-task-param
- # # Uncomment this block to override the default param value!
- # params:
- # - name: input
- # value: "You can supply the param from the TaskRun if the default not what you want"
diff --git a/upstream/examples/v1/taskruns/entrypoint-resolution.yaml b/upstream/examples/v1/taskruns/entrypoint-resolution.yaml
index 022a6505d9c..b5b1a65d1eb 100644
--- a/upstream/examples/v1/taskruns/entrypoint-resolution.yaml
+++ b/upstream/examples/v1/taskruns/entrypoint-resolution.yaml
@@ -13,37 +13,37 @@ spec:
# Multi-arch image with no command defined, but with args. We'll look
# up the commands and pass it to the entrypoint binary via env var, then
# append the specified args.
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
args: ['-c', 'echo', 'hello']
# Multi-arch image, but since we specify `script` we don't need to look it
# up and pass it down.
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
script: echo hello
# Multi-arch image, but since we specify `command` and `args` we don't
# need to look it up and pass it down.
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
command: ['sh', '-c']
args: ['echo hello']
# Single-platform image with no command defined. We should look up the one
# and only command value and pass it to the Pod.
- - image: docker.io/amd64/ubuntu
+ - image: amd64/ubuntu
# Single-platform image with no command defined, but with args. We'll look
# up the one command and pass it to the entrypoint binary, then append the
# specified args.
- - image: docker.io/amd64/ubuntu
+ - image: amd64/ubuntu
args: ['-c', 'echo', 'hello']
# Single-platform image, but since we specify `script` we don't need to
# look it up and pass it down.
- - image: docker.io/amd64/ubuntu
+ - image: amd64/ubuntu
script: echo hello
# Single-platform image, but since we specify `command` and `args` we
# don't need to look it up and pass it down.
- - image: docker.io/amd64/ubuntu
+ - image: amd64/ubuntu
command: ['sh', '-c']
args: ['echo hello']
diff --git a/upstream/examples/v1/taskruns/home-is-set.yaml b/upstream/examples/v1/taskruns/home-is-set.yaml
index 26c22751369..20a3ae4ab72 100644
--- a/upstream/examples/v1/taskruns/home-is-set.yaml
+++ b/upstream/examples/v1/taskruns/home-is-set.yaml
@@ -5,7 +5,7 @@ metadata:
spec:
taskSpec:
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
env:
- name: HOME
value: /tekton/home
diff --git a/upstream/examples/v1/taskruns/home-volume.yaml b/upstream/examples/v1/taskruns/home-volume.yaml
index 235738c2441..a33d76192c4 100644
--- a/upstream/examples/v1/taskruns/home-volume.yaml
+++ b/upstream/examples/v1/taskruns/home-volume.yaml
@@ -6,13 +6,13 @@ spec:
taskSpec:
steps:
- name: write
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo some stuff > /tekton/home/stuff
- name: read
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat /tekton/home/stuff
- name: override-homevol
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
# /tekton/home/stuff *doesn't* exist, because the specified volumeMount
# conflicts with it, and the user's explicit declaration wins the tie.
script: |
diff --git a/upstream/examples/v1/taskruns/ignore-step-error.yaml b/upstream/examples/v1/taskruns/ignore-step-error.yaml
index 38a12872b84..95ceff08337 100644
--- a/upstream/examples/v1/taskruns/ignore-step-error.yaml
+++ b/upstream/examples/v1/taskruns/ignore-step-error.yaml
@@ -6,14 +6,14 @@ spec:
taskSpec:
steps:
# exit with 1 and ignore non zero exit code
- - image: mirror.gcr.io/alpine
+ - image: alpine
onError: continue
name: exit-with-1
script: |
exit 1
# check if the /tekton/steps/step-/exitCode got created and contains the exit code
# check if the symlink /tekton/steps/0/ got created
- - image: mirror.gcr.io/alpine
+ - image: alpine
name: verify-step-path
script: |
exitCode=`cat $(steps.step-exit-with-1.exitCode.path)`
diff --git a/upstream/examples/v1/taskruns/no-ci/default-workspaces.yaml b/upstream/examples/v1/taskruns/no-ci/default-workspaces.yaml
index 89822dfa03e..00371ab8495 100644
--- a/upstream/examples/v1/taskruns/no-ci/default-workspaces.yaml
+++ b/upstream/examples/v1/taskruns/no-ci/default-workspaces.yaml
@@ -7,11 +7,11 @@ spec:
- name: source
steps:
- name: write-file
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
echo "Hello, world!" > /workspace/source/hello.txt || exit 0
- name: read-file
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
grep "Hello, world" /workspace/source/hello.txt
---
diff --git a/upstream/examples/v1/taskruns/no-ci/limitrange.yaml b/upstream/examples/v1/taskruns/no-ci/limitrange.yaml
index 0b853da4eb6..5a7b5160574 100644
--- a/upstream/examples/v1/taskruns/no-ci/limitrange.yaml
+++ b/upstream/examples/v1/taskruns/no-ci/limitrange.yaml
@@ -25,7 +25,7 @@ metadata:
spec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
command:
- echo
args:
diff --git a/upstream/examples/v1/taskruns/object-param-result.yaml b/upstream/examples/v1/taskruns/object-param-result.yaml
index fdde89ecdec..0960e910f73 100644
--- a/upstream/examples/v1/taskruns/object-param-result.yaml
+++ b/upstream/examples/v1/taskruns/object-param-result.yaml
@@ -24,7 +24,7 @@ spec:
IMAGE_DIGEST: {type: string}
steps:
- name: validate-object-params
- image: mirror.gcr.io/bash
+ image: bash
args: [
"$(params.gitrepo.url)",
"$(params.gitrepo.commit)"
@@ -43,7 +43,7 @@ spec:
echo "validate the params.gitrepo.commit successfully"
fi
- name: write-object-result
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
#!/usr/bin/env bash
echo -n "{\"IMAGE_URL\":\"ar.com\", \"IMAGE_DIGEST\":\"sha234\"}" > $(results.object-results.path)
diff --git a/upstream/examples/v1/taskruns/optional-workspaces.yaml b/upstream/examples/v1/taskruns/optional-workspaces.yaml
index 237fc9c9c7e..9089b977d27 100644
--- a/upstream/examples/v1/taskruns/optional-workspaces.yaml
+++ b/upstream/examples/v1/taskruns/optional-workspaces.yaml
@@ -14,7 +14,7 @@ spec:
optional: true
steps:
- name: check-workspaces
- image: mirror.gcr.io/alpine
+ image: alpine:3.12.0
script: |
if [ "$(workspaces.source-code.bound)" == "true" ]; then
printf "Source code workspace was provided at %s!\n" "$(workspaces.source-code.path)"
diff --git a/upstream/examples/v1/taskruns/propagating_params_implicit.yaml b/upstream/examples/v1/taskruns/propagating_params_implicit.yaml
index effb2ef9b76..7782b02f0af 100644
--- a/upstream/examples/v1/taskruns/propagating_params_implicit.yaml
+++ b/upstream/examples/v1/taskruns/propagating_params_implicit.yaml
@@ -10,6 +10,6 @@ spec:
# There are no explicit params defined here. They are derived from the TaskRun.
steps:
- name: default
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
echo $(params.message)
diff --git a/upstream/examples/v1/taskruns/propagating_workspaces.yaml b/upstream/examples/v1/taskruns/propagating_workspaces.yaml
index ac0982a93c0..2c65241f0d1 100644
--- a/upstream/examples/v1/taskruns/propagating_workspaces.yaml
+++ b/upstream/examples/v1/taskruns/propagating_workspaces.yaml
@@ -6,7 +6,7 @@ spec:
taskSpec:
steps:
- name: simple-step
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
command:
- echo
args:
diff --git a/upstream/examples/v1/taskruns/readonly-internal-dir.yaml b/upstream/examples/v1/taskruns/readonly-internal-dir.yaml
index 6a5872a884f..6e72fb5c986 100644
--- a/upstream/examples/v1/taskruns/readonly-internal-dir.yaml
+++ b/upstream/examples/v1/taskruns/readonly-internal-dir.yaml
@@ -7,9 +7,9 @@ metadata:
spec:
taskSpec:
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
script: exit 0
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
script: |
set +e # dont fail the script on error
@@ -19,7 +19,7 @@ spec:
echo "able to write to run directory of non-current step"
exit 1
fi
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
script: |
set +e # dont fail the script on error
diff --git a/upstream/examples/v1/taskruns/run-steps-as-non-root.yaml b/upstream/examples/v1/taskruns/run-steps-as-non-root.yaml
index fabc85444fa..7ac6ae2c631 100644
--- a/upstream/examples/v1/taskruns/run-steps-as-non-root.yaml
+++ b/upstream/examples/v1/taskruns/run-steps-as-non-root.yaml
@@ -7,7 +7,7 @@ spec:
# no securityContext specified so will use
# securityContext from TaskRun podTemplate
- name: show-user-1001
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
command:
- ps
args:
@@ -15,7 +15,7 @@ spec:
# securityContext specified so will run as
# user 2000 instead of 1001
- name: show-user-2000
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
command:
- ps
args:
diff --git a/upstream/examples/v1/taskruns/secret-env.yaml b/upstream/examples/v1/taskruns/secret-env.yaml
index e3eb35f4ccb..b56be0d3233 100644
--- a/upstream/examples/v1/taskruns/secret-env.yaml
+++ b/upstream/examples/v1/taskruns/secret-env.yaml
@@ -13,7 +13,7 @@ spec:
taskSpec:
steps:
- name: secret
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
[[ $SECRET_PASSWORD == SECRET_PASSWORD ]]
diff --git a/upstream/examples/v1/taskruns/secret-volume-params.yaml b/upstream/examples/v1/taskruns/secret-volume-params.yaml
index 4976e5da4de..a8fed24b11d 100644
--- a/upstream/examples/v1/taskruns/secret-volume-params.yaml
+++ b/upstream/examples/v1/taskruns/secret-volume-params.yaml
@@ -16,7 +16,7 @@ spec:
description: Name of secret
type: string
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
script: |
#!/usr/bin/env bash
SECRET_PASSWORD=$(cat /var/secret/ninja)
diff --git a/upstream/examples/v1/taskruns/secret-volume.yaml b/upstream/examples/v1/taskruns/secret-volume.yaml
index 9cade7c451a..cc365220732 100644
--- a/upstream/examples/v1/taskruns/secret-volume.yaml
+++ b/upstream/examples/v1/taskruns/secret-volume.yaml
@@ -12,7 +12,7 @@ metadata:
spec:
taskSpec:
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
script: |
#!/usr/bin/env bash
SECRET_PASSWORD=$(cat /var/secret/ninja)
diff --git a/upstream/examples/v1/taskruns/sidecar-interp.yaml b/upstream/examples/v1/taskruns/sidecar-interp.yaml
index 2330bedbbe3..f8bd4e15cc5 100644
--- a/upstream/examples/v1/taskruns/sidecar-interp.yaml
+++ b/upstream/examples/v1/taskruns/sidecar-interp.yaml
@@ -12,7 +12,7 @@ spec:
emptyDir: {}
sidecars:
- name: value-sidecar
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
command:
- /bin/bash
args:
@@ -23,7 +23,7 @@ spec:
mountPath: /shared
steps:
- name: check-value
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/bin/bash
VALUE=$(cat /shared/value)
diff --git a/upstream/examples/v1/taskruns/sidecar-ready-script.yaml b/upstream/examples/v1/taskruns/sidecar-ready-script.yaml
index 9c925c4740d..8c1e17ea5e1 100644
--- a/upstream/examples/v1/taskruns/sidecar-ready-script.yaml
+++ b/upstream/examples/v1/taskruns/sidecar-ready-script.yaml
@@ -6,17 +6,16 @@ spec:
taskSpec:
sidecars:
- name: slow-sidecar
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
echo "hello from sidecar" > /shared/message
- sleep 2
volumeMounts:
- name: shared
mountPath: /shared
steps:
- name: check-ready
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat /shared/message
volumeMounts:
- name: shared
diff --git a/upstream/examples/v1/taskruns/sidecar-ready.yaml b/upstream/examples/v1/taskruns/sidecar-ready.yaml
index 45ac9572439..66c5b5cc491 100644
--- a/upstream/examples/v1/taskruns/sidecar-ready.yaml
+++ b/upstream/examples/v1/taskruns/sidecar-ready.yaml
@@ -6,7 +6,7 @@ spec:
taskSpec:
sidecars:
- name: slow-sidecar
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
command: ['sleep', 'infinity']
# The sidecar takes 5s to report as Ready, even after it starts. If the
# step runs as soon as the sidecar starts, it will fail because
@@ -18,22 +18,13 @@ spec:
- -c
- sleep 5 && touch /shared/ready
timeoutSeconds: 10
- # Adding startup probe for k8s native sidecar support
- # Readiness Probe is not honored for k8s native sidecar support
- startupProbe:
- exec:
- command:
- - sh
- - -c
- - sleep 5 && touch /shared/ready
- timeoutSeconds: 10
volumeMounts:
- name: shared
mountPath: /shared
steps:
- name: check-ready
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
# The step will only succeed if the sidecar has written this file, which
# it does 5s after it starts, before it reports Ready.
script: cat /shared/ready
diff --git a/upstream/examples/v1/taskruns/step-script.yaml b/upstream/examples/v1/taskruns/step-script.yaml
index fafe88f5831..47b59dc1866 100644
--- a/upstream/examples/v1/taskruns/step-script.yaml
+++ b/upstream/examples/v1/taskruns/step-script.yaml
@@ -10,10 +10,10 @@ spec:
steps:
- name: noshebang
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo "no shebang"
- name: bash
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
env:
- name: FOO
value: foooooooo
@@ -33,7 +33,7 @@ spec:
done
- name: place-file
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
securityContext:
runAsUser: 8000
script: |
@@ -41,13 +41,13 @@ spec:
echo "echo Hello from script file" > /workspace/hello
chmod +x /workspace/hello
- name: run-file
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
/workspace/hello
- name: contains-eof
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
securityContext:
runAsUser: 16000
workingDir: /workspace
@@ -59,26 +59,26 @@ spec:
cat file
- name: node
- image: mirror.gcr.io/node:lts-alpine3.20
+ image: node
script: |
#!/usr/bin/env node
console.log("Hello from Node!")
- name: python
- image: mirror.gcr.io/python:3.12.4
+ image: python
script: |
#!/usr/bin/env python3
print("Hello from Python!")
- name: perl
- image: mirror.gcr.io/perl:devel-bullseye
+ image: perl:devel-bullseye
script: |
#!/usr/bin/perl
print "Hello from Perl!"
# Test that param values are replaced.
- name: params-applied
- image: mirror.gcr.io/python:3.12.4
+ image: python
script: |
#!/usr/bin/env python3
v = '$(params.PARAM)'
@@ -89,7 +89,7 @@ spec:
# Test that args are allowed and passed to the script as expected.
- name: args-allowed
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
args: ['hello', 'world']
script: |
#!/usr/bin/env bash
@@ -99,7 +99,7 @@ spec:
# Test that multiple dollar signs next to each other are not replaced by Kubernetes
- name: dollar-signs-allowed
- image: mirror.gcr.io/python:3.12.4
+ image: python
script: |
#!/usr/bin/env python3
if '$' != '\u0024':
@@ -118,7 +118,7 @@ spec:
# Test that bash scripts with variable evaluations work as expected
- name: bash-variable-evaluations
- image: mirror.gcr.io/bash
+ image: bash:5.1.8
script: |
#!/usr/bin/env bash
set -xe
diff --git a/upstream/examples/v1/taskruns/steps-run-in-order.yaml b/upstream/examples/v1/taskruns/steps-run-in-order.yaml
index a38b230c9fa..af39b00fd0f 100644
--- a/upstream/examples/v1/taskruns/steps-run-in-order.yaml
+++ b/upstream/examples/v1/taskruns/steps-run-in-order.yaml
@@ -5,8 +5,8 @@ metadata:
spec:
taskSpec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
# NB: command is not set, so it must be looked up from the registry.
args: ['-c', 'sleep 3 && touch foo']
- - image: mirror.gcr.io/busybox
+ - image: busybox
args: ['-c', 'ls', 'foo']
diff --git a/upstream/examples/v1/taskruns/steptemplate-env-merge.yaml b/upstream/examples/v1/taskruns/steptemplate-env-merge.yaml
index a07cd6d0349..6b730f4c381 100644
--- a/upstream/examples/v1/taskruns/steptemplate-env-merge.yaml
+++ b/upstream/examples/v1/taskruns/steptemplate-env-merge.yaml
@@ -25,7 +25,7 @@ spec:
steps:
# Test the environment variables are set in the task
- name: foo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
[[ $FOO == "foo" ]]
@@ -33,7 +33,7 @@ spec:
- name: FOO
value: $(params.FOO)
- name: foobar
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
[[ $FOOBAR == "foobar" ]]
@@ -41,7 +41,7 @@ spec:
- name: FOOBAR
value: $(params.FOOBAR)
- name: bar
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
[[ $BAR == "bar" ]]
@@ -50,13 +50,13 @@ spec:
value: $(params.BAR)
# Use the env var from the stepTemplate
- name: qux-no-override
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
[[ $QUX == "original" ]]
# Override the env var in the stepTemplate
- name: qux-override
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
[[ $QUX == "override" ]]
diff --git a/upstream/examples/v1/taskruns/task-result.yaml b/upstream/examples/v1/taskruns/task-result.yaml
index 6c249dda873..116dea49daa 100644
--- a/upstream/examples/v1/taskruns/task-result.yaml
+++ b/upstream/examples/v1/taskruns/task-result.yaml
@@ -13,12 +13,12 @@ spec:
description: The current date in human readable format
steps:
- name: print-date-unix-timestamp
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
#!/usr/bin/env bash
date +%s | tee $(results.current-date-unix-timestamp.path)
- name: print-date-human-readable
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
#!/usr/bin/env bash
date | tee $(results.current-date-human-readable.path)
diff --git a/upstream/examples/v1/taskruns/task-volume-args.yaml b/upstream/examples/v1/taskruns/task-volume-args.yaml
index 7b938a084bd..8476f279bf1 100644
--- a/upstream/examples/v1/taskruns/task-volume-args.yaml
+++ b/upstream/examples/v1/taskruns/task-volume-args.yaml
@@ -17,7 +17,7 @@ spec:
type: string
steps:
- name: read
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
cat /configmap/test.data
diff --git a/upstream/examples/v1/taskruns/template-volume.yaml b/upstream/examples/v1/taskruns/template-volume.yaml
index 425a03bc9ba..ba20638ddf7 100644
--- a/upstream/examples/v1/taskruns/template-volume.yaml
+++ b/upstream/examples/v1/taskruns/template-volume.yaml
@@ -6,7 +6,7 @@ spec:
taskSpec:
steps:
- name: write
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
echo some stuff > /im/a/custom/mount/path/file
@@ -14,7 +14,7 @@ spec:
- name: custom
mountPath: /im/a/custom/mount/path
- name: read
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
cat /short/and/stout/file
diff --git a/upstream/examples/v1/taskruns/unnamed-steps.yaml b/upstream/examples/v1/taskruns/unnamed-steps.yaml
index 72abcb863cc..bc5c3cdc25a 100644
--- a/upstream/examples/v1/taskruns/unnamed-steps.yaml
+++ b/upstream/examples/v1/taskruns/unnamed-steps.yaml
@@ -5,9 +5,9 @@ metadata:
spec:
taskSpec:
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
script: 'true'
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
script: 'true'
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
script: 'true'
diff --git a/upstream/examples/v1/taskruns/using_context_variables.yaml b/upstream/examples/v1/taskruns/using_context_variables.yaml
index 1380fb6fdb5..572e2aa7507 100644
--- a/upstream/examples/v1/taskruns/using_context_variables.yaml
+++ b/upstream/examples/v1/taskruns/using_context_variables.yaml
@@ -5,11 +5,11 @@ metadata:
spec:
taskSpec:
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
name: print-uid
script: |
echo "TaskRunUID name: $(context.taskRun.uid)"
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
name: print-names
script: |
echo "Task name: $(context.task.name)"
diff --git a/upstream/examples/v1/taskruns/workingdir.yaml b/upstream/examples/v1/taskruns/workingdir.yaml
index 68c20f14901..5d4c3b77274 100644
--- a/upstream/examples/v1/taskruns/workingdir.yaml
+++ b/upstream/examples/v1/taskruns/workingdir.yaml
@@ -6,14 +6,14 @@ spec:
taskSpec:
steps:
- name: default
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
workingDir: /workspace
script: |
#!/usr/bin/env bash
[[ $PWD == /workspace ]]
- name: override
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
workingDir: '/a/path/too/far'
script: |
#!/usr/bin/env bash
diff --git a/upstream/examples/v1/taskruns/workspace-in-sidecar.yaml b/upstream/examples/v1/taskruns/workspace-in-sidecar.yaml
index deba1867656..ef6407b71ab 100644
--- a/upstream/examples/v1/taskruns/workspace-in-sidecar.yaml
+++ b/upstream/examples/v1/taskruns/workspace-in-sidecar.yaml
@@ -18,7 +18,7 @@ spec:
workspaces:
- name: signals
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine:3.12.0
computeResources:
requests:
memory: "16Mi"
@@ -34,7 +34,7 @@ spec:
echo "Sidecar responded: ${response}"
echo "Step Done."
sidecars:
- - image: mirror.gcr.io/alpine
+ - image: alpine:3.12.0
computeResources:
requests:
memory: "16Mi"
diff --git a/upstream/examples/v1/taskruns/workspace-readonly.yaml b/upstream/examples/v1/taskruns/workspace-readonly.yaml
index b23f5b5bab5..ff11a427fdf 100644
--- a/upstream/examples/v1/taskruns/workspace-readonly.yaml
+++ b/upstream/examples/v1/taskruns/workspace-readonly.yaml
@@ -29,19 +29,19 @@ spec:
readOnly: true
steps:
- name: write-allowed
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo "hello" > $(workspaces.write-allowed.path)/foo
- name: read-allowed
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat $(workspaces.write-allowed.path)/foo | grep "hello"
- name: write-disallowed
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script:
echo "goodbye" > $(workspaces.write-disallowed.path)/foo || touch write-failed.txt
test -f write-failed.txt
- name: read-again
# We should get "hello" when reading again because writing "goodbye" to
# the file should have been disallowed.
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script:
cat $(workspaces.write-disallowed.path)/foo | grep "hello"
diff --git a/upstream/examples/v1/taskruns/workspace-volume.yaml b/upstream/examples/v1/taskruns/workspace-volume.yaml
index 64d3ae58eed..980bc7499e3 100644
--- a/upstream/examples/v1/taskruns/workspace-volume.yaml
+++ b/upstream/examples/v1/taskruns/workspace-volume.yaml
@@ -6,14 +6,14 @@ spec:
taskSpec:
steps:
- name: write
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo some stuff > /workspace/stuff
- name: read
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat /workspace/stuff
- name: override-workspace
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
# /workspace/stuff *doesn't* exist.
script: |
#!/usr/bin/env bash
diff --git a/upstream/examples/v1/taskruns/workspace-with-volumeClaimTemplate.yaml b/upstream/examples/v1/taskruns/workspace-with-volumeClaimTemplate.yaml
index 53cc8a10d9c..7cc86e2d9f7 100644
--- a/upstream/examples/v1/taskruns/workspace-with-volumeClaimTemplate.yaml
+++ b/upstream/examples/v1/taskruns/workspace-with-volumeClaimTemplate.yaml
@@ -6,7 +6,7 @@ spec:
taskSpec:
steps:
- name: list-files
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: ls $(workspaces.read-allowed.path)
workspaces:
- name: read-allowed
diff --git a/upstream/examples/v1/taskruns/workspace.yaml b/upstream/examples/v1/taskruns/workspace.yaml
index 7f678e680c3..818a5176482 100644
--- a/upstream/examples/v1/taskruns/workspace.yaml
+++ b/upstream/examples/v1/taskruns/workspace.yaml
@@ -55,28 +55,28 @@ spec:
taskSpec:
steps:
- name: write
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo $(workspaces.custom.volume) > $(workspaces.custom.path)/foo
- name: read
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat $(workspaces.custom.path)/foo | grep $(workspaces.custom.volume)
- name: write2
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo $(workspaces.custom2.path) > $(workspaces.custom2.path)/foo
- name: read2
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat $(workspaces.custom2.path)/foo | grep $(workspaces.custom2.path)
- name: write3
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo $(workspaces.custom3.path) > $(workspaces.custom3.path)/foo
- name: read3
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat $(workspaces.custom3.path)/foo | grep $(workspaces.custom3.path)
- name: readconfigmap
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: cat $(workspaces.custom4.path)/my-message.txt | grep "hello world"
- name: readsecret
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
set -xe
diff --git a/upstream/examples/v1beta1/pipelineruns/clustertask-pipelinerun.yaml b/upstream/examples/v1beta1/pipelineruns/clustertask-pipelinerun.yaml
index 458043ca5dd..3ea557489e6 100644
--- a/upstream/examples/v1beta1/pipelineruns/clustertask-pipelinerun.yaml
+++ b/upstream/examples/v1beta1/pipelineruns/clustertask-pipelinerun.yaml
@@ -7,7 +7,7 @@ metadata:
spec:
steps:
- name: task-two-step-one
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
command: ["/bin/bash"]
args: ['-c', 'echo success']
---
diff --git a/upstream/examples/v1beta1/taskruns/clustertask.yaml b/upstream/examples/v1beta1/taskruns/clustertask.yaml
index 1dd8b6b8696..79a4ecd1a89 100644
--- a/upstream/examples/v1beta1/taskruns/clustertask.yaml
+++ b/upstream/examples/v1beta1/taskruns/clustertask.yaml
@@ -6,7 +6,7 @@ metadata:
name: clustertask-v1beta1
spec:
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
script: echo hello
---
apiVersion: tekton.dev/v1beta1
diff --git a/upstream/examples/v1beta1/taskruns/no-ci/tekton-bundles.yaml b/upstream/examples/v1beta1/taskruns/no-ci/tekton-bundles.yaml
new file mode 100644
index 00000000000..ac659ad8520
--- /dev/null
+++ b/upstream/examples/v1beta1/taskruns/no-ci/tekton-bundles.yaml
@@ -0,0 +1,9 @@
+# TODO: Move the example image to a tekton owned repo.
+apiVersion: tekton.dev/v1beta1
+kind: TaskRun
+metadata:
+ name: remote-task-reference
+spec:
+ taskRef:
+ name: hello-world
+ bundle: docker.io/ptasci67/example-oci@sha256:053a6cb9f3711d4527dd0d37ac610e8727ec0288a898d5dfbd79b25bcaa29828
diff --git a/upstream/go.mod b/upstream/go.mod
index 1a85fb966af..b2a697d1967 100644
--- a/upstream/go.mod
+++ b/upstream/go.mod
@@ -1,58 +1,66 @@
module github.com/tektoncd/pipeline
-go 1.22
-toolchain go1.22.5
+go 1.21
require (
- github.com/Microsoft/go-winio v0.6.2 // indirect
+ github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/ahmetb/gen-crd-api-reference-docs v0.3.1-0.20220720053627-e327d0730470 // Waiting for https://github.com/ahmetb/gen-crd-api-reference-docs/pull/43/files to merge
github.com/cloudevents/sdk-go/v2 v2.15.2
- github.com/containerd/containerd v1.7.20
+ github.com/containerd/containerd v1.7.15
github.com/go-git/go-git/v5 v5.12.0
github.com/google/go-cmp v0.6.0
- github.com/google/go-containerregistry v0.19.2
+ github.com/google/go-containerregistry v0.19.1
github.com/google/uuid v1.6.0
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/golang-lru v1.0.2
- github.com/jenkins-x/go-scm v1.14.37
+ github.com/jenkins-x/go-scm v1.14.30
github.com/mitchellh/go-homedir v1.1.0
github.com/opencontainers/image-spec v1.1.0
github.com/pkg/errors v0.9.1
- github.com/sigstore/sigstore v1.8.4
- github.com/spiffe/go-spiffe/v2 v2.3.0
- github.com/spiffe/spire-api-sdk v1.10.0
+ github.com/sigstore/sigstore v1.8.3
+ github.com/spiffe/go-spiffe/v2 v2.2.0
+ github.com/spiffe/spire-api-sdk v1.9.4
github.com/tektoncd/plumbing v0.0.0-20220817140952-3da8ce01aeeb
go.opencensus.io v0.24.0
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc
- golang.org/x/oauth2 v0.22.0 // indirect
+ golang.org/x/oauth2 v0.18.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0
- k8s.io/api v0.29.6
- k8s.io/apimachinery v0.29.7
- k8s.io/client-go v0.29.6
- k8s.io/code-generator v0.29.7
+ k8s.io/api v0.28.5
+ k8s.io/apimachinery v0.29.0
+ k8s.io/client-go v0.28.5
+ k8s.io/code-generator v0.28.5
k8s.io/klog v1.0.0
- k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00
- knative.dev/pkg v0.0.0-20240416145024-0f34a8815650
+ k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9
+ knative.dev/pkg v0.0.0-20240116073220-b488e7be5902
sigs.k8s.io/yaml v1.4.0
)
+// TODO: Remove this once https://github.com/knative/pkg/issues/2759 is fixed
+replace (
+ k8s.io/api => k8s.io/api v0.28.5
+ k8s.io/apimachinery => k8s.io/apimachinery v0.28.5
+ k8s.io/client-go => k8s.io/client-go v0.28.5
+ k8s.io/code-generator => k8s.io/code-generator v0.28.5
+)
+
require (
code.gitea.io/sdk/gitea v0.18.0
github.com/go-jose/go-jose/v3 v3.0.3
github.com/goccy/kpoward v0.1.0
github.com/google/cel-go v0.20.1
github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20240108195214-a0658aa1d0cc
- github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.4
- github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.4
- github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.4
- github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.4
- go.opentelemetry.io/otel v1.28.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0
- go.opentelemetry.io/otel/sdk v1.28.0
- go.opentelemetry.io/otel/trace v1.28.0
- k8s.io/utils v0.0.0-20240102154912-e7106e64919e
+ github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20240108195214-a0658aa1d0cc
+ github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.3
+ github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.3
+ github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.3
+ github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.3
+ go.opentelemetry.io/otel v1.26.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0
+ go.opentelemetry.io/otel/sdk v1.26.0
+ go.opentelemetry.io/otel/trace v1.26.0
+ k8s.io/utils v0.0.0-20230726121419-3b25d923346b
)
require (
@@ -68,58 +76,51 @@ require (
)
require (
- cloud.google.com/go v0.113.0 // indirect
- cloud.google.com/go/auth v0.4.1 // indirect
- cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
- cloud.google.com/go/compute/metadata v0.5.0 // indirect
- cloud.google.com/go/iam v1.1.8 // indirect
- cloud.google.com/go/kms v1.17.1 // indirect
- cloud.google.com/go/longrunning v0.5.7 // indirect
+ cloud.google.com/go/compute/metadata v0.2.3 // indirect
+ cloud.google.com/go/iam v1.1.6 // indirect
+ cloud.google.com/go/kms v1.15.8 // indirect
dario.cat/mergo v1.0.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 // indirect
- github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
- github.com/Microsoft/hcsshim v0.11.7 // indirect
+ github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
+ github.com/Microsoft/hcsshim v0.11.4 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect
- github.com/aws/aws-sdk-go-v2/service/kms v1.32.1 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/kms v1.30.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3 // indirect
github.com/cenkalti/backoff/v3 v3.2.2 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cloudflare/circl v1.3.7 // indirect
github.com/containerd/log v0.1.0 // indirect
- github.com/containerd/platforms v0.2.1 // indirect
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
github.com/davidmz/go-pageant v1.0.2 // indirect
- github.com/emicklei/go-restful/v3 v3.11.0 // indirect
+ github.com/emicklei/go-restful/v3 v3.10.2 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-fed/httpsig v1.1.0 // indirect
- github.com/go-jose/go-jose/v4 v4.0.2 // indirect
+ github.com/go-jose/go-jose/v4 v4.0.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
+ github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
- github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20240108195214-a0658aa1d0cc // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
- github.com/googleapis/gax-go/v2 v2.12.4 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+ github.com/googleapis/gax-go/v2 v2.12.3 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
- github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
+ github.com/hashicorp/go-retryablehttp v0.7.2 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
- github.com/hashicorp/vault/api v1.14.0 // indirect
+ github.com/hashicorp/vault/api v1.12.2 // indirect
github.com/jellydator/ttlcache/v3 v3.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
- github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
@@ -130,21 +131,16 @@ require (
github.com/zeebo/errs v1.3.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect
- go.opentelemetry.io/otel/metric v1.28.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 // indirect
+ go.opentelemetry.io/otel/metric v1.26.0 // indirect
go.opentelemetry.io/proto/otlp v1.2.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
)
-// TODO: Remove this once github.com/google/go-containerregistry uses github.com/aws/aws-sdk-go-v2 >v1.23.0
-replace (
- github.com/aws/aws-sdk-go-v2/service/ecr => github.com/aws/aws-sdk-go-v2/service/ecr v1.27.3
- github.com/aws/aws-sdk-go-v2/service/ecrpublic => github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.3
-)
-
require (
+ cloud.google.com/go/compute v1.24.0 // indirect
contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect
contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
@@ -156,39 +152,39 @@ require (
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
- github.com/aws/aws-sdk-go-v2 v1.27.0 // indirect
- github.com/aws/aws-sdk-go-v2/config v1.27.16 // indirect
- github.com/aws/aws-sdk-go-v2/credentials v1.17.16 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.7 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7 // indirect
+ github.com/aws/aws-sdk-go-v2 v1.26.0 // indirect
+ github.com/aws/aws-sdk-go-v2/config v1.27.9 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.17.9 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
github.com/aws/aws-sdk-go-v2/service/ecr v1.18.11 // indirect
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.16.2 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.20.9 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.28.10 // indirect
- github.com/aws/smithy-go v1.20.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.20.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.28.5 // indirect
+ github.com/aws/smithy-go v1.20.1 // indirect
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/blendle/zapdriver v1.3.1 // indirect
github.com/bluekeyes/go-gitdiff v0.7.1 // indirect
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
- github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/docker/cli v24.0.7+incompatible // indirect
github.com/docker/distribution v2.8.2+incompatible // indirect
- github.com/docker/docker v26.1.5+incompatible // indirect
+ github.com/docker/docker v26.0.0+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
- github.com/evanphx/json-patch/v5 v5.9.0 // indirect
+ github.com/evanphx/json-patch/v5 v5.8.0 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
- github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-logr/logr v1.4.1 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
@@ -198,13 +194,13 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gofuzz v1.2.0 // indirect
- github.com/hashicorp/go-version v1.7.0
+ github.com/hashicorp/go-version v1.6.0
github.com/imdario/mergo v0.3.13 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kelseyhightower/envconfig v1.4.0 // indirect
- github.com/klauspost/compress v1.16.7 // indirect
+ github.com/klauspost/compress v1.16.6 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
@@ -214,9 +210,9 @@ require (
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/openzipkin/zipkin-go v0.4.2 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/client_golang v1.19.0 // indirect
- github.com/prometheus/client_model v0.6.0 // indirect
- github.com/prometheus/common v0.52.3 // indirect
+ github.com/prometheus/client_golang v1.18.0 // indirect
+ github.com/prometheus/client_model v0.5.0 // indirect
+ github.com/prometheus/common v0.46.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/statsd_exporter v0.22.7 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
@@ -228,27 +224,28 @@ require (
github.com/vbatts/tar-split v0.11.3 // indirect
go.uber.org/automaxprocs v1.5.3 // indirect
go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/crypto v0.31.0 // indirect
- golang.org/x/mod v0.17.0 // indirect
- golang.org/x/net v0.28.0 // indirect
- golang.org/x/sync v0.10.0
- golang.org/x/sys v0.28.0 // indirect
- golang.org/x/term v0.27.0 // indirect
- golang.org/x/text v0.21.0 // indirect
+ golang.org/x/crypto v0.22.0 // indirect
+ golang.org/x/mod v0.14.0 // indirect
+ golang.org/x/net v0.24.0 // indirect
+ golang.org/x/sync v0.7.0
+ golang.org/x/sys v0.19.0 // indirect
+ golang.org/x/term v0.19.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.5.0 // indirect
- golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
- google.golang.org/api v0.181.0 // indirect
- google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect
- google.golang.org/grpc v1.67.0
- google.golang.org/protobuf v1.34.2
+ golang.org/x/tools v0.17.0 // indirect
+ google.golang.org/api v0.171.0 // indirect
+ google.golang.org/appengine v1.6.8 // indirect
+ google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
+ google.golang.org/grpc v1.63.2
+ google.golang.org/protobuf v1.33.0
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/apiextensions-apiserver v0.29.2 // indirect
- k8s.io/gengo v0.0.0-20240129211411-f967bbeff4b4 // indirect
- k8s.io/klog/v2 v2.120.1 // indirect
+ k8s.io/apiextensions-apiserver v0.28.5 // indirect
+ k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 // indirect
+ k8s.io/klog/v2 v2.100.1 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
)
replace github.com/ahmetb/gen-crd-api-reference-docs => github.com/tektoncd/ahmetb-gen-crd-api-reference-docs v0.3.1-0.20220729140133-6ce2d5aafcb4 // Waiting for https://github.com/ahmetb/gen-crd-api-reference-docs/pull/43/files to merge
diff --git a/upstream/go.sum b/upstream/go.sum
index e5c800de483..6c3a4ab7e29 100644
--- a/upstream/go.sum
+++ b/upstream/go.sum
@@ -20,29 +20,26 @@ cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECH
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
-cloud.google.com/go v0.113.0 h1:g3C70mn3lWfckKBiCVsAshabrDg01pQ0pnX1MNtnMkA=
-cloud.google.com/go v0.113.0/go.mod h1:glEqlogERKYeePz6ZdkcLJ28Q2I6aERgDDErBg9GzO8=
-cloud.google.com/go/auth v0.4.1 h1:Z7YNIhlWRtrnKlZke7z3GMqzvuYzdc2z98F9D1NV5Hg=
-cloud.google.com/go/auth v0.4.1/go.mod h1:QVBuVEKpCn4Zp58hzRGvL0tjRGU0YqdRTdCHM1IHnro=
-cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
-cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
+cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
+cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
-cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
+cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg=
+cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40=
+cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0=
-cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE=
-cloud.google.com/go/kms v1.17.1 h1:5k0wXqkxL+YcXd4viQzTqCgzzVKKxzgrK+rCZJytEQs=
-cloud.google.com/go/kms v1.17.1/go.mod h1:DCMnCF/apA6fZk5Cj4XsD979OyHAqFasPuA5Sd0kGlQ=
-cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU=
-cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng=
+cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=
+cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI=
+cloud.google.com/go/kms v1.15.8 h1:szIeDCowID8th2i8XE4uRev5PMxQFqW+JjwYxL9h6xs=
+cloud.google.com/go/kms v1.15.8/go.mod h1:WoUHcDjD9pluCg7pNds131awnH429QGvRM3N/4MyoVs=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -64,12 +61,12 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 h1:DRiANoJTiW6obBQe3SqZizkuV1PEgfiiGivmVocDy64=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0/go.mod h1:qLIye2hwb/ZouqhpSD9Zn3SJipvpEnz1Ywl3VUk9Y0s=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80=
@@ -78,12 +75,9 @@ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=
github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs=
-github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
-github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
@@ -95,17 +89,15 @@ github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZy
github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
-github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
-github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -119,8 +111,8 @@ github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JP
github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
-github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
-github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
@@ -128,8 +120,8 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim v0.11.7 h1:vl/nj3Bar/CvJSYo7gIQPyRWc9f3c6IeSNavBTSZNZQ=
-github.com/Microsoft/hcsshim v0.11.7/go.mod h1:MV8xMfmECjl5HdO7U/3/hFVnkmSBjAjmA09d4bExKcU=
+github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8=
+github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -161,56 +153,52 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
-github.com/aws/aws-sdk-go v1.53.10 h1:3enP5l5WtezT9Ql+XZqs56JBf5YUd/FEzTCg///OIGY=
-github.com/aws/aws-sdk-go v1.53.10/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.51.6 h1:Ld36dn9r7P9IjU8WZSaswQ8Y/XUCRpewim5980DwYiU=
+github.com/aws/aws-sdk-go v1.51.6/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
+github.com/aws/aws-sdk-go-v2 v1.26.0 h1:/Ce4OCiM3EkpW7Y+xUnfAFpchU78K7/Ug01sZni9PgA=
github.com/aws/aws-sdk-go-v2 v1.26.0/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I=
-github.com/aws/aws-sdk-go-v2 v1.27.0 h1:7bZWKoXhzI+mMR/HjdMx8ZCC5+6fY0lS5tr0bbgiLlo=
-github.com/aws/aws-sdk-go-v2 v1.27.0/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
github.com/aws/aws-sdk-go-v2/config v1.18.25/go.mod h1:dZnYpD5wTW/dQF0rRNLVypB396zWCcPiBIvdvSWHEg4=
-github.com/aws/aws-sdk-go-v2/config v1.27.16 h1:knpCuH7laFVGYTNd99Ns5t+8PuRjDn4HnnZK48csipM=
-github.com/aws/aws-sdk-go-v2/config v1.27.16/go.mod h1:vutqgRhDUktwSge3hrC3nkuirzkJ4E/mLj5GvI0BQas=
+github.com/aws/aws-sdk-go-v2/config v1.27.9 h1:gRx/NwpNEFSk+yQlgmk1bmxxvQ5TyJ76CWXs9XScTqg=
+github.com/aws/aws-sdk-go-v2/config v1.27.9/go.mod h1:dK1FQfpwpql83kbD873E9vz4FyAxuJtR22wzoXn3qq0=
github.com/aws/aws-sdk-go-v2/credentials v1.13.24/go.mod h1:jYPYi99wUOPIFi0rhiOvXeSEReVOzBqFNOX5bXYoG2o=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.16 h1:7d2QxY83uYl0l58ceyiSpxg9bSbStqBC6BeEeHEchwo=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.16/go.mod h1:Ae6li/6Yc6eMzysRL2BXlPYvnrLLBg3D11/AmOjw50k=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.9 h1:N8s0/7yW+h8qR8WaRlPQeJ6czVMNQVNtNdUqf6cItao=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.9/go.mod h1:446YhIdmSV0Jf/SLafGZalQo+xr2iw7/fzXGDPTU1yQ=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3/go.mod h1:4Q0UFP0YJf0NrsEuEYHpM9fTSEVnD16Z3uyEF7J9JGM=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3 h1:dQLK4TjtnlRGb0czOht2CevZ5l6RSyRWAnKeGd7VAFE=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3/go.mod h1:TL79f2P6+8Q7dTsILpiVST+AL9lkF6PPGI167Ny0Cjw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0 h1:af5YzcLf80tv4Em4jWVD75lpnOHSBkPUZxZfGkrI3HI=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.0/go.mod h1:nQ3how7DMnFMWiU1SpECohgC82fpn4cKZ875NDMmwtA=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4 h1:0ScVK/4qZ8CIW0k8jOeFVsyS/sAiXpYxRBLolMkuLQM=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.4/go.mod h1:84KyjNZdHC6QZW08nfHI6yZgPd+qRgaWcYsyLUo3QY8=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.7 h1:lf/8VTF2cM+N4SLzaYJERKEWAXq8MOMpZfU6wEPWsPk=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.7/go.mod h1:4SjkU7QiqK2M9oozyMzfZ/23LmUY+h3oFqhdeP5OMiI=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4 h1:sHmMWWX5E7guWEFQ9SVo6A3S4xpPrWnd77a6y4WM6PU=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.4/go.mod h1:WjpDrhWisWOIoS9n3nk67A3Ll1vfULJ9Kq6h29HTD48=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7 h1:4OYVp0705xu8yjdyoWix0r9wPIRXnIzzOoUpQVHIJ/g=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7/go.mod h1:vd7ESTEvI76T2Na050gODNmNU7+OyKrIKroYTu4ABiI=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
-github.com/aws/aws-sdk-go-v2/service/ecr v1.27.3 h1:gfgt0D8MGL3gHrJPEv4rcWptA4Nz7uYn25ls8lLiANw=
-github.com/aws/aws-sdk-go-v2/service/ecr v1.27.3/go.mod h1:O5Fvd41s5KfDG093xLM7FhGiH6EmhmEli5D5MQH3TWw=
-github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.3 h1:gaq/4fd2/bQeJ33m4csgL7DJHrrmvGhqnrsxchNr46c=
-github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.3/go.mod h1:vn+Rz9fAFGJtDXbBmYdTc71Q8iF/W/uK1/ec93hinD8=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.18.11 h1:wlTgmb/sCmVRJrN5De3CiHj4v/bTCgL5+qpdEd0CPtw=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.18.11/go.mod h1:Ce1q2jlNm8BVpjLaOnwnm5v2RClAbK6txwPljFzyW6c=
+github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.16.2 h1:yflJrGmi1pXtP9lOpOeaNZyc0vXnJTuP2sor3nJcGGo=
+github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.16.2/go.mod h1:uHtRE7aqXNmpeYL+7Ec7LacH5zC9+w2T5MBOeEKDdu0=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27/go.mod h1:EOwBD4J4S5qYszS5/3DpkejfuK+Z5/1uzICfPaZLtqw=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9 h1:Wx0rlZoEJR7JwlSZcHnEa7CNjrSIyVxMFWGAaXy4fJY=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9/go.mod h1:aVMHdE0aHO3v+f/iw01fmXV/5DbfQ3Bi9nN7nd9bE9Y=
-github.com/aws/aws-sdk-go-v2/service/kms v1.32.1 h1:FARrQLRQXpCFYylIUVF1dRij6YbPCmtwudq9NBk4kFc=
-github.com/aws/aws-sdk-go-v2/service/kms v1.32.1/go.mod h1:8lETO9lelSG2B6KMXFh2OwPPqGV6WQM3RqLAEjP1xaU=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6 h1:b+E7zIUHMmcB4Dckjpkapoy47W6C9QBv/zoUP+Hn8Kc=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.6/go.mod h1:S2fNV0rxrP78NhPbCZeQgY8H9jdDMeGtwcfZIRxzBqU=
+github.com/aws/aws-sdk-go-v2/service/kms v1.30.0 h1:yS0JkEdV6h9JOo8sy2JSpjX+i7vsKifU8SIeHrqiDhU=
+github.com/aws/aws-sdk-go-v2/service/kms v1.30.0/go.mod h1:+I8VUUSVD4p5ISQtzpgSva4I8cJ4SQ4b1dcBcof7O+g=
github.com/aws/aws-sdk-go-v2/service/sso v1.12.10/go.mod h1:ouy2P4z6sJN70fR3ka3wD3Ro3KezSxU6eKGQI2+2fjI=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.9 h1:aD7AGQhvPuAxlSUfo0CWU7s6FpkbyykMhGYMvlqTjVs=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.9/go.mod h1:c1qtZUWtygI6ZdvKppzCSXsDOq5I4luJPZ0Ud3juFCA=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.3 h1:mnbuWHOcM70/OFUlZZ5rcdfA8PflGXXiefU/O+1S3+8=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.3/go.mod h1:5HFu51Elk+4oRBZVxmHrSds5jFXmFj8C3w7DVF2gnrs=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10/go.mod h1:AFvkxc8xfBe8XA+5St5XIHHrQQtkxqrRincx4hmMHOk=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3 h1:Pav5q3cA260Zqez42T9UhIlsd9QeypszRPwC9LdSSsQ=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3/go.mod h1:9lmoVDVLz/yUZwLaQ676TK02fhCu4+PgRSmMaKR1ozk=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3 h1:uLq0BKatTmDzWa/Nu4WO0M1AaQDaPpwTKAeByEc6WFM=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.3/go.mod h1:b+qdhjnxj8GSR6t5YfphOffeoQSQ1KmpoVVuBn+PWxs=
github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.10 h1:69tpbPED7jKPyzMcrwSvhWcJ9bPnZsZs18NT40JwM0g=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.10/go.mod h1:0Aqn1MnEuitqfsCNyKsdKLhDUOr4txD/g19EfiUqgws=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.5 h1:J/PpTf/hllOjx8Xu9DMflff3FajfLxqM5+tepvVXmxg=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.5/go.mod h1:0ih0Z83YDH/QeQ6Ori2yGE2XvWYv/Xm+cZc01LC6oK0=
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw=
github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
-github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
-github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7 h1:G5IT+PEpFY0CDb3oITDP9tkmLrHkVD8Ny+elUmBqVYI=
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20230510185313-f5e39e5f34c7/go.mod h1:VVALgT1UESBh91dY0GprHnT1Z7mKd96VDk8qVy+bmu0=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -246,8 +234,8 @@ github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
-github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 h1:krfRl01rzPzxSxyLyrChD+U+MzsBXbm0OwYYB67uF+4=
github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589/go.mod h1:OuDyvmLnMCwa2ep4Jkm6nyA0ocJuZlGyk2gGseVzERM=
@@ -304,8 +292,8 @@ github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo
github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
github.com/containerd/containerd v1.5.2/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
-github.com/containerd/containerd v1.7.20 h1:Sl6jQYk3TRavaU83h66QMbI2Nqg9Jm6qzwX57Vsn1SQ=
-github.com/containerd/containerd v1.7.20/go.mod h1:52GsS5CwquuqPuLncsXwG0t2CiUce+KsNHJZQJvAgR0=
+github.com/containerd/containerd v1.7.15 h1:afEHXdil9iAm03BmhjzKyXnnEBtjaLJefdU7DV0IFes=
+github.com/containerd/containerd v1.7.15/go.mod h1:ISzRRTMF8EXNpJlTzyr2XMhN+j9K302C21/+cr3kUnY=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -335,8 +323,6 @@ github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3
github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
-github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
github.com/containerd/stargz-snapshotter/estargz v0.7.0/go.mod h1:83VWDqHnurTKliEB0YvWMiCfLDwv4Cjj1X9Vk98GJZw=
github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
@@ -404,6 +390,8 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
+github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/cli v20.10.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg=
github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
@@ -413,8 +401,8 @@ github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4Kfc
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v26.1.5+incompatible h1:NEAxTwEjxV6VbBMBoGG3zPqbiJosIApZjxlbrG9q3/g=
-github.com/docker/docker v26.1.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v26.0.0+incompatible h1:Ng2qi+gdKADUa/VM+6b6YaY2nlZhk/lVJiKR/2bMudU=
+github.com/docker/docker v26.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
@@ -425,17 +413,17 @@ github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
-github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE=
+github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
@@ -450,15 +438,14 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
-github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
+github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro=
+github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
@@ -485,8 +472,8 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
-github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk=
-github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
+github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U=
+github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
@@ -500,9 +487,12 @@ github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNV
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
@@ -511,6 +501,7 @@ github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
@@ -519,6 +510,7 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
@@ -545,8 +537,8 @@ github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
-github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
+github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -580,11 +572,13 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
@@ -606,8 +600,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-containerregistry v0.6.0/go.mod h1:euCCtNbZ6tKqi1E72vwDj2xZcN5ttKpZLfa/wSo5iLw=
-github.com/google/go-containerregistry v0.19.2 h1:TannFKE1QSajsP6hPWb5oJNgKe1IKjHukIKDUmvsV6w=
-github.com/google/go-containerregistry v0.19.2/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
+github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY=
+github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20240108195214-a0658aa1d0cc h1:eJ9J17+23quNw5z6O9AdTH+irI7JI+6eQX9TswViyvk=
github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20240108195214-a0658aa1d0cc/go.mod h1:Ek+8PQrShkA7aHEj3/zSW33wU0V/Bx3zW/gFh7l21xY=
github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20240108195214-a0658aa1d0cc h1:fHDosK/RhxYQpWBRo+bbawVuR402odSaNToA0Pp+ojw=
@@ -633,6 +627,7 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
@@ -643,14 +638,15 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg=
-github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
+github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA=
+github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
@@ -667,8 +663,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
@@ -680,16 +676,17 @@ github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
-github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs=
+github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
-github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
+github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0=
+github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
@@ -704,8 +701,8 @@ github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjG
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
-github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -717,12 +714,12 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/hashicorp/vault/api v1.14.0 h1:Ah3CFLixD5jmjusOgm8grfN9M0d+Y8fVR2SW0K6pJLU=
-github.com/hashicorp/vault/api v1.14.0/go.mod h1:pV9YLxBGSz+cItFDd8Ii4G17waWOQ32zVjMWHe/cOqk=
+github.com/hashicorp/vault/api v1.12.2 h1:7YkCTE5Ni90TcmYHDBExdt4WGJxhpzaHqR6uGbQb/rE=
+github.com/hashicorp/vault/api v1.12.2/go.mod h1:LSGf1NGT1BnvFFnKVtnvcaLBM2Lz+gJdpL6HUYed8KE=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
@@ -734,8 +731,8 @@ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOl
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jellydator/ttlcache/v3 v3.2.0 h1:6lqVJ8X3ZaUwvzENqPAobDsXNExfUJd61u++uW8a3LE=
github.com/jellydator/ttlcache/v3 v3.2.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4=
-github.com/jenkins-x/go-scm v1.14.37 h1:Tq59JXyg5p4iuvIKf6+EA+Yzgxgpn/yG/yfM1mL8DDg=
-github.com/jenkins-x/go-scm v1.14.37/go.mod h1:MRLj/i0mhpMtqwwZV+x78SkEB8mx9rv3ebdRg9WunS8=
+github.com/jenkins-x/go-scm v1.14.30 h1:UWhpY1906IQn4FtFo5CAuhpZtsHLnIjYlKinmDkBvXo=
+github.com/jenkins-x/go-scm v1.14.30/go.mod h1:9MGEHpudIt+JypKTf4zUHpiRggl5piIb9xcLu+1FrbU=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@@ -773,8 +770,8 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.0/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
-github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk=
+github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -856,11 +853,11 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -868,18 +865,43 @@ github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
-github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
+github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
+github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
+github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0=
+github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
+github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
+github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
+github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
+github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk=
+github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
+github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
+github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM=
+github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=
+github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
-github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
+github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
+github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
+github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
+github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
+github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
+github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
+github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw=
+github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
+github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
+github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
+github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -920,6 +942,7 @@ github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -942,15 +965,15 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
-github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
-github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
+github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
+github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
-github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
@@ -961,8 +984,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
-github.com/prometheus/common v0.52.3 h1:5f8uj6ZwHSscOGNdIQg6OiZv/ybiK2CO2q2drVZAQSA=
-github.com/prometheus/common v0.52.3/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
+github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y=
+github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -984,8 +1007,10 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
@@ -1008,16 +1033,16 @@ github.com/shurcooL/githubv4 v0.0.0-20190718010115-4ba037080260/go.mod h1:hAF0iL
github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f h1:tygelZueB1EtXkPI6mQ4o9DQ0+FKW41hTbunoXZCTqk=
github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f/go.mod h1:AuYgA5Kyo4c7HfUmvRGs/6rGlMMV/6B1bVnB9JxJEEg=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sigstore/sigstore v1.8.4 h1:g4ICNpiENFnWxjmBzBDWUn62rNFeny/P77HUC8da32w=
-github.com/sigstore/sigstore v1.8.4/go.mod h1:1jIKtkTFEeISen7en+ZPWdDHazqhxco/+v9CNjc7oNg=
-github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.4 h1:okxaVlaTrQowE1FA4UQ3rw54f7BUjdnzERIxbZTBZuc=
-github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.4/go.mod h1:jkcPErmnCECuSJajUaUq5pwCMOeBF19VzQo6bv4l1D0=
-github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.4 h1:1G6uLTZaqvu867DbgH7p75L6Y7Tu8LLnYJGZnWsTUu8=
-github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.4/go.mod h1:QtKKb8DChi1mRi9xSNr8ImSQu6m+0MZAV0sYIoPOta0=
-github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.4 h1:fjnDR5Lw9ElfOSRUGKkgwjaynqj93nLu0twAw+QxhHE=
-github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.4/go.mod h1:9KFn5MwelyNoFXu3gNyVzvN/yAhcL6FE053oxih9+vM=
-github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.4 h1:QEXOb+feQmNOyLVT+FrghBqKKK4QDMP5dyic8RZHXdE=
-github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.4/go.mod h1:ohOhV9zclcIpNAWS0kq2ASB3EPPuRce2HjgXXaU3pKQ=
+github.com/sigstore/sigstore v1.8.3 h1:G7LVXqL+ekgYtYdksBks9B38dPoIsbscjQJX/MGWkA4=
+github.com/sigstore/sigstore v1.8.3/go.mod h1:mqbTEariiGA94cn6G3xnDiV6BD8eSLdL/eA7bvJ0fVs=
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.3 h1:LTfPadUAo+PDRUbbdqbeSl2OuoFQwUFTnJ4stu+nwWw=
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.3/go.mod h1:QV/Lxlxm0POyhfyBtIbTWxNeF18clMlkkyL9mu45y18=
+github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.3 h1:xgbPRCr2npmmsuVVteJqi/ERw9+I13Wou7kq0Yk4D8g=
+github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.3/go.mod h1:G4+I83FILPX6MtnoaUdmv/bRGEVtR3JdLeJa/kXdk/0=
+github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.3 h1:vDl2fqPT0h3D/k6NZPlqnKFd1tz3335wm39qjvpZNJc=
+github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.3/go.mod h1:9uOJXbXEXj+M6QjMKH5PaL5WDMu43rHfbIMgXzA8eKI=
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.3 h1:h9G8j+Ds21zqqulDbA/R/ft64oQQIyp8S7wJYABYSlg=
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.3/go.mod h1:zgCeHOuqF6k7A7TTEvftcA9V3FRzB7mrPtHOhXAQBnc=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -1057,10 +1082,10 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
-github.com/spiffe/go-spiffe/v2 v2.3.0 h1:g2jYNb/PDMB8I7mBGL2Zuq/Ur6hUhoroxGQFyD6tTj8=
-github.com/spiffe/go-spiffe/v2 v2.3.0/go.mod h1:Oxsaio7DBgSNqhAO9i/9tLClaVlfRok7zvJnTV8ZyIY=
-github.com/spiffe/spire-api-sdk v1.10.0 h1:QFZ8fucWhbV4y4TKWKLxGc7SNrCLThn2t5qpVNIiiRY=
-github.com/spiffe/spire-api-sdk v1.10.0/go.mod h1:4uuhFlN6KBWjACRP3xXwrOTNnvaLp1zJs8Lribtr4fI=
+github.com/spiffe/go-spiffe/v2 v2.2.0 h1:9Vf06UsvsDbLYK/zJ4sYsIsHmMFknUD+feA7IYoWMQY=
+github.com/spiffe/go-spiffe/v2 v2.2.0/go.mod h1:Urzb779b3+IwDJD2ZbN8fVl3Aa8G4N/PiUe6iXC0XxU=
+github.com/spiffe/spire-api-sdk v1.9.4 h1:SbC37G2vwX/ojULl+avWvye4ITHYBorcC7+8ExWpNjA=
+github.com/spiffe/spire-api-sdk v1.9.4/go.mod h1:4uuhFlN6KBWjACRP3xXwrOTNnvaLp1zJs8Lribtr4fI=
github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
@@ -1130,6 +1155,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
@@ -1157,18 +1183,18 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.4
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
-go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
-go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY=
-go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
-go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
-go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
-go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
-go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
-go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs=
+go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0/go.mod h1:z46paqbJ9l7c9fIPCXTqTGwhQZ5XoTIsfeFYWboizjs=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 h1:1wp/gyxsuYtuE/JFxsQRtcCDtMrO2qMvlfXALU5wkzI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0/go.mod h1:gbTHmghkGgqxMomVQQMur1Nba4M0MQ8AYThXDUjsJ38=
+go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30=
+go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4=
+go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8=
+go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs=
+go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA=
+go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
@@ -1210,12 +1236,14 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
-golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
-golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
+golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1253,10 +1281,15 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
-golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1293,6 +1326,7 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@@ -1307,18 +1341,27 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
-golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
+golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1333,8 +1376,9 @@ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
-golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
+golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
+golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1349,8 +1393,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
-golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1422,6 +1466,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1438,8 +1483,11 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1449,21 +1497,29 @@ golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
-golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
+golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
-golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
+golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
+golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1473,18 +1529,21 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
-golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1541,19 +1600,26 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
+golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
+golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
+golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
+golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
@@ -1581,8 +1647,8 @@ google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBz
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
-google.golang.org/api v0.181.0 h1:rPdjwnWgiPPOJx3IcSAQ2III5aX5tCer6wMpa/xmZi4=
-google.golang.org/api v0.181.0/go.mod h1:MnQ+M0CFsfUwA5beZ+g/vCBCPXvtmZwRz2qzZk8ih1k=
+google.golang.org/api v0.171.0 h1:w174hnBPqut76FzW5Qaupt7zY8Kql6fiVjgys4f58sU=
+google.golang.org/api v0.171.0/go.mod h1:Hnq5AHm4OTMt2BUVjael2CWZFD6vksJdWCWiUAmjC9o=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1590,6 +1656,8 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
+google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -1637,12 +1705,12 @@ google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw=
-google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw=
-google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8=
-google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
+google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
+google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 h1:rIo7ocm2roD9DcFIX67Ym8icoGCKSARAiPljFhh5suQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1670,8 +1738,8 @@ google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
-google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw=
-google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
+google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
+google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -1688,8 +1756,9 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1749,28 +1818,19 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
-k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
-k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
-k8s.io/api v0.29.6 h1:eDxIl8+PeEpwbe2YyS5RXJ9vdn4hnKWMBf4WUJP9DQM=
-k8s.io/api v0.29.6/go.mod h1:ZuUPMhJV74DJXapldbg6upaHfiOjrBb+0ffUbBi1jaw=
-k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg=
-k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8=
-k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
-k8s.io/apimachinery v0.29.7 h1:ICXzya58Q7hyEEfnTrbmdfX1n1schSepX2KUfC2/ykc=
-k8s.io/apimachinery v0.29.7/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y=
+k8s.io/api v0.28.5 h1:XIPNr3nBgTEaCdEiwZ+dXaO9SB4NeTOZ2pNDRrFgfb4=
+k8s.io/api v0.28.5/go.mod h1:98zkTCc60iSnqqCIyCB1GI7PYDiRDYTSfL0PRIxpM4c=
+k8s.io/apiextensions-apiserver v0.28.5 h1:YKW9O9T/0Gkyl6LTFDLIhCbouSRh+pHt2vMLB38Snfc=
+k8s.io/apiextensions-apiserver v0.28.5/go.mod h1:7p7TQ0X9zCJLNFlOTi5dncAi2dkPsdsrcvu5ILa7PEk=
+k8s.io/apimachinery v0.28.5 h1:EEj2q1qdTcv2p5wl88KavAn3VlFRjREgRu8Sm/EuMPY=
+k8s.io/apimachinery v0.28.5/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
-k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
-k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
-k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
-k8s.io/client-go v0.29.6 h1:5E2ebuB/p0F0THuQatyvhDvPL2SIeqwTPrtnrwKob/8=
-k8s.io/client-go v0.29.6/go.mod h1:jHZcrQqDplyv20v7eu+iFM4gTpglZSZoMVcKrh8sRGg=
-k8s.io/code-generator v0.29.7 h1:NEwmKOJVNObCh3upBLEojL1QuJMzGplOTYZnee4h0TY=
-k8s.io/code-generator v0.29.7/go.mod h1:7TYnI0dYItL2cKuhhgPSuF3WED9uMdELgbVXFfn/joE=
+k8s.io/client-go v0.28.5 h1:6UNmc33vuJhh3+SAOEKku3QnKa+DtPKGnhO2MR0IEbk=
+k8s.io/client-go v0.28.5/go.mod h1:+pt086yx1i0HAlHzM9S+RZQDqdlzuXFl4hY01uhpcpA=
+k8s.io/code-generator v0.28.5 h1:6LXs+I/LOMGNLVI7z8xImLjI98o9vcwiHiQY6PyqpmU=
+k8s.io/code-generator v0.28.5/go.mod h1:OQAfl6bZikQ/tK6faJ18Vyzo54rUII2NmjurHyiN1g4=
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
@@ -1779,24 +1839,28 @@ k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20240129211411-f967bbeff4b4 h1:izq7u3SJBdOAuA5YYe1/PIp9jczrih/jGlKRRt0G7bQ=
-k8s.io/gengo v0.0.0-20240129211411-f967bbeff4b4/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 h1:iu3o/SxaHVI7tKPtkGzD3M9IzrE21j+CUKH98NQJ8Ms=
+k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
-k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
+k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
-k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
-k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
+k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
+k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ=
-k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-knative.dev/pkg v0.0.0-20240416145024-0f34a8815650 h1:m2ahFUO0L2VrgGDYdyOUFdE6xBd3pLXAJozLJwqLRQM=
-knative.dev/pkg v0.0.0-20240416145024-0f34a8815650/go.mod h1:soFw5ss08G4PU3JiFDKqiZRd2U7xoqcfNpJP1coIXkY=
+k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
+k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+knative.dev/pkg v0.0.0-20240116073220-b488e7be5902 h1:H6+JJN23fhwYWCHY1339sY6uhIyoUwDy1a8dN233fdk=
+knative.dev/pkg v0.0.0-20240116073220-b488e7be5902/go.mod h1:NYk8mMYoLkO7CQWnNkti4YGGnvLxN6MIDbUvtgeo0C0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
@@ -1806,9 +1870,10 @@ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMm
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/upstream/hack/update-deps.sh b/upstream/hack/update-deps.sh
index 64866ea7959..65a15ab118e 100755
--- a/upstream/hack/update-deps.sh
+++ b/upstream/hack/update-deps.sh
@@ -22,7 +22,7 @@ source $(git rev-parse --show-toplevel)/vendor/github.com/tektoncd/plumbing/scri
cd ${REPO_ROOT_DIR}
-KNATIVE_VERSION="release-1.14"
+KNATIVE_VERSION="release-1.13"
# The list of dependencies that we track at HEAD and periodically
# float forward in this repository.
diff --git a/upstream/internal/artifactref/artifactref.go b/upstream/internal/artifactref/artifactref.go
index cd8e1162ce3..b5a46a73023 100644
--- a/upstream/internal/artifactref/artifactref.go
+++ b/upstream/internal/artifactref/artifactref.go
@@ -2,17 +2,10 @@ package artifactref
import "regexp"
-// case 1: steps..inputs.
-// case 2: steps..outputs.
-const stepArtifactUsagePattern = `\$\(steps\.([^.]+)\.(?:inputs|outputs)\.([^.)]+)\)`
-
-// case 1: tasks..inputs.
-// case 2: tasks..outputs.
-const taskArtifactUsagePattern = `\$\(tasks\.([^.]+)\.(?:inputs|outputs)\.([^.)]+)\)`
-
-const StepArtifactPathPattern = `step.artifacts.path`
-
-const TaskArtifactPathPattern = `artifacts.path`
+// case 1: steps..inputs
+// case 2: steps..outputs
+// case 3: steps..inputs.
+// case 4: steps..outputs.
+const stepArtifactUsagePattern = `\$\(steps\.([^.]+)\.(?:inputs|outputs)(?:\.([^.^\)]+))?\)`
var StepArtifactRegex = regexp.MustCompile(stepArtifactUsagePattern)
-var TaskArtifactRegex = regexp.MustCompile(taskArtifactUsagePattern)
diff --git a/upstream/internal/sidecarlogresults/sidecarlogresults.go b/upstream/internal/sidecarlogresults/sidecarlogresults.go
index 823fe448075..e9ce11b86db 100644
--- a/upstream/internal/sidecarlogresults/sidecarlogresults.go
+++ b/upstream/internal/sidecarlogresults/sidecarlogresults.go
@@ -28,8 +28,6 @@ import (
"strings"
"github.com/tektoncd/pipeline/pkg/apis/config"
- "github.com/tektoncd/pipeline/pkg/apis/pipeline"
- v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/result"
"golang.org/x/sync/errgroup"
corev1 "k8s.io/api/core/v1"
@@ -37,19 +35,13 @@ import (
)
// ErrSizeExceeded indicates that the result exceeded its maximum allowed size
-var (
- ErrSizeExceeded = errors.New("results size exceeds configured limit")
- stepDir = pipeline.StepsDir
-)
+var ErrSizeExceeded = errors.New("results size exceeds configured limit")
type SidecarLogResultType string
const (
- taskResultType SidecarLogResultType = "task"
- stepResultType SidecarLogResultType = "step"
-
- stepArtifactType SidecarLogResultType = "stepArtifact"
- taskArtifactType SidecarLogResultType = "taskArtifact"
+ taskResultType SidecarLogResultType = "task"
+ stepResultType SidecarLogResultType = "step"
sidecarResultNameSeparator string = "."
)
@@ -149,6 +141,8 @@ func LookForResults(w io.Writer, runDir string, resultsDir string, resultNames [
results := make(chan SidecarLogResult)
g := new(errgroup.Group)
for _, resultFile := range resultNames {
+ resultFile := resultFile
+
g.Go(func() error {
newResult, err := readResults(resultsDir, resultFile, "", taskResultType)
if err != nil {
@@ -163,7 +157,10 @@ func LookForResults(w io.Writer, runDir string, resultsDir string, resultNames [
}
for sName, sresults := range stepResults {
+ sresults := sresults
+ sName := sName
for _, resultName := range sresults {
+ resultName := resultName
stepResultsDir := filepath.Join(stepResultsDir, sName, "results")
g.Go(func() error {
@@ -200,37 +197,6 @@ func LookForResults(w io.Writer, runDir string, resultsDir string, resultNames [
return nil
}
-// LookForArtifacts searches for and processes artifacts within a specified run directory.
-// It looks for "provenance.json" files within the "artifacts" subdirectory of each named step.
-// If the provenance file exists, the function extracts artifact information, formats it into a
-// JSON string, and encodes it for output alongside relevant metadata (step name, artifact type).
-func LookForArtifacts(w io.Writer, names []string, runDir string) error {
- if err := waitForStepsToFinish(runDir); err != nil {
- return err
- }
-
- for _, name := range names {
- p := filepath.Join(stepDir, name, "artifacts", "provenance.json")
- if exist, err := fileExists(p); err != nil {
- return err
- } else if !exist {
- continue
- }
- subRes, err := extractArtifactsFromFile(p)
- if err != nil {
- return err
- }
- values, err := json.Marshal(&subRes)
- if err != nil {
- return err
- }
- if err := encode(w, SidecarLogResult{Name: name, Value: string(values), Type: stepArtifactType}); err != nil {
- return err
- }
- }
- return nil
-}
-
// GetResultsFromSidecarLogs extracts results from the logs of the results sidecar
func GetResultsFromSidecarLogs(ctx context.Context, clientset kubernetes.Interface, namespace string, name string, container string, podPhase corev1.PodPhase) ([]result.RunResult, error) {
sidecarLogResults := []result.RunResult{}
@@ -284,12 +250,8 @@ func parseResults(resultBytes []byte, maxResultLimit int) (result.RunResult, err
resultType = result.TaskRunResultType
case stepResultType:
resultType = result.StepResultType
- case stepArtifactType:
- resultType = result.StepArtifactsResultType
- case taskArtifactType:
- resultType = result.TaskRunArtifactsResultType
default:
- return result.RunResult{}, fmt.Errorf("invalid sidecar result type %v. Must be %v or %v or %v", res.Type, taskResultType, stepResultType, stepArtifactType)
+ return result.RunResult{}, fmt.Errorf("invalid sidecar result type %v. Must be %v or %v", res.Type, taskResultType, stepResultType)
}
runResult = result.RunResult{
Key: res.Name,
@@ -298,19 +260,3 @@ func parseResults(resultBytes []byte, maxResultLimit int) (result.RunResult, err
}
return runResult, nil
}
-
-func parseArtifacts(fileContent []byte) (v1.Artifacts, error) {
- var as v1.Artifacts
- if err := json.Unmarshal(fileContent, &as); err != nil {
- return as, fmt.Errorf("invalid artifacts : %w", err)
- }
- return as, nil
-}
-
-func extractArtifactsFromFile(filename string) (v1.Artifacts, error) {
- b, err := os.ReadFile(filename)
- if err != nil {
- return v1.Artifacts{}, fmt.Errorf("error reading the results file %w", err)
- }
- return parseArtifacts(b)
-}
diff --git a/upstream/internal/sidecarlogresults/sidecarlogresults_test.go b/upstream/internal/sidecarlogresults/sidecarlogresults_test.go
index ed3d7ba1dbf..2f6a3de8dad 100644
--- a/upstream/internal/sidecarlogresults/sidecarlogresults_test.go
+++ b/upstream/internal/sidecarlogresults/sidecarlogresults_test.go
@@ -29,10 +29,10 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
- v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/result"
"github.com/tektoncd/pipeline/test/diff"
corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
fakekubeclientset "k8s.io/client-go/kubernetes/fake"
)
@@ -80,7 +80,7 @@ func TestLookForResults_FanOutAndWait(t *testing.T) {
sort.Slice(wantResults, func(i int, j int) bool { return wantResults[i] < wantResults[j] })
sort.Slice(got.Bytes(), func(i int, j int) bool { return got.Bytes()[i] < got.Bytes()[j] })
if d := cmp.Diff(wantResults, got.Bytes()); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
@@ -143,7 +143,7 @@ func TestLookForResults(t *testing.T) {
t.Fatalf("Did not expect any error but got: %v", err)
}
if d := cmp.Diff(want, got.Bytes()); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
@@ -213,7 +213,7 @@ func TestLookForStepResults(t *testing.T) {
t.Fatalf("Did not expect any error but got: %v", err)
}
if d := cmp.Diff(want, got.Bytes()); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
@@ -285,98 +285,27 @@ func TestParseResults(t *testing.T) {
Name: "result1",
Value: "foo",
Type: "task",
- },
- {
+ }, {
Name: "result2",
Value: `{"IMAGE_URL":"ar.com", "IMAGE_DIGEST":"sha234"}`,
Type: "task",
- },
- {
+ }, {
Name: "result3",
Value: `["hello","world"]`,
Type: "task",
- },
- {
+ }, {
Name: "step-foo.result1",
Value: "foo",
Type: "step",
- },
- {
+ }, {
Name: "step-foo.result2",
Value: `{"IMAGE_URL":"ar.com", "IMAGE_DIGEST":"sha234"}`,
Type: "step",
- },
- {
+ }, {
Name: "step-foo.result3",
Value: `["hello","world"]`,
Type: "step",
},
- {
- Name: "step-artifacts-result",
- Value: `{
- "inputs":[
- {
- "name":"input-artifacts",
- "values":[
- {
- "uri":"pkg:example.github.com/inputs",
- "digest":{
- "sha256":"b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"
- }
- }
- ]
- }
- ],
- "outputs":[
- {
- "name":"image",
- "values":[
- {
- "uri":"pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c",
- "digest":{
- "sha256":"df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48",
- "sha1":"95588b8f34c31eb7d62c92aaa4e6506639b06ef2"
- }
- }
- ]
- }
- ]
- }`,
- Type: "stepArtifact",
- },
- {
- Name: "task-run-artifacts-result",
- Value: `{
- "inputs":[
- {
- "name":"input-artifacts",
- "values":[
- {
- "uri":"pkg:example.github.com/inputs",
- "digest":{
- "sha256":"b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"
- }
- }
- ]
- }
- ],
- "outputs":[
- {
- "name":"image",
- "values":[
- {
- "uri":"pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c",
- "digest":{
- "sha256":"df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48",
- "sha1":"95588b8f34c31eb7d62c92aaa4e6506639b06ef2"
- }
- }
- ]
- }
- ]
- }`,
- Type: "taskArtifact",
- },
}
podLogs := []string{}
for _, r := range results {
@@ -407,70 +336,6 @@ func TestParseResults(t *testing.T) {
Key: "step-foo.result3",
Value: `["hello","world"]`,
ResultType: result.StepResultType,
- }, {
- Key: "step-artifacts-result",
- Value: `{
- "inputs":[
- {
- "name":"input-artifacts",
- "values":[
- {
- "uri":"pkg:example.github.com/inputs",
- "digest":{
- "sha256":"b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"
- }
- }
- ]
- }
- ],
- "outputs":[
- {
- "name":"image",
- "values":[
- {
- "uri":"pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c",
- "digest":{
- "sha256":"df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48",
- "sha1":"95588b8f34c31eb7d62c92aaa4e6506639b06ef2"
- }
- }
- ]
- }
- ]
- }`,
- ResultType: result.StepArtifactsResultType,
- }, {
- Key: "task-run-artifacts-result",
- Value: `{
- "inputs":[
- {
- "name":"input-artifacts",
- "values":[
- {
- "uri":"pkg:example.github.com/inputs",
- "digest":{
- "sha256":"b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"
- }
- }
- ]
- }
- ],
- "outputs":[
- {
- "name":"image",
- "values":[
- {
- "uri":"pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c",
- "digest":{
- "sha256":"df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48",
- "sha1":"95588b8f34c31eb7d62c92aaa4e6506639b06ef2"
- }
- }
- ]
- }
- ]
- }`,
- ResultType: result.TaskRunArtifactsResultType,
}}
stepResults := []result.RunResult{}
for _, plog := range podLogs {
@@ -489,7 +354,7 @@ func TestParseResults_InvalidType(t *testing.T) {
results := []SidecarLogResult{{
Name: "result1",
Value: "foo",
- Type: "invalid",
+ Type: "not task or step",
}}
podLogs := []string{}
for _, r := range results {
@@ -498,7 +363,7 @@ func TestParseResults_InvalidType(t *testing.T) {
}
for _, plog := range podLogs {
_, err := parseResults([]byte(plog), 4096)
- wantErr := errors.New("invalid sidecar result type invalid. Must be task or step or stepArtifact")
+ wantErr := errors.New("invalid sidecar result type not task or step. Must be task or step")
if d := cmp.Diff(wantErr.Error(), err.Error()); d != "" {
t.Fatal(diff.PrintWantGot(d))
}
@@ -532,7 +397,7 @@ func TestParseResults_Failure(t *testing.T) {
func TestGetResultsFromSidecarLogs(t *testing.T) {
for _, c := range []struct {
desc string
- podPhase corev1.PodPhase
+ podPhase v1.PodPhase
wantError bool
}{{
desc: "pod pending to start",
@@ -546,7 +411,7 @@ func TestGetResultsFromSidecarLogs(t *testing.T) {
t.Run(c.desc, func(t *testing.T) {
ctx := context.Background()
clientset := fakekubeclientset.NewSimpleClientset()
- pod := &corev1.Pod{
+ pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
@@ -555,15 +420,15 @@ func TestGetResultsFromSidecarLogs(t *testing.T) {
Name: "pod",
Namespace: "foo",
},
- Spec: corev1.PodSpec{
- Containers: []corev1.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "container",
Image: "image",
},
},
},
- Status: corev1.PodStatus{
+ Status: v1.PodStatus{
Phase: c.podPhase,
},
}
@@ -609,166 +474,6 @@ func TestExtractStepAndResultFromSidecarResultName_Error(t *testing.T) {
}
}
-func TestLookForArtifacts(t *testing.T) {
- base := basicArtifacts()
- modified := base.DeepCopy()
- modified.Outputs[0].Name = "tests"
- type Arg struct {
- stepName string
- artifacts *v1.Artifacts
- customContent []byte
- }
- tests := []struct {
- desc string
- wantErr bool
- args []Arg
- expected []SidecarLogResult
- }{
- {
- desc: "one step produces artifacts, read success",
- args: []Arg{{stepName: "first", artifacts: &base}},
- expected: []SidecarLogResult{{
- Name: "first",
- Type: stepArtifactType,
- Value: mustJSON(&base),
- }},
- },
- {
- desc: "two step produce artifacts, read success",
- args: []Arg{{stepName: "first", artifacts: &base}, {stepName: "second", artifacts: modified}},
- expected: []SidecarLogResult{{
- Name: "first",
- Type: stepArtifactType,
- Value: mustJSON(&base),
- }, {
- Name: "second",
- Type: stepArtifactType,
- Value: mustJSON(modified),
- }},
- },
- {
- desc: "one step produces artifacts, one step does not, read success",
- args: []Arg{{stepName: "first", artifacts: &base}, {stepName: "second"}},
- expected: []SidecarLogResult{{
- Name: "first",
- Type: stepArtifactType,
- Value: mustJSON(&base),
- }},
- },
- {
- desc: "two step produces, one read success, one not, error out and result is not empty.",
- args: []Arg{{stepName: "first", artifacts: &base}, {stepName: "second", artifacts: modified, customContent: []byte("this is to break json")}},
- expected: []SidecarLogResult{{
- Name: "first",
- Type: stepArtifactType,
- Value: mustJSON(&base),
- }},
- wantErr: true,
- },
- {
- desc: "two step produces, first read fails, error out and result is empty.",
- args: []Arg{{stepName: "first", artifacts: modified, customContent: []byte("this is to break json")}, {stepName: "second", artifacts: &base}},
- expected: []SidecarLogResult{},
- wantErr: true,
- },
- }
-
- for _, tc := range tests {
- t.Run(tc.desc, func(t *testing.T) {
- dir := t.TempDir()
- curStepDir := stepDir
- stepDir = dir
- t.Cleanup(func() {
- stepDir = curStepDir
- })
-
- var names []string
- for _, arg := range tc.args {
- names = append(names, arg.stepName)
- if err := os.MkdirAll(filepath.Join(dir, arg.stepName, "artifacts"), os.ModePerm); err != nil {
- t.Errorf("failed to create artifacts folder, err: %v", err)
- }
- if _, err := os.Create(filepath.Join(dir, arg.stepName, "out")); err != nil {
- t.Errorf("failed to file, err: %v", err)
- }
- if arg.artifacts != nil {
- if err := writeArtifacts(filepath.Join(dir, arg.stepName, "artifacts", "provenance.json"), arg.artifacts); err != nil {
- t.Errorf("failed to write artifacts to provenance.json, err: %v", err)
- }
- }
- if arg.customContent != nil {
- if err := os.WriteFile(filepath.Join(dir, arg.stepName, "artifacts", "provenance.json"), arg.customContent, os.ModePerm); err != nil {
- t.Errorf("failed to write customContent to provenance.json, err: %v", err)
- }
- }
- }
- var buf bytes.Buffer
- err := LookForArtifacts(&buf, names, dir)
- if (err != nil) != tc.wantErr {
- t.Errorf("error checking failed, wantErr: %v, got: %v", tc.wantErr, err)
- }
- want := ""
- for _, logResult := range tc.expected {
- want += mustJSON(logResult) + "\n"
- }
- got := buf.String()
-
- if d := cmp.Diff(want, got); d != "" {
- t.Error(diff.PrintWantGot(d))
- }
- })
- }
-}
-
-func writeArtifacts(path string, artifacts *v1.Artifacts) error {
- f, err := os.Create(path)
- if err != nil {
- return err
- }
- defer f.Close()
- res := json.NewEncoder(f).Encode(artifacts)
- return res
-}
-
-func basicArtifacts() v1.Artifacts {
- data := `{
- "inputs":[
- {
- "name":"inputs",
- "values":[
- {
- "uri":"pkg:example.github.com/inputs",
- "digest":{
- "sha256":"b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"
- }
- }
- ]
- }
- ],
- "outputs":[
- {
- "name":"image",
- "values":[
- {
- "uri":"pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c",
- "digest":{
- "sha256":"df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48",
- "sha1":"95588b8f34c31eb7d62c92aaa4e6506639b06ef2"
- }
- }
- ]
- }
- ]
- }
-`
- var ars v1.Artifacts
- err := json.Unmarshal([]byte(data), &ars)
- if err != nil {
- panic(err)
- }
- return ars
-}
-
func createStepResult(t *testing.T, dir, stepName, resultName, resultValue string) {
t.Helper()
resultDir := filepath.Join(dir, stepName, "results")
@@ -802,11 +507,3 @@ func createRun(t *testing.T, dir string, causeErr bool) {
t.Fatal(err)
}
}
-
-func mustJSON(data any) string {
- marshal, err := json.Marshal(data)
- if err != nil {
- panic(err)
- }
- return string(marshal)
-}
diff --git a/upstream/pkg/apis/config/feature_flags.go b/upstream/pkg/apis/config/feature_flags.go
index d0db62052cf..f8c057801a0 100644
--- a/upstream/pkg/apis/config/feature_flags.go
+++ b/upstream/pkg/apis/config/feature_flags.go
@@ -102,16 +102,12 @@ const (
EnableCELInWhenExpression = "enable-cel-in-whenexpression"
// EnableStepActions is the flag to enable the use of StepActions in Steps
EnableStepActions = "enable-step-actions"
+
// EnableArtifacts is the flag to enable the use of Artifacts in Steps
EnableArtifacts = "enable-artifacts"
+
// EnableParamEnum is the flag to enabled enum in params
EnableParamEnum = "enable-param-enum"
- // EnableConciseResolverSyntax is the flag to enable concise resolver syntax
- EnableConciseResolverSyntax = "enable-concise-resolver-syntax"
- // EnableKubernetesSidecar is the flag to enable kubernetes sidecar support
- EnableKubernetesSidecar = "enable-kubernetes-sidecar"
- // DefaultEnableKubernetesSidecar is the default value for EnableKubernetesSidecar
- DefaultEnableKubernetesSidecar = false
// DisableInlineSpec is the flag to disable embedded spec
// in Taskrun or Pipelinerun
@@ -122,16 +118,16 @@ const (
runningInEnvWithInjectedSidecarsKey = "running-in-environment-with-injected-sidecars"
awaitSidecarReadinessKey = "await-sidecar-readiness"
requireGitSSHSecretKnownHostsKey = "require-git-ssh-secret-known-hosts" //nolint:gosec
- // enableTektonOCIBundles = "enable-tekton-oci-bundles"
- enableAPIFields = "enable-api-fields"
- sendCloudEventsForRuns = "send-cloudevents-for-runs"
- enforceNonfalsifiability = "enforce-nonfalsifiability"
- verificationNoMatchPolicy = "trusted-resources-verification-no-match-policy"
- enableProvenanceInStatus = "enable-provenance-in-status"
- resultExtractionMethod = "results-from"
- maxResultSize = "max-result-size"
- setSecurityContextKey = "set-security-context"
- coscheduleKey = "coschedule"
+ enableTektonOCIBundles = "enable-tekton-oci-bundles"
+ enableAPIFields = "enable-api-fields"
+ sendCloudEventsForRuns = "send-cloudevents-for-runs"
+ enforceNonfalsifiability = "enforce-nonfalsifiability"
+ verificationNoMatchPolicy = "trusted-resources-verification-no-match-policy"
+ enableProvenanceInStatus = "enable-provenance-in-status"
+ resultExtractionMethod = "results-from"
+ maxResultSize = "max-result-size"
+ setSecurityContextKey = "set-security-context"
+ coscheduleKey = "coschedule"
)
// DefaultFeatureFlags holds all the default configurations for the feature flags configmap.
@@ -155,13 +151,13 @@ var (
// DefaultEnableStepActions is the default PerFeatureFlag value for EnableStepActions
DefaultEnableStepActions = PerFeatureFlag{
Name: EnableStepActions,
- Stability: BetaAPIFields,
- Enabled: DefaultBetaFeatureEnabled,
+ Stability: AlphaAPIFields,
+ Enabled: DefaultAlphaFeatureEnabled,
}
- // DefaultEnableArtifacts is the default PerFeatureFlag value for EnableArtifacts
+ // DefaultEnableArtifacts is the default PerFeatureFlag value for EnableStepActions
DefaultEnableArtifacts = PerFeatureFlag{
- Name: EnableArtifacts,
+ Name: EnableStepActions,
Stability: AlphaAPIFields,
Enabled: DefaultAlphaFeatureEnabled,
}
@@ -172,13 +168,6 @@ var (
Stability: AlphaAPIFields,
Enabled: DefaultAlphaFeatureEnabled,
}
-
- // DefaultEnableConciseResolverSyntax is the default PerFeatureFlag value for EnableConciseResolverSyntax
- DefaultEnableConciseResolverSyntax = PerFeatureFlag{
- Name: EnableConciseResolverSyntax,
- Stability: AlphaAPIFields,
- Enabled: DefaultAlphaFeatureEnabled,
- }
)
// FeatureFlags holds the features configurations
@@ -188,31 +177,29 @@ type FeatureFlags struct {
DisableCredsInit bool
RunningInEnvWithInjectedSidecars bool
RequireGitSSHSecretKnownHosts bool
- // EnableTektonOCIBundles bool // Deprecated: this is now ignored
- // ScopeWhenExpressionsToTask bool // Deprecated: this is now ignored
- EnableAPIFields string
- SendCloudEventsForRuns bool
- AwaitSidecarReadiness bool
- EnforceNonfalsifiability string
- EnableKeepPodOnCancel bool
+ EnableTektonOCIBundles bool
+ ScopeWhenExpressionsToTask bool
+ EnableAPIFields string
+ SendCloudEventsForRuns bool
+ AwaitSidecarReadiness bool
+ EnforceNonfalsifiability string
+ EnableKeepPodOnCancel bool
// VerificationNoMatchPolicy is the feature flag for "trusted-resources-verification-no-match-policy"
// VerificationNoMatchPolicy can be set to "ignore", "warn" and "fail" values.
// ignore: skip trusted resources verification when no matching verification policies found
// warn: skip trusted resources verification when no matching verification policies found and log a warning
// fail: fail the taskrun or pipelines run if no matching verification policies found
- VerificationNoMatchPolicy string
- EnableProvenanceInStatus bool
- ResultExtractionMethod string
- MaxResultSize int
- SetSecurityContext bool
- Coschedule string
- EnableCELInWhenExpression bool
- EnableStepActions bool
- EnableParamEnum bool
- EnableArtifacts bool
- DisableInlineSpec string
- EnableConciseResolverSyntax bool
- EnableKubernetesSidecar bool
+ VerificationNoMatchPolicy string
+ EnableProvenanceInStatus bool
+ ResultExtractionMethod string
+ MaxResultSize int
+ SetSecurityContext bool
+ Coschedule string
+ EnableCELInWhenExpression bool
+ EnableStepActions bool
+ EnableParamEnum bool
+ EnableArtifacts bool
+ DisableInlineSpec string
}
// GetFeatureFlagsConfigName returns the name of the configmap containing all
@@ -307,20 +294,27 @@ func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) {
if err := setPerFeatureFlag(EnableParamEnum, DefaultEnableParamEnum, &tc.EnableParamEnum); err != nil {
return nil, err
}
+
if err := setPerFeatureFlag(EnableArtifacts, DefaultEnableArtifacts, &tc.EnableArtifacts); err != nil {
return nil, err
}
-
if err := setFeatureInlineSpec(cfgMap, DisableInlineSpec, DefaultDisableInlineSpec, &tc.DisableInlineSpec); err != nil {
return nil, err
}
- if err := setPerFeatureFlag(EnableConciseResolverSyntax, DefaultEnableConciseResolverSyntax, &tc.EnableConciseResolverSyntax); err != nil {
- return nil, err
- }
- if err := setFeature(EnableKubernetesSidecar, DefaultEnableKubernetesSidecar, &tc.EnableKubernetesSidecar); err != nil {
- return nil, err
- }
+ // Given that they are alpha features, Tekton Bundles and Custom Tasks should be switched on if
+ // enable-api-fields is "alpha". If enable-api-fields is not "alpha" then fall back to the value of
+ // each feature's individual flag.
+ //
+ // Note: the user cannot enable "alpha" while disabling bundles or custom tasks - that would
+ // defeat the purpose of having a single shared gate for all alpha features.
+ if tc.EnableAPIFields == AlphaAPIFields {
+ tc.EnableTektonOCIBundles = true
+ } else {
+ if err := setFeature(enableTektonOCIBundles, DefaultEnableTektonOciBundles, &tc.EnableTektonOCIBundles); err != nil {
+ return nil, err
+ }
+ }
return &tc, nil
}
diff --git a/upstream/pkg/apis/config/feature_flags_test.go b/upstream/pkg/apis/config/feature_flags_test.go
index 30c0e06a8e0..0cefc572368 100644
--- a/upstream/pkg/apis/config/feature_flags_test.go
+++ b/upstream/pkg/apis/config/feature_flags_test.go
@@ -43,6 +43,7 @@ func TestNewFeatureFlagsFromConfigMap(t *testing.T) {
RequireGitSSHSecretKnownHosts: false,
DisableCredsInit: config.DefaultDisableCredsInit,
AwaitSidecarReadiness: config.DefaultAwaitSidecarReadiness,
+ EnableTektonOCIBundles: config.DefaultEnableTektonOciBundles,
EnableAPIFields: config.DefaultEnableAPIFields,
SendCloudEventsForRuns: config.DefaultSendCloudEventsForRuns,
VerificationNoMatchPolicy: config.DefaultNoMatchPolicyConfig,
@@ -57,7 +58,6 @@ func TestNewFeatureFlagsFromConfigMap(t *testing.T) {
EnableStepActions: config.DefaultEnableStepActions.Enabled,
EnableParamEnum: config.DefaultEnableParamEnum.Enabled,
DisableInlineSpec: config.DefaultDisableInlineSpec,
- EnableConciseResolverSyntax: config.DefaultEnableConciseResolverSyntax.Enabled,
},
fileName: config.GetFeatureFlagsConfigName(),
},
@@ -67,6 +67,7 @@ func TestNewFeatureFlagsFromConfigMap(t *testing.T) {
RunningInEnvWithInjectedSidecars: false,
AwaitSidecarReadiness: false,
RequireGitSSHSecretKnownHosts: true,
+ EnableTektonOCIBundles: true,
EnableAPIFields: "alpha",
SendCloudEventsForRuns: true,
EnforceNonfalsifiability: "spire",
@@ -82,8 +83,6 @@ func TestNewFeatureFlagsFromConfigMap(t *testing.T) {
EnableArtifacts: true,
EnableParamEnum: true,
DisableInlineSpec: "pipeline,pipelinerun,taskrun",
- EnableConciseResolverSyntax: true,
- EnableKubernetesSidecar: true,
},
fileName: "feature-flags-all-flags-set",
},
@@ -92,6 +91,7 @@ func TestNewFeatureFlagsFromConfigMap(t *testing.T) {
EnableAPIFields: "alpha",
// These are prescribed as true by enabling "alpha" API fields, even
// if the submitted text value is "false".
+ EnableTektonOCIBundles: true,
EnforceNonfalsifiability: config.DefaultEnforceNonfalsifiability,
DisableAffinityAssistant: config.DefaultDisableAffinityAssistant,
DisableCredsInit: config.DefaultDisableCredsInit,
@@ -109,7 +109,6 @@ func TestNewFeatureFlagsFromConfigMap(t *testing.T) {
EnableCELInWhenExpression: config.DefaultEnableCELInWhenExpression.Enabled,
EnableStepActions: config.DefaultEnableStepActions.Enabled,
EnableParamEnum: config.DefaultEnableParamEnum.Enabled,
- EnableArtifacts: config.DefaultEnableArtifacts.Enabled,
DisableInlineSpec: config.DefaultDisableInlineSpec,
},
fileName: "feature-flags-enable-api-fields-overrides-bundles-and-custom-tasks",
@@ -117,6 +116,7 @@ func TestNewFeatureFlagsFromConfigMap(t *testing.T) {
{
expectedConfig: &config.FeatureFlags{
EnableAPIFields: "stable",
+ EnableTektonOCIBundles: true,
EnforceNonfalsifiability: config.DefaultEnforceNonfalsifiability,
DisableAffinityAssistant: config.DefaultDisableAffinityAssistant,
DisableCredsInit: config.DefaultDisableCredsInit,
@@ -138,6 +138,7 @@ func TestNewFeatureFlagsFromConfigMap(t *testing.T) {
{
expectedConfig: &config.FeatureFlags{
EnableAPIFields: "beta",
+ EnableTektonOCIBundles: config.DefaultEnableTektonOciBundles,
EnforceNonfalsifiability: config.DefaultEnforceNonfalsifiability,
DisableAffinityAssistant: config.DefaultDisableAffinityAssistant,
DisableCredsInit: config.DefaultDisableCredsInit,
@@ -160,6 +161,7 @@ func TestNewFeatureFlagsFromConfigMap(t *testing.T) {
expectedConfig: &config.FeatureFlags{
EnableAPIFields: config.DefaultEnableAPIFields,
EnforceNonfalsifiability: config.EnforceNonfalsifiabilityWithSpire,
+ EnableTektonOCIBundles: config.DefaultEnableTektonOciBundles,
VerificationNoMatchPolicy: config.DefaultNoMatchPolicyConfig,
RunningInEnvWithInjectedSidecars: config.DefaultRunningInEnvWithInjectedSidecars,
AwaitSidecarReadiness: config.DefaultAwaitSidecarReadiness,
@@ -215,6 +217,7 @@ func TestNewFeatureFlagsFromEmptyConfigMap(t *testing.T) {
RunningInEnvWithInjectedSidecars: config.DefaultRunningInEnvWithInjectedSidecars,
AwaitSidecarReadiness: config.DefaultAwaitSidecarReadiness,
RequireGitSSHSecretKnownHosts: config.DefaultRequireGitSSHSecretKnownHosts,
+ EnableTektonOCIBundles: config.DefaultEnableTektonOciBundles,
EnableAPIFields: config.DefaultEnableAPIFields,
SendCloudEventsForRuns: config.DefaultSendCloudEventsForRuns,
EnforceNonfalsifiability: config.DefaultEnforceNonfalsifiability,
@@ -312,12 +315,6 @@ func TestNewFeatureFlagsConfigMapErrors(t *testing.T) {
}, {
fileName: "feature-flags-invalid-enable-artifacts",
want: `failed parsing feature flags config "invalid": strconv.ParseBool: parsing "invalid": invalid syntax for feature enable-artifacts`,
- }, {
- fileName: "feature-flags-invalid-enable-concise-resolver-syntax",
- want: `failed parsing feature flags config "invalid": strconv.ParseBool: parsing "invalid": invalid syntax for feature enable-concise-resolver-syntax`,
- }, {
- fileName: "feature-flags-invalid-enable-kubernetes-sidecar",
- want: `failed parsing feature flags config "invalid": strconv.ParseBool: parsing "invalid": invalid syntax`,
}} {
t.Run(tc.fileName, func(t *testing.T) {
cm := test.ConfigMapFromTestFile(t, tc.fileName)
@@ -332,6 +329,7 @@ func TestNewFeatureFlagsConfigMapErrors(t *testing.T) {
}
func TestGetVerificationNoMatchPolicy(t *testing.T) {
+ ctx := context.Background()
tcs := []struct {
name, noMatchPolicy, expected string
}{{
@@ -350,7 +348,6 @@ func TestGetVerificationNoMatchPolicy(t *testing.T) {
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
- ctx := context.Background()
store := config.NewStore(logging.FromContext(ctx).Named("config-store"))
featureflags := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -414,7 +411,7 @@ func TestIsSpireEnabled(t *testing.T) {
Data: tc.configmap,
}
store.OnConfigChanged(featureflags)
- ctx := store.ToContext(ctx)
+ ctx = store.ToContext(ctx)
got := config.IsSpireEnabled(ctx)
if tc.want != got {
diff --git a/upstream/pkg/apis/config/metrics.go b/upstream/pkg/apis/config/metrics.go
index f86d4a136a2..2b18f6d0c38 100644
--- a/upstream/pkg/apis/config/metrics.go
+++ b/upstream/pkg/apis/config/metrics.go
@@ -29,9 +29,6 @@ const (
// metricsPipelinerunLevel determines to what level to aggregate metrics
// for pipelinerun
metricsPipelinerunLevelKey = "metrics.pipelinerun.level"
- // metricsRunningPipelinerunLevelKey determines to what level to aggregate metrics
- // for running pipelineruns
- metricsRunningPipelinerunLevelKey = "metrics.running-pipelinerun.level"
// metricsDurationTaskrunType determines what type of
// metrics to use for aggregating duration for taskrun
metricsDurationTaskrunType = "metrics.taskrun.duration-type"
@@ -42,9 +39,6 @@ const (
// countWithReasonKey sets if the reason label should be included on count metrics
countWithReasonKey = "metrics.count.enable-reason"
- // throttledWithNamespaceKey sets if the namespace label should be included on the taskrun throttled metrics
- throttledWithNamespaceKey = "metrics.taskrun.throttle.enable-namespace"
-
// DefaultTaskrunLevel determines to what level to aggregate metrics
// when it isn't specified in configmap
DefaultTaskrunLevel = TaskrunLevelAtTask
@@ -58,9 +52,6 @@ const (
// DefaultPipelinerunLevel determines to what level to aggregate metrics
// when it isn't specified in configmap
DefaultPipelinerunLevel = PipelinerunLevelAtPipeline
- // DefaultRunningPipelinerunLevel determines to what level to aggregate metrics
- // when it isn't specified in configmap
- DefaultRunningPipelinerunLevel = ""
// PipelinerunLevelAtPipelinerun specify that aggregation will be done at
// pipelinerun level
PipelinerunLevelAtPipelinerun = "pipelinerun"
@@ -102,11 +93,9 @@ var DefaultMetrics, _ = newMetricsFromMap(map[string]string{})
type Metrics struct {
TaskrunLevel string
PipelinerunLevel string
- RunningPipelinerunLevel string
DurationTaskrunType string
DurationPipelinerunType string
CountWithReason bool
- ThrottleWithNamespace bool
}
// GetMetricsConfigName returns the name of the configmap containing all
@@ -137,11 +126,9 @@ func newMetricsFromMap(cfgMap map[string]string) (*Metrics, error) {
tc := Metrics{
TaskrunLevel: DefaultTaskrunLevel,
PipelinerunLevel: DefaultPipelinerunLevel,
- RunningPipelinerunLevel: DefaultRunningPipelinerunLevel,
DurationTaskrunType: DefaultDurationTaskrunType,
DurationPipelinerunType: DefaultDurationPipelinerunType,
CountWithReason: false,
- ThrottleWithNamespace: false,
}
if taskrunLevel, ok := cfgMap[metricsTaskrunLevelKey]; ok {
@@ -151,9 +138,6 @@ func newMetricsFromMap(cfgMap map[string]string) (*Metrics, error) {
if pipelinerunLevel, ok := cfgMap[metricsPipelinerunLevelKey]; ok {
tc.PipelinerunLevel = pipelinerunLevel
}
- if runningPipelinerunLevel, ok := cfgMap[metricsRunningPipelinerunLevelKey]; ok {
- tc.RunningPipelinerunLevel = runningPipelinerunLevel
- }
if durationTaskrun, ok := cfgMap[metricsDurationTaskrunType]; ok {
tc.DurationTaskrunType = durationTaskrun
}
@@ -165,10 +149,6 @@ func newMetricsFromMap(cfgMap map[string]string) (*Metrics, error) {
tc.CountWithReason = true
}
- if throttleWithNamespace, ok := cfgMap[throttledWithNamespaceKey]; ok && throttleWithNamespace != "false" {
- tc.ThrottleWithNamespace = true
- }
-
return &tc, nil
}
diff --git a/upstream/pkg/apis/config/metrics_test.go b/upstream/pkg/apis/config/metrics_test.go
index 62872897af1..4508a5c71e3 100644
--- a/upstream/pkg/apis/config/metrics_test.go
+++ b/upstream/pkg/apis/config/metrics_test.go
@@ -36,11 +36,9 @@ func TestNewMetricsFromConfigMap(t *testing.T) {
expectedConfig: &config.Metrics{
TaskrunLevel: config.TaskrunLevelAtTaskrun,
PipelinerunLevel: config.PipelinerunLevelAtPipelinerun,
- RunningPipelinerunLevel: config.DefaultRunningPipelinerunLevel,
DurationTaskrunType: config.DurationPipelinerunTypeHistogram,
DurationPipelinerunType: config.DurationPipelinerunTypeHistogram,
CountWithReason: false,
- ThrottleWithNamespace: false,
},
fileName: config.GetMetricsConfigName(),
},
@@ -48,11 +46,9 @@ func TestNewMetricsFromConfigMap(t *testing.T) {
expectedConfig: &config.Metrics{
TaskrunLevel: config.TaskrunLevelAtNS,
PipelinerunLevel: config.PipelinerunLevelAtNS,
- RunningPipelinerunLevel: config.PipelinerunLevelAtNS,
DurationTaskrunType: config.DurationTaskrunTypeHistogram,
DurationPipelinerunType: config.DurationPipelinerunTypeLastValue,
CountWithReason: false,
- ThrottleWithNamespace: false,
},
fileName: "config-observability-namespacelevel",
},
@@ -60,26 +56,12 @@ func TestNewMetricsFromConfigMap(t *testing.T) {
expectedConfig: &config.Metrics{
TaskrunLevel: config.TaskrunLevelAtNS,
PipelinerunLevel: config.PipelinerunLevelAtNS,
- RunningPipelinerunLevel: config.DefaultRunningPipelinerunLevel,
DurationTaskrunType: config.DurationTaskrunTypeHistogram,
DurationPipelinerunType: config.DurationPipelinerunTypeLastValue,
CountWithReason: true,
- ThrottleWithNamespace: false,
},
fileName: "config-observability-reason",
},
- {
- expectedConfig: &config.Metrics{
- TaskrunLevel: config.TaskrunLevelAtNS,
- PipelinerunLevel: config.PipelinerunLevelAtNS,
- RunningPipelinerunLevel: config.PipelinerunLevelAtPipeline,
- DurationTaskrunType: config.DurationTaskrunTypeHistogram,
- DurationPipelinerunType: config.DurationPipelinerunTypeLastValue,
- CountWithReason: true,
- ThrottleWithNamespace: true,
- },
- fileName: "config-observability-throttle",
- },
}
for _, tc := range testCases {
@@ -92,11 +74,9 @@ func TestNewMetricsFromEmptyConfigMap(t *testing.T) {
expectedConfig := &config.Metrics{
TaskrunLevel: config.TaskrunLevelAtTask,
PipelinerunLevel: config.PipelinerunLevelAtPipeline,
- RunningPipelinerunLevel: config.DefaultRunningPipelinerunLevel,
DurationTaskrunType: config.DurationPipelinerunTypeHistogram,
DurationPipelinerunType: config.DurationPipelinerunTypeHistogram,
CountWithReason: false,
- ThrottleWithNamespace: false,
}
verifyConfigFileWithExpectedMetricsConfig(t, MetricsConfigEmptyName, expectedConfig)
}
diff --git a/upstream/pkg/apis/config/testdata/config-observability-namespacelevel.yaml b/upstream/pkg/apis/config/testdata/config-observability-namespacelevel.yaml
index 65a72ede515..5029ee0099f 100644
--- a/upstream/pkg/apis/config/testdata/config-observability-namespacelevel.yaml
+++ b/upstream/pkg/apis/config/testdata/config-observability-namespacelevel.yaml
@@ -27,5 +27,4 @@ data:
metrics.taskrun.level: "namespace"
metrics.taskrun.duration-type: "histogram"
metrics.pipelinerun.level: "namespace"
- metrics.running-pipelinerun.level: "namespace"
metrics.pipelinerun.duration-type: "lastvalue"
diff --git a/upstream/pkg/apis/config/testdata/config-observability-throttle.yaml b/upstream/pkg/apis/config/testdata/config-observability-throttle.yaml
deleted file mode 100644
index 08fe6ac9d5a..00000000000
--- a/upstream/pkg/apis/config/testdata/config-observability-throttle.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2019 The Tekton Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: config-observability
- namespace: tekton-pipelines
- labels:
- app.kubernetes.io/instance: default
- app.kubernetes.io/part-of: tekton-pipelines
-data:
- metrics.backend-destination: prometheus
- metrics.stackdriver-project-id: ""
- metrics.allow-stackdriver-custom-metrics: "false"
- metrics.taskrun.level: "namespace"
- metrics.taskrun.duration-type: "histogram"
- metrics.pipelinerun.level: "namespace"
- metrics.running-pipelinerun.level: "pipeline"
- metrics.pipelinerun.duration-type: "lastvalue"
- metrics.count.enable-reason: "true"
- metrics.taskrun.throttle.enable-namespace: "true"
diff --git a/upstream/pkg/apis/config/testdata/feature-flags-all-flags-set.yaml b/upstream/pkg/apis/config/testdata/feature-flags-all-flags-set.yaml
index a01101604a1..6b539bc16da 100644
--- a/upstream/pkg/apis/config/testdata/feature-flags-all-flags-set.yaml
+++ b/upstream/pkg/apis/config/testdata/feature-flags-all-flags-set.yaml
@@ -37,5 +37,3 @@ data:
enable-param-enum: "true"
enable-artifacts: "true"
disable-inline-spec: "pipeline,pipelinerun,taskrun"
- enable-concise-resolver-syntax: "true"
- enable-kubernetes-sidecar: "true"
diff --git a/upstream/pkg/apis/config/testdata/feature-flags-invalid-enable-concise-resolver-syntax.yaml b/upstream/pkg/apis/config/testdata/feature-flags-invalid-enable-concise-resolver-syntax.yaml
deleted file mode 100644
index 4945e2f6f76..00000000000
--- a/upstream/pkg/apis/config/testdata/feature-flags-invalid-enable-concise-resolver-syntax.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2024 The Tekton Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: feature-flags
- namespace: tekton-pipelines
-data:
- enable-concise-resolver-syntax: "invalid"
diff --git a/upstream/pkg/apis/config/testdata/feature-flags-invalid-enable-kubernetes-sidecar.yaml b/upstream/pkg/apis/config/testdata/feature-flags-invalid-enable-kubernetes-sidecar.yaml
deleted file mode 100644
index cc78bf080d8..00000000000
--- a/upstream/pkg/apis/config/testdata/feature-flags-invalid-enable-kubernetes-sidecar.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2024 The Tekton Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: feature-flags
- namespace: tekton-pipelines
-data:
- enable-kubernetes-sidecar: "invalid"
diff --git a/upstream/pkg/apis/pipeline/errors/errors.go b/upstream/pkg/apis/pipeline/errors/errors.go
index fbd487bba33..f81dd2e5f82 100644
--- a/upstream/pkg/apis/pipeline/errors/errors.go
+++ b/upstream/pkg/apis/pipeline/errors/errors.go
@@ -13,12 +13,7 @@ limitations under the License.
package errors
-import (
- "errors"
- "strings"
-
- apierrors "k8s.io/apimachinery/pkg/api/errors"
-)
+import "errors"
const UserErrorLabel = "[User error] "
@@ -76,10 +71,3 @@ func GetErrorMessage(err error) string {
}
return err.Error()
}
-
-// IsImmutableTaskRunSpecError returns true if the error is the taskrun spec is immutable
-func IsImmutableTaskRunSpecError(err error) bool {
- // The TaskRun may have completed and the spec field is immutable.
- // validation code: https://github.com/tektoncd/pipeline/blob/v0.62.0/pkg/apis/pipeline/v1/taskrun_validation.go#L136-L138
- return apierrors.IsBadRequest(err) && strings.Contains(err.Error(), "no updates are allowed")
-}
diff --git a/upstream/pkg/apis/pipeline/options.go b/upstream/pkg/apis/pipeline/options.go
index aef10b995e1..2e75adca4c1 100644
--- a/upstream/pkg/apis/pipeline/options.go
+++ b/upstream/pkg/apis/pipeline/options.go
@@ -16,11 +16,8 @@ limitations under the License.
package pipeline
-import "time"
-
// Options holds options passed to the Tekton Pipeline controllers
// typically via command-line flags.
type Options struct {
- Images Images
- ResyncPeriod time.Duration
+ Images Images
}
diff --git a/upstream/pkg/apis/pipeline/paths.go b/upstream/pkg/apis/pipeline/paths.go
index efb28ea9025..fb2b3bcf87c 100644
--- a/upstream/pkg/apis/pipeline/paths.go
+++ b/upstream/pkg/apis/pipeline/paths.go
@@ -30,6 +30,4 @@ const (
StepsDir = "/tekton/steps"
ScriptDir = "/tekton/scripts"
-
- ArtifactsDir = "/tekton/artifacts"
)
diff --git a/upstream/pkg/apis/pipeline/pod/affinity_assitant_template.go b/upstream/pkg/apis/pipeline/pod/affinity_assitant_template.go
index 214ec6a8700..2dace6398ac 100644
--- a/upstream/pkg/apis/pipeline/pod/affinity_assitant_template.go
+++ b/upstream/pkg/apis/pipeline/pod/affinity_assitant_template.go
@@ -42,19 +42,6 @@ type AffinityAssistantTemplate struct {
// +optional
// +listType=atomic
ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
-
- // SecurityContext sets the security context for the pod
- // +optional
- SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
-
- // If specified, indicates the pod's priority. "system-node-critical" and
- // "system-cluster-critical" are two special keywords which indicate the
- // highest priorities with the former being the highest priority. Any other
- // name must be defined by creating a PriorityClass object with that name.
- // If not specified, the pod priority will be default or zero if there is no
- // default.
- // +optional
- PriorityClassName *string `json:"priorityClassName,omitempty"`
}
// Equals checks if this Template is identical to the given Template.
diff --git a/upstream/pkg/apis/pipeline/pod/template.go b/upstream/pkg/apis/pipeline/pod/template.go
index 6bc37fecd7f..855a6ea9b8d 100644
--- a/upstream/pkg/apis/pipeline/pod/template.go
+++ b/upstream/pkg/apis/pipeline/pod/template.go
@@ -148,11 +148,9 @@ func (tpl *Template) ToAffinityAssistantTemplate() *AffinityAssistantTemplate {
}
return &AffinityAssistantTemplate{
- NodeSelector: tpl.NodeSelector,
- Tolerations: tpl.Tolerations,
- ImagePullSecrets: tpl.ImagePullSecrets,
- SecurityContext: tpl.SecurityContext,
- PriorityClassName: tpl.PriorityClassName,
+ NodeSelector: tpl.NodeSelector,
+ Tolerations: tpl.Tolerations,
+ ImagePullSecrets: tpl.ImagePullSecrets,
}
}
@@ -249,13 +247,6 @@ func MergeAAPodTemplateWithDefault(tpl, defaultTpl *AAPodTemplate) *AAPodTemplat
if tpl.ImagePullSecrets == nil {
tpl.ImagePullSecrets = defaultTpl.ImagePullSecrets
}
- if tpl.SecurityContext == nil {
- tpl.SecurityContext = defaultTpl.SecurityContext
- }
- if tpl.PriorityClassName == nil {
- tpl.PriorityClassName = defaultTpl.PriorityClassName
- }
-
return tpl
}
}
diff --git a/upstream/pkg/apis/pipeline/pod/template_test.go b/upstream/pkg/apis/pipeline/pod/template_test.go
index 4e09e5068a3..64eaf704877 100644
--- a/upstream/pkg/apis/pipeline/pod/template_test.go
+++ b/upstream/pkg/apis/pipeline/pod/template_test.go
@@ -163,62 +163,7 @@ func TestMergePodTemplateWithDefault(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
result := MergePodTemplateWithDefault(tc.tpl, tc.defaultTpl)
if !reflect.DeepEqual(result, tc.expected) {
- t.Errorf("mergePodTemplateWithDefault%v, %v) = %v, want %v", tc.tpl, tc.defaultTpl, result, tc.expected)
- }
- })
- }
-}
-
-func TestMergeAAPodTemplateWithDefault(t *testing.T) {
- priority1 := "low-priority"
- priority2 := "high-priority"
- type testCase struct {
- name string
- tpl *AAPodTemplate
- defaultTpl *AAPodTemplate
- expected *AAPodTemplate
- }
-
- testCases := []testCase{
- {
- name: "defaultTpl is nil",
- tpl: &AAPodTemplate{
- NodeSelector: map[string]string{"foo": "bar"},
- },
- defaultTpl: nil,
- expected: &AAPodTemplate{
- NodeSelector: map[string]string{"foo": "bar"},
- },
- },
- {
- name: "tpl is nil",
- tpl: nil,
- defaultTpl: &AAPodTemplate{
- NodeSelector: map[string]string{"foo": "bar"},
- },
- expected: &AAPodTemplate{
- NodeSelector: map[string]string{"foo": "bar"},
- },
- },
- {
- name: "override default priorityClassName",
- tpl: &AAPodTemplate{
- PriorityClassName: &priority2,
- },
- defaultTpl: &AAPodTemplate{
- PriorityClassName: &priority1,
- },
- expected: &AAPodTemplate{
- PriorityClassName: &priority2,
- },
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- result := MergeAAPodTemplateWithDefault(tc.tpl, tc.defaultTpl)
- if !reflect.DeepEqual(result, tc.expected) {
- t.Errorf("mergeAAPodTemplateWithDefault(%v, %v) = %v, want %v", tc.tpl, tc.defaultTpl, result, tc.expected)
+ t.Errorf("mergeByName(%v, %v) = %v, want %v", tc.tpl, tc.defaultTpl, result, tc.expected)
}
})
}
diff --git a/upstream/pkg/apis/pipeline/pod/zz_generated.deepcopy.go b/upstream/pkg/apis/pipeline/pod/zz_generated.deepcopy.go
index 098b0fbeb13..450128e3a9d 100644
--- a/upstream/pkg/apis/pipeline/pod/zz_generated.deepcopy.go
+++ b/upstream/pkg/apis/pipeline/pod/zz_generated.deepcopy.go
@@ -47,16 +47,6 @@ func (in *AffinityAssistantTemplate) DeepCopyInto(out *AffinityAssistantTemplate
*out = make([]v1.LocalObjectReference, len(*in))
copy(*out, *in)
}
- if in.SecurityContext != nil {
- in, out := &in.SecurityContext, &out.SecurityContext
- *out = new(v1.PodSecurityContext)
- (*in).DeepCopyInto(*out)
- }
- if in.PriorityClassName != nil {
- in, out := &in.PriorityClassName, &out.PriorityClassName
- *out = new(string)
- **out = **in
- }
return
}
diff --git a/upstream/pkg/apis/pipeline/register.go b/upstream/pkg/apis/pipeline/register.go
index 9971a9b79d6..ff713753d06 100644
--- a/upstream/pkg/apis/pipeline/register.go
+++ b/upstream/pkg/apis/pipeline/register.go
@@ -34,18 +34,12 @@ const (
// TaskRunLabelKey is used as the label identifier for a TaskRun
TaskRunLabelKey = GroupName + "/taskRun"
- // TaskRunLabelKey is used as the label identifier for a TaskRun
- TaskRunUIDLabelKey = GroupName + "/taskRunUID"
-
// PipelineLabelKey is used as the label identifier for a Pipeline
PipelineLabelKey = GroupName + "/pipeline"
// PipelineRunLabelKey is used as the label identifier for a PipelineRun
PipelineRunLabelKey = GroupName + "/pipelineRun"
- // PipelineRunLabelKey is used as the label identifier for a PipelineRun
- PipelineRunUIDLabelKey = GroupName + "/pipelineRunUID"
-
// PipelineTaskLabelKey is used as the label identifier for a PipelineTask
PipelineTaskLabelKey = GroupName + "/pipelineTask"
diff --git a/upstream/pkg/apis/pipeline/v1/artifact_types.go b/upstream/pkg/apis/pipeline/v1/artifact_types.go
index 6281c3e1e45..07e43ebe171 100644
--- a/upstream/pkg/apis/pipeline/v1/artifact_types.go
+++ b/upstream/pkg/apis/pipeline/v1/artifact_types.go
@@ -1,37 +1,13 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
package v1
-import (
- "github.com/google/go-cmp/cmp"
-)
-
// Algorithm Standard cryptographic hash algorithm
type Algorithm string
// Artifact represents an artifact within a system, potentially containing multiple values
// associated with it.
type Artifact struct {
- // The artifact's identifying category name
- Name string `json:"name,omitempty"`
- // A collection of values related to the artifact
- Values []ArtifactValue `json:"values,omitempty"`
- // Indicate if the artifact is a build output or a by-product
- BuildOutput bool `json:"buildOutput,omitempty"`
+ Name string `json:"name,omitempty"` // The artifact's identifying category name
+ Values []ArtifactValue `json:"values,omitempty"` // A collection of values related to the artifact
}
// ArtifactValue represents a specific value or data element within an Artifact.
@@ -51,85 +27,3 @@ type Artifacts struct {
Inputs []Artifact `json:"inputs,omitempty"`
Outputs []Artifact `json:"outputs,omitempty"`
}
-
-func (a *Artifacts) Merge(another *Artifacts) {
- inputMap := make(map[string][]ArtifactValue)
- var newInputs []Artifact
-
- for _, v := range a.Inputs {
- inputMap[v.Name] = v.Values
- }
- if another != nil {
- for _, v := range another.Inputs {
- _, ok := inputMap[v.Name]
- if !ok {
- inputMap[v.Name] = []ArtifactValue{}
- }
- for _, vv := range v.Values {
- exists := false
- for _, av := range inputMap[v.Name] {
- if cmp.Equal(vv, av) {
- exists = true
- break
- }
- }
- if !exists {
- inputMap[v.Name] = append(inputMap[v.Name], vv)
- }
- }
- }
- }
-
- for k, v := range inputMap {
- newInputs = append(newInputs, Artifact{
- Name: k,
- Values: v,
- })
- }
-
- outputMap := make(map[string]Artifact)
- var newOutputs []Artifact
- for _, v := range a.Outputs {
- outputMap[v.Name] = v
- }
-
- if another != nil {
- for _, v := range another.Outputs {
- _, ok := outputMap[v.Name]
- if !ok {
- outputMap[v.Name] = Artifact{Name: v.Name, Values: []ArtifactValue{}, BuildOutput: v.BuildOutput}
- }
- // only update buildOutput to true.
- // Do not convert to false if it was true before.
- if v.BuildOutput {
- art := outputMap[v.Name]
- art.BuildOutput = v.BuildOutput
- outputMap[v.Name] = art
- }
- for _, vv := range v.Values {
- exists := false
- for _, av := range outputMap[v.Name].Values {
- if cmp.Equal(vv, av) {
- exists = true
- break
- }
- }
- if !exists {
- art := outputMap[v.Name]
- art.Values = append(art.Values, vv)
- outputMap[v.Name] = art
- }
- }
- }
- }
-
- for _, v := range outputMap {
- newOutputs = append(newOutputs, Artifact{
- Name: v.Name,
- Values: v.Values,
- BuildOutput: v.BuildOutput,
- })
- }
- a.Inputs = newInputs
- a.Outputs = newOutputs
-}
diff --git a/upstream/pkg/apis/pipeline/v1/artifact_types_test.go b/upstream/pkg/apis/pipeline/v1/artifact_types_test.go
deleted file mode 100644
index 979b73286ad..00000000000
--- a/upstream/pkg/apis/pipeline/v1/artifact_types_test.go
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-import (
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- "github.com/tektoncd/pipeline/test/diff"
-)
-
-func TestArtifactsMerge(t *testing.T) {
- type testCase struct {
- name string
- a1 Artifacts
- a2 Artifacts
- expected Artifacts
- }
-
- testCases := []testCase{
- {
- name: "Merges inputs and outputs with deduplication",
- a1: Artifacts{
- Inputs: []Artifact{
- {
- Name: "input1",
- Values: []ArtifactValue{
- {
- Digest: map[Algorithm]string{"sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
- Uri: "pkg:maven/org.apache.commons/commons-lang3/3.12.0",
- },
- },
- },
- {
- Name: "input2",
- Values: []ArtifactValue{
- {
- Digest: map[Algorithm]string{"sha256": "d596377f2d54b3f8b4619f137d08892989893b886742759144582c94157526f1"},
- Uri: "pkg:pypi/requests/2.28.2",
- },
- },
- },
- },
- Outputs: []Artifact{
- {
- Name: "output1",
- Values: []ArtifactValue{
- {
- Digest: map[Algorithm]string{"sha256": "47de7a85905970a45132f48a9247879a15c483477e23a637504694e611135b40e"},
- Uri: "pkg:npm/lodash/4.17.21",
- },
- },
- },
- },
- },
- a2: Artifacts{
- Inputs: []Artifact{
- {
- Name: "input1",
- Values: []ArtifactValue{
- {
- Digest: map[Algorithm]string{"sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
- Uri: "pkg:maven/org.apache.commons/commons-lang3/3.12.0",
- },
- {
- Digest: map[Algorithm]string{"sha256": "97c13e1812b666824266111701398e56e30d14418a2d9b35987f516a66e2129f"},
- Uri: "pkg:nuget/Microsoft.Extensions.Logging/7.0.0",
- },
- },
- },
- {
- Name: "input3",
- Values: []ArtifactValue{
- {
- Digest: map[Algorithm]string{"sha256": "13c2b709e3a100726680e53e19666656a89a2f2490e917ba15d6b15475ab7b79"},
- Uri: "pkg:debian/openssl/1.1.1",
- },
- },
- },
- },
- Outputs: []Artifact{
- {
- Name: "output1",
- BuildOutput: true,
- Values: []ArtifactValue{
- {
- Digest: map[Algorithm]string{"sha256": "698c4539633943f7889f41605003d7fa63833722ebd2b37c7e75df1d3d06941a"},
- Uri: "pkg:nuget/Newtonsoft.Json/13.0.3",
- },
- },
- },
- {
- Name: "output2",
- BuildOutput: true,
- Values: []ArtifactValue{
- {
- Digest: map[Algorithm]string{"sha256": "7e406d83706c7193df3e38b66d350e55df6f13d2a28a1d35917a043533a70f5c"},
- Uri: "pkg:pypi/pandas/2.0.1",
- },
- },
- },
- },
- },
- expected: Artifacts{
- Inputs: []Artifact{
- {
- Name: "input1",
- Values: []ArtifactValue{
- {
- Digest: map[Algorithm]string{"sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
- Uri: "pkg:maven/org.apache.commons/commons-lang3/3.12.0",
- },
- {
- Digest: map[Algorithm]string{"sha256": "97c13e1812b666824266111701398e56e30d14418a2d9b35987f516a66e2129f"},
- Uri: "pkg:nuget/Microsoft.Extensions.Logging/7.0.0",
- },
- },
- },
- {
- Name: "input2",
- Values: []ArtifactValue{
- {
- Digest: map[Algorithm]string{"sha256": "d596377f2d54b3f8b4619f137d08892989893b886742759144582c94157526f1"},
- Uri: "pkg:pypi/requests/2.28.2",
- },
- },
- },
- {
- Name: "input3",
- Values: []ArtifactValue{
- {
- Digest: map[Algorithm]string{"sha256": "13c2b709e3a100726680e53e19666656a89a2f2490e917ba15d6b15475ab7b79"},
- Uri: "pkg:debian/openssl/1.1.1",
- },
- },
- },
- },
- Outputs: []Artifact{
- {
- Name: "output1",
- BuildOutput: true,
- Values: []ArtifactValue{
- {
- Digest: map[Algorithm]string{"sha256": "47de7a85905970a45132f48a9247879a15c483477e23a637504694e611135b40e"},
- Uri: "pkg:npm/lodash/4.17.21",
- },
- {
- Digest: map[Algorithm]string{"sha256": "698c4539633943f7889f41605003d7fa63833722ebd2b37c7e75df1d3d06941a"},
- Uri: "pkg:nuget/Newtonsoft.Json/13.0.3",
- },
- },
- },
- {
- Name: "output2",
- BuildOutput: true,
- Values: []ArtifactValue{
- {
- Digest: map[Algorithm]string{"sha256": "7e406d83706c7193df3e38b66d350e55df6f13d2a28a1d35917a043533a70f5c"},
- Uri: "pkg:pypi/pandas/2.0.1",
- },
- },
- },
- },
- },
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- tc.a1.Merge(&tc.a2)
- got := tc.a1
- if d := cmp.Diff(tc.expected, got, cmpopts.SortSlices(func(a, b Artifact) bool { return a.Name > b.Name })); d != "" {
- t.Errorf("TestArtifactsMerge() did not produce expected artifacts for test %s: %s", tc.name, diff.PrintWantGot(d))
- }
- })
- }
-}
diff --git a/upstream/pkg/apis/pipeline/v1/container_types.go b/upstream/pkg/apis/pipeline/v1/container_types.go
index 2dc4a8984f9..9f0c48ae9af 100644
--- a/upstream/pkg/apis/pipeline/v1/container_types.go
+++ b/upstream/pkg/apis/pipeline/v1/container_types.go
@@ -152,10 +152,6 @@ type Step struct {
// +optional
// +listType=atomic
Results []StepResult `json:"results,omitempty"`
-
- // When is a list of when expressions that need to be true for the task to run
- // +optional
- When StepWhenExpressions `json:"when,omitempty"`
}
// Ref can be used to refer to a specific instance of a StepAction.
@@ -543,43 +539,10 @@ type Sidecar struct {
// +optional
// +listType=atomic
Workspaces []WorkspaceUsage `json:"workspaces,omitempty"`
-
- // RestartPolicy refers to kubernetes RestartPolicy. It can only be set for an
- // initContainer and must have it's policy set to "Always". It is currently
- // left optional to help support Kubernetes versions prior to 1.29 when this feature
- // was introduced.
- // +optional
- RestartPolicy *corev1.ContainerRestartPolicy `json:"restartPolicy,omitempty"`
}
// ToK8sContainer converts the Sidecar to a Kubernetes Container struct
func (s *Sidecar) ToK8sContainer() *corev1.Container {
- if s.RestartPolicy == nil {
- return &corev1.Container{
- Name: s.Name,
- Image: s.Image,
- Command: s.Command,
- Args: s.Args,
- WorkingDir: s.WorkingDir,
- Ports: s.Ports,
- EnvFrom: s.EnvFrom,
- Env: s.Env,
- Resources: s.ComputeResources,
- VolumeMounts: s.VolumeMounts,
- VolumeDevices: s.VolumeDevices,
- LivenessProbe: s.LivenessProbe,
- ReadinessProbe: s.ReadinessProbe,
- StartupProbe: s.StartupProbe,
- Lifecycle: s.Lifecycle,
- TerminationMessagePath: s.TerminationMessagePath,
- TerminationMessagePolicy: s.TerminationMessagePolicy,
- ImagePullPolicy: s.ImagePullPolicy,
- SecurityContext: s.SecurityContext,
- Stdin: s.Stdin,
- StdinOnce: s.StdinOnce,
- TTY: s.TTY,
- }
- }
return &corev1.Container{
Name: s.Name,
Image: s.Image,
@@ -594,7 +557,6 @@ func (s *Sidecar) ToK8sContainer() *corev1.Container {
VolumeDevices: s.VolumeDevices,
LivenessProbe: s.LivenessProbe,
ReadinessProbe: s.ReadinessProbe,
- RestartPolicy: s.RestartPolicy,
StartupProbe: s.StartupProbe,
Lifecycle: s.Lifecycle,
TerminationMessagePath: s.TerminationMessagePath,
@@ -631,7 +593,6 @@ func (s *Sidecar) SetContainerFields(c corev1.Container) {
s.Stdin = c.Stdin
s.StdinOnce = c.StdinOnce
s.TTY = c.TTY
- s.RestartPolicy = c.RestartPolicy
}
// GetVarSubstitutionExpressions walks all the places a substitution reference can be used
diff --git a/upstream/pkg/apis/pipeline/v1/container_types_test.go b/upstream/pkg/apis/pipeline/v1/container_types_test.go
index fd7c558eafa..06948f9acaa 100644
--- a/upstream/pkg/apis/pipeline/v1/container_types_test.go
+++ b/upstream/pkg/apis/pipeline/v1/container_types_test.go
@@ -120,35 +120,3 @@ func TestSidecarGetVarSubstitutionExpressions(t *testing.T) {
t.Fatalf("Unexpected result (-want, +got): %s", d)
}
}
-
-func TestSidecarRestartPolicyToK8sContainer(t *testing.T) {
- always := corev1.ContainerRestartPolicyAlways
- s := Sidecar{
- Name: "sidecarName",
- RestartPolicy: &always,
- }
-
- expectedContainer := corev1.Container{
- Name: "sidecarName",
- RestartPolicy: &always,
- }
-
- c := s.ToK8sContainer()
-
- if !(c.RestartPolicy == expectedContainer.RestartPolicy) {
- t.Fatalf("Unexpected result with RestartPolicy")
- }
-
- s = Sidecar{
- Name: "sidecarName",
- }
-
- expectedContainer = corev1.Container{
- Name: "sidecarName",
- }
-
- c = s.ToK8sContainer()
- if !(c.RestartPolicy == expectedContainer.RestartPolicy) {
- t.Fatalf("Unexpected result without RestartPolicy")
- }
-}
diff --git a/upstream/pkg/apis/pipeline/v1/container_validation.go b/upstream/pkg/apis/pipeline/v1/container_validation.go
index ec55189bc32..bfee6884b0b 100644
--- a/upstream/pkg/apis/pipeline/v1/container_validation.go
+++ b/upstream/pkg/apis/pipeline/v1/container_validation.go
@@ -18,9 +18,6 @@ package v1
import (
"context"
- "errors"
- "fmt"
- "regexp"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/config"
@@ -28,72 +25,38 @@ import (
"knative.dev/pkg/apis"
)
-func validateRef(ctx context.Context, refName string, refResolver ResolverName, refParams Params) (errs *apis.FieldError) {
+// Validate ensures that a supplied Ref field is populated
+// correctly. No errors are returned for a nil Ref.
+func (ref *Ref) Validate(ctx context.Context) (errs *apis.FieldError) {
+ if ref == nil {
+ return errs
+ }
+
switch {
- case refResolver != "" || refParams != nil:
- if refParams != nil {
- errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver params", config.BetaAPIFields).ViaField("params"))
- if refName != "" {
- errs = errs.Also(apis.ErrMultipleOneOf("name", "params"))
- }
- if refResolver == "" {
- errs = errs.Also(apis.ErrMissingField("resolver"))
- }
- errs = errs.Also(ValidateParameters(ctx, refParams))
- }
- if refResolver != "" {
+ case ref.Resolver != "" || ref.Params != nil:
+ if ref.Resolver != "" {
errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver", config.BetaAPIFields).ViaField("resolver"))
- if refName != "" {
- // make sure that the name is url-like.
- err := RefNameLikeUrl(refName)
- if err == nil && !config.FromContextOrDefaults(ctx).FeatureFlags.EnableConciseResolverSyntax {
- // If name is url-like then concise resolver syntax must be enabled
- errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use concise resolver syntax", config.EnableConciseResolverSyntax), ""))
- }
- if err != nil {
- errs = errs.Also(apis.ErrInvalidValue(err, "name"))
- }
+ if ref.Name != "" {
+ errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver"))
}
}
- case refName != "":
- // ref name can be a Url-like format.
- if err := RefNameLikeUrl(refName); err == nil {
- // If name is url-like then concise resolver syntax must be enabled
- if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableConciseResolverSyntax {
- errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use concise resolver syntax", config.EnableConciseResolverSyntax), ""))
+ if ref.Params != nil {
+ errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver params", config.BetaAPIFields).ViaField("params"))
+ if ref.Name != "" {
+ errs = errs.Also(apis.ErrMultipleOneOf("name", "params"))
}
- // In stage1 of concise remote resolvers syntax, this is a required field.
- // TODO: remove this check when implementing stage 2 where this is optional.
- if refResolver == "" {
+ if ref.Resolver == "" {
errs = errs.Also(apis.ErrMissingField("resolver"))
}
- // Or, it must be a valid k8s name
- } else {
- // ref name must be a valid k8s name
- if errSlice := validation.IsQualifiedName(refName); len(errSlice) != 0 {
- errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name"))
- }
+ errs = errs.Also(ValidateParameters(ctx, ref.Params))
+ }
+ case ref.Name != "":
+ // ref name must be a valid k8s name
+ if errSlice := validation.IsQualifiedName(ref.Name); len(errSlice) != 0 {
+ errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name"))
}
default:
errs = errs.Also(apis.ErrMissingField("name"))
}
return errs
}
-
-// Validate ensures that a supplied Ref field is populated
-// correctly. No errors are returned for a nil Ref.
-func (ref *Ref) Validate(ctx context.Context) (errs *apis.FieldError) {
- if ref == nil {
- return errs
- }
- return validateRef(ctx, ref.Name, ref.Resolver, ref.Params)
-}
-
-// RefNameLikeUrl checks if the name is url parsable and returns an error if it isn't.
-func RefNameLikeUrl(name string) error {
- schemeRegex := regexp.MustCompile(`[\w-]+:\/\/*`)
- if !schemeRegex.MatchString(name) {
- return errors.New("invalid URI for request")
- }
- return nil
-}
diff --git a/upstream/pkg/apis/pipeline/v1/container_validation_test.go b/upstream/pkg/apis/pipeline/v1/container_validation_test.go
index 74fbec19190..60c95f88120 100644
--- a/upstream/pkg/apis/pipeline/v1/container_validation_test.go
+++ b/upstream/pkg/apis/pipeline/v1/container_validation_test.go
@@ -21,22 +21,12 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
- "github.com/tektoncd/pipeline/pkg/apis/config"
cfgtesting "github.com/tektoncd/pipeline/pkg/apis/config/testing"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/test/diff"
"knative.dev/pkg/apis"
)
-func enableConciseResolverSyntax(ctx context.Context) context.Context {
- return config.ToContext(ctx, &config.Config{
- FeatureFlags: &config.FeatureFlags{
- EnableConciseResolverSyntax: true,
- EnableAPIFields: config.BetaAPIFields,
- },
- })
-}
-
func TestRef_Valid(t *testing.T) {
tests := []struct {
name string
@@ -47,10 +37,6 @@ func TestRef_Valid(t *testing.T) {
}, {
name: "simple ref",
ref: &v1.Ref{Name: "refname"},
- }, {
- name: "ref name - consice syntax",
- ref: &v1.Ref{Name: "foo://baz:ver", ResolverRef: v1.ResolverRef{Resolver: "git"}},
- wc: enableConciseResolverSyntax,
}, {
name: "beta feature: valid resolver",
ref: &v1.Ref{ResolverRef: v1.ResolverRef{Resolver: "git"}},
@@ -107,45 +93,29 @@ func TestRef_Invalid(t *testing.T) {
},
wantErr: apis.ErrMissingField("resolver"),
}, {
- name: "ref with resolver and k8s style name",
+ name: "ref resolver disallowed in conjunction with ref name",
ref: &v1.Ref{
Name: "foo",
ResolverRef: v1.ResolverRef{
Resolver: "git",
},
},
- wantErr: apis.ErrInvalidValue(`invalid URI for request`, "name"),
- wc: enableConciseResolverSyntax,
+ wantErr: apis.ErrMultipleOneOf("name", "resolver"),
}, {
- name: "ref with url-like name without resolver",
+ name: "ref params disallowed in conjunction with ref name",
ref: &v1.Ref{
- Name: "https://foo.com/bar",
- },
- wantErr: apis.ErrMissingField("resolver"),
- wc: enableConciseResolverSyntax,
- }, {
- name: "ref params disallowed in conjunction with pipelineref name",
- ref: &v1.Ref{
- Name: "https://foo/bar",
+ Name: "bar",
ResolverRef: v1.ResolverRef{
- Resolver: "git",
- Params: v1.Params{{Name: "foo", Value: v1.ParamValue{StringVal: "bar"}}},
+ Params: v1.Params{{
+ Name: "foo",
+ Value: v1.ParamValue{
+ Type: v1.ParamTypeString,
+ StringVal: "bar",
+ },
+ }},
},
},
- wantErr: apis.ErrMultipleOneOf("name", "params"),
- wc: enableConciseResolverSyntax,
- }, {
- name: "ref with url-like name without enable-concise-resolver-syntax",
- ref: &v1.Ref{Name: "https://foo.com/bar"},
- wantErr: apis.ErrMissingField("resolver").Also(&apis.FieldError{
- Message: `feature flag enable-concise-resolver-syntax should be set to true to use concise resolver syntax`,
- }),
- }, {
- name: "ref without enable-concise-resolver-syntax",
- ref: &v1.Ref{Name: "https://foo.com/bar", ResolverRef: v1.ResolverRef{Resolver: "git"}},
- wantErr: &apis.FieldError{
- Message: `feature flag enable-concise-resolver-syntax should be set to true to use concise resolver syntax`,
- },
+ wantErr: apis.ErrMultipleOneOf("name", "params").Also(apis.ErrMissingField("resolver")),
}, {
name: "invalid ref name",
ref: &v1.Ref{Name: "_foo"},
diff --git a/upstream/pkg/apis/pipeline/v1/merge.go b/upstream/pkg/apis/pipeline/v1/merge.go
index b916d8caeb5..6331edd0765 100644
--- a/upstream/pkg/apis/pipeline/v1/merge.go
+++ b/upstream/pkg/apis/pipeline/v1/merge.go
@@ -46,11 +46,6 @@ func MergeStepsWithStepTemplate(template *StepTemplate, steps []Step) ([]Step, e
}
for i, s := range steps {
- // If the stepaction has not been fetched yet then do not merge.
- // Skip over to the next one
- if s.Ref != nil {
- continue
- }
merged := corev1.Container{}
err := mergeObjWithTemplateBytes(md, s.ToK8sContainer(), &merged)
if err != nil {
@@ -74,7 +69,6 @@ func MergeStepsWithStepTemplate(template *StepTemplate, steps []Step) ([]Step, e
Results: s.Results,
Params: s.Params,
Ref: s.Ref,
- When: s.When,
Workspaces: s.Workspaces,
}
newStep.SetContainerFields(merged)
@@ -92,6 +86,7 @@ func MergeStepsWithSpecs(steps []Step, overrides []TaskRunStepSpec) ([]Step, err
stepNameToOverride[o.Name] = o
}
for i, s := range steps {
+ s := s
o, found := stepNameToOverride[s.Name]
if !found {
continue
@@ -118,6 +113,7 @@ func MergeSidecarsWithSpecs(sidecars []Sidecar, overrides []TaskRunSidecarSpec)
sidecarNameToOverride[o.Name] = o
}
for i, s := range sidecars {
+ s := s
o, found := sidecarNameToOverride[s.Name]
if !found {
continue
diff --git a/upstream/pkg/apis/pipeline/v1/merge_test.go b/upstream/pkg/apis/pipeline/v1/merge_test.go
index 07c24e0ab5a..339d4defaa0 100644
--- a/upstream/pkg/apis/pipeline/v1/merge_test.go
+++ b/upstream/pkg/apis/pipeline/v1/merge_test.go
@@ -26,7 +26,6 @@ import (
"github.com/tektoncd/pipeline/test/diff"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
- "k8s.io/apimachinery/pkg/selection"
)
func TestMergeStepsWithStepTemplate(t *testing.T) {
@@ -146,8 +145,7 @@ func TestMergeStepsWithStepTemplate(t *testing.T) {
}},
}},
expected: []v1.Step{{
- Command: []string{"/somecmd"},
- Image: "some-image",
+ Command: []string{"/somecmd"}, Image: "some-image",
OnError: "foo",
Results: []v1.StepResult{{
Name: "result",
@@ -157,30 +155,11 @@ func TestMergeStepsWithStepTemplate(t *testing.T) {
}},
}},
}, {
- name: "step-ref-should-not-be-merged-with-steptemplate",
+ name: "ref-should-not-be-removed",
template: &v1.StepTemplate{
SecurityContext: &corev1.SecurityContext{
RunAsNonRoot: pointer.Bool(true),
},
- VolumeMounts: []corev1.VolumeMount{{
- Name: "data",
- MountPath: "/workspace/data",
- }},
- Env: []corev1.EnvVar{{
- Name: "KEEP_THIS",
- Value: "A_VALUE",
- }, {
- Name: "SOME_KEY_1",
- ValueFrom: &corev1.EnvVarSource{
- SecretKeyRef: &corev1.SecretKeySelector{
- Key: "A_KEY",
- LocalObjectReference: corev1.LocalObjectReference{Name: "A_NAME"},
- },
- },
- }, {
- Name: "SOME_KEY_2",
- Value: "VALUE_2",
- }},
},
steps: []v1.Step{{
Ref: &v1.Ref{Name: "my-step-action"},
@@ -193,6 +172,9 @@ func TestMergeStepsWithStepTemplate(t *testing.T) {
}},
}},
expected: []v1.Step{{
+ SecurityContext: &corev1.SecurityContext{
+ RunAsNonRoot: pointer.Bool(true),
+ },
Ref: &v1.Ref{Name: "my-step-action"},
OnError: "foo",
Results: []v1.StepResult{{
@@ -258,17 +240,6 @@ func TestMergeStepsWithStepTemplate(t *testing.T) {
},
}},
}},
- }, {
- name: "when",
- template: nil,
- steps: []v1.Step{{
- Image: "some-image",
- When: v1.StepWhenExpressions{{Input: "foo", Operator: selection.In, Values: []string{"foo", "bar"}}},
- }},
- expected: []v1.Step{{
- Image: "some-image",
- When: v1.StepWhenExpressions{{Input: "foo", Operator: selection.In, Values: []string{"foo", "bar"}}},
- }},
}, {
name: "isolated workspaces",
template: &v1.StepTemplate{
diff --git a/upstream/pkg/apis/pipeline/v1/openapi_generated.go b/upstream/pkg/apis/pipeline/v1/openapi_generated.go
index d05c2178e31..f9380013f7d 100644
--- a/upstream/pkg/apis/pipeline/v1/openapi_generated.go
+++ b/upstream/pkg/apis/pipeline/v1/openapi_generated.go
@@ -162,24 +162,11 @@ func schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref common.Reference
},
},
},
- "securityContext": {
- SchemaProps: spec.SchemaProps{
- Description: "SecurityContext sets the security context for the pod",
- Ref: ref("k8s.io/api/core/v1.PodSecurityContext"),
- },
- },
- "priorityClassName": {
- SchemaProps: spec.SchemaProps{
- Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
- Type: []string{"string"},
- Format: "",
- },
- },
},
},
},
Dependencies: []string{
- "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"},
+ "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.Toleration"},
}
}
@@ -408,14 +395,13 @@ func schema_pkg_apis_pipeline_v1_Artifact(ref common.ReferenceCallback) common.O
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
- Description: "The artifact's identifying category name",
- Type: []string{"string"},
- Format: "",
+ Type: []string{"string"},
+ Format: "",
},
},
"values": {
SchemaProps: spec.SchemaProps{
- Description: "A collection of values related to the artifact",
+ Description: "The artifact's identifying category name",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
@@ -427,13 +413,6 @@ func schema_pkg_apis_pipeline_v1_Artifact(ref common.ReferenceCallback) common.O
},
},
},
- "buildOutput": {
- SchemaProps: spec.SchemaProps{
- Description: "Indicate if the artifact is a build output or a by-product",
- Type: []string{"boolean"},
- Format: "",
- },
- },
},
},
},
@@ -607,6 +586,7 @@ func schema_pkg_apis_pipeline_v1_EmbeddedTask(ref common.ReferenceCallback) comm
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec is a specification of a custom task",
+ Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
},
},
@@ -868,7 +848,8 @@ func schema_pkg_apis_pipeline_v1_Param(ref common.ReferenceCallback) common.Open
},
"value": {
SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"),
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"),
},
},
},
@@ -1168,6 +1149,7 @@ func schema_pkg_apis_pipeline_v1_PipelineResult(ref common.ReferenceCallback) co
"value": {
SchemaProps: spec.SchemaProps{
Description: "Value the expression used to retrieve the value",
+ Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"),
},
},
@@ -1293,6 +1275,7 @@ func schema_pkg_apis_pipeline_v1_PipelineRunResult(ref common.ReferenceCallback)
"value": {
SchemaProps: spec.SchemaProps{
Description: "Value is the result returned from the execution of this PipelineRun",
+ Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"),
},
},
@@ -2766,13 +2749,6 @@ func schema_pkg_apis_pipeline_v1_Sidecar(ref common.ReferenceCallback) common.Op
},
},
},
- "restartPolicy": {
- SchemaProps: spec.SchemaProps{
- Description: "RestartPolicy refers to kubernetes RestartPolicy. It can only be set for an initContainer and must have it's policy set to \"Always\". It is currently left optional to help support Kubernetes versions prior to 1.29 when this feature was introduced.",
- Type: []string{"string"},
- Format: "",
- },
- },
},
Required: []string{"name"},
},
@@ -3150,26 +3126,12 @@ func schema_pkg_apis_pipeline_v1_Step(ref common.ReferenceCallback) common.OpenA
},
},
},
- "when": {
- SchemaProps: spec.SchemaProps{
- Description: "When is a list of when expressions that need to be true for the task to run",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: map[string]interface{}{},
- Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"),
- },
- },
- },
- },
- },
},
Required: []string{"name"},
},
},
Dependencies: []string{
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Ref", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"},
+ "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Ref", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"},
}
}
@@ -3197,7 +3159,7 @@ func schema_pkg_apis_pipeline_v1_StepResult(ref common.ReferenceCallback) common
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
- Description: "StepResult used to describe the Results of a Step.\n\nThis is field is at an BETA stability level and gated by \"enable-step-actions\" feature flag.",
+ Description: "StepResult used to describe the Results of a Step.\n\nThis is field is at an ALPHA stability level and gated by \"enable-step-actions\" feature flag.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
@@ -3302,11 +3264,6 @@ func schema_pkg_apis_pipeline_v1_StepState(ref common.ReferenceCallback) common.
},
},
},
- "provenance": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance"),
- },
- },
"terminationReason": {
SchemaProps: spec.SchemaProps{
Type: []string{"string"},
@@ -3343,7 +3300,7 @@ func schema_pkg_apis_pipeline_v1_StepState(ref common.ReferenceCallback) common.
},
},
Dependencies: []string{
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Artifact", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult", "k8s.io/api/core/v1.ContainerStateRunning", "k8s.io/api/core/v1.ContainerStateTerminated", "k8s.io/api/core/v1.ContainerStateWaiting"},
+ "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Artifact", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult", "k8s.io/api/core/v1.ContainerStateRunning", "k8s.io/api/core/v1.ContainerStateTerminated", "k8s.io/api/core/v1.ContainerStateWaiting"},
}
}
@@ -3574,25 +3531,6 @@ func schema_pkg_apis_pipeline_v1_TaskBreakpoints(ref common.ReferenceCallback) c
Format: "",
},
},
- "beforeSteps": {
- VendorExtensible: spec.VendorExtensible{
- Extensions: spec.Extensions{
- "x-kubernetes-list-type": "atomic",
- },
- },
- SchemaProps: spec.SchemaProps{
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
},
},
},
@@ -3915,6 +3853,7 @@ func schema_pkg_apis_pipeline_v1_TaskRunResult(ref common.ReferenceCallback) com
"value": {
SchemaProps: spec.SchemaProps{
Description: "Value the given value of the result",
+ Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"),
},
},
@@ -4238,17 +4177,6 @@ func schema_pkg_apis_pipeline_v1_TaskRunStatus(ref common.ReferenceCallback) com
},
},
},
- "artifacts": {
- VendorExtensible: spec.VendorExtensible{
- Extensions: spec.Extensions{
- "x-kubernetes-list-type": "atomic",
- },
- },
- SchemaProps: spec.SchemaProps{
- Description: "Artifacts are the list of artifacts written out by the task's containers",
- Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Artifacts"),
- },
- },
"sidecars": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
@@ -4301,7 +4229,7 @@ func schema_pkg_apis_pipeline_v1_TaskRunStatus(ref common.ReferenceCallback) com
},
},
Dependencies: []string{
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Artifacts", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SidecarState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "knative.dev/pkg/apis.Condition"},
+ "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SidecarState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "knative.dev/pkg/apis.Condition"},
}
}
@@ -4389,17 +4317,6 @@ func schema_pkg_apis_pipeline_v1_TaskRunStatusFields(ref common.ReferenceCallbac
},
},
},
- "artifacts": {
- VendorExtensible: spec.VendorExtensible{
- Extensions: spec.Extensions{
- "x-kubernetes-list-type": "atomic",
- },
- },
- SchemaProps: spec.SchemaProps{
- Description: "Artifacts are the list of artifacts written out by the task's containers",
- Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Artifacts"),
- },
- },
"sidecars": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
@@ -4452,7 +4369,7 @@ func schema_pkg_apis_pipeline_v1_TaskRunStatusFields(ref common.ReferenceCallbac
},
},
Dependencies: []string{
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Artifacts", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SidecarState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
+ "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SidecarState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
diff --git a/upstream/pkg/apis/pipeline/v1/param_types_test.go b/upstream/pkg/apis/pipeline/v1/param_types_test.go
index 7191439c7e3..ae7d779f437 100644
--- a/upstream/pkg/apis/pipeline/v1/param_types_test.go
+++ b/upstream/pkg/apis/pipeline/v1/param_types_test.go
@@ -308,6 +308,7 @@ type ParamValuesHolder struct {
AOrS v1.ParamValue `json:"val"`
}
+//nolint:musttag
func TestParamValues_UnmarshalJSON(t *testing.T) {
cases := []struct {
input map[string]interface{}
@@ -398,6 +399,7 @@ func TestParamValues_UnmarshalJSON_Directly(t *testing.T) {
}
}
+//nolint:musttag
func TestParamValues_UnmarshalJSON_Error(t *testing.T) {
cases := []struct {
desc string
@@ -415,6 +417,7 @@ func TestParamValues_UnmarshalJSON_Error(t *testing.T) {
}
}
+//nolint:musttag
func TestParamValues_MarshalJSON(t *testing.T) {
cases := []struct {
input v1.ParamValue
@@ -452,7 +455,7 @@ func TestArrayReference(t *testing.T) {
}}
for _, tt := range tests {
if d := cmp.Diff(tt.expectedResult, v1.ArrayReference(tt.p)); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
}
}
@@ -499,7 +502,7 @@ func TestExtractNames(t *testing.T) {
}}
for _, tt := range tests {
if d := cmp.Diff(tt.want, v1.Params.ExtractNames(tt.params)); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
}
}
@@ -562,7 +565,7 @@ func TestParams_ReplaceVariables(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
got := tt.ps.ReplaceVariables(tt.stringReplacements, tt.arrayReplacements, tt.objectReplacements)
if d := cmp.Diff(tt.want, got); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
@@ -682,10 +685,10 @@ func TestParseTaskandResultName(t *testing.T) {
pipelineTaskName, resultName := tc.param.ParseTaskandResultName()
if d := cmp.Diff(tc.pipelineTaskName, pipelineTaskName); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
if d := cmp.Diff(tc.resultName, resultName); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
@@ -709,7 +712,7 @@ func TestGetNames(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
got := tc.params.GetNames()
if d := cmp.Diff(tc.want, got); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
@@ -770,7 +773,7 @@ func TestSortByType(t *testing.T) {
s, a, o := tc.params.SortByType()
got := []v1.ParamSpecs{s, a, o}
if d := cmp.Diff(tc.want, got); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
@@ -804,7 +807,7 @@ func TestValidateNoDuplicateNames(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
got := tc.params.ValidateNoDuplicateNames()
if d := cmp.Diff(tc.expectedError.Error(), got.Error()); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
diff --git a/upstream/pkg/apis/pipeline/v1/pipeline_types_test.go b/upstream/pkg/apis/pipeline/v1/pipeline_types_test.go
index 7ecfb6ff25e..61771b90f2a 100644
--- a/upstream/pkg/apis/pipeline/v1/pipeline_types_test.go
+++ b/upstream/pkg/apis/pipeline/v1/pipeline_types_test.go
@@ -86,7 +86,7 @@ func TestPipelineTask_OnError(t *testing.T) {
OnError: PipelineTaskContinue,
TaskRef: &TaskRef{Name: "foo"},
},
- wc: cfgtesting.EnableBetaAPIFields,
+ wc: cfgtesting.EnableAlphaAPIFields,
}, {
name: "valid PipelineTask with onError:stopAndFail",
p: PipelineTask{
@@ -94,7 +94,7 @@ func TestPipelineTask_OnError(t *testing.T) {
OnError: PipelineTaskStopAndFail,
TaskRef: &TaskRef{Name: "foo"},
},
- wc: cfgtesting.EnableBetaAPIFields,
+ wc: cfgtesting.EnableAlphaAPIFields,
}, {
name: "invalid OnError value",
p: PipelineTask{
@@ -103,7 +103,7 @@ func TestPipelineTask_OnError(t *testing.T) {
TaskRef: &TaskRef{Name: "foo"},
},
expectedError: apis.ErrInvalidValue("invalid-val", "OnError", "PipelineTask OnError must be either \"continue\" or \"stopAndFail\""),
- wc: cfgtesting.EnableBetaAPIFields,
+ wc: cfgtesting.EnableAlphaAPIFields,
}, {
name: "OnError:stopAndFail and retries coexist - success",
p: PipelineTask{
@@ -112,7 +112,7 @@ func TestPipelineTask_OnError(t *testing.T) {
Retries: 1,
TaskRef: &TaskRef{Name: "foo"},
},
- wc: cfgtesting.EnableBetaAPIFields,
+ wc: cfgtesting.EnableAlphaAPIFields,
}, {
name: "OnError:continue and retries coexists - failure",
p: PipelineTask{
@@ -122,6 +122,15 @@ func TestPipelineTask_OnError(t *testing.T) {
TaskRef: &TaskRef{Name: "foo"},
},
expectedError: apis.ErrGeneric("PipelineTask OnError cannot be set to \"continue\" when Retries is greater than 0"),
+ wc: cfgtesting.EnableAlphaAPIFields,
+ }, {
+ name: "setting OnError in beta API version - failure",
+ p: PipelineTask{
+ Name: "foo",
+ OnError: PipelineTaskContinue,
+ TaskRef: &TaskRef{Name: "foo"},
+ },
+ expectedError: apis.ErrGeneric("OnError requires \"enable-api-fields\" feature gate to be \"alpha\" but it is \"beta\""),
wc: cfgtesting.EnableBetaAPIFields,
}, {
name: "setting OnError in stable API version - failure",
@@ -130,7 +139,7 @@ func TestPipelineTask_OnError(t *testing.T) {
OnError: PipelineTaskContinue,
TaskRef: &TaskRef{Name: "foo"},
},
- expectedError: apis.ErrGeneric("OnError requires \"enable-api-fields\" feature gate to be \"alpha\" or \"beta\" but it is \"stable\""),
+ expectedError: apis.ErrGeneric("OnError requires \"enable-api-fields\" feature gate to be \"alpha\" but it is \"stable\""),
wc: cfgtesting.EnableStableAPIFields,
}}
for _, tt := range tests {
@@ -572,7 +581,6 @@ func TestPipelineTask_ValidateRegularTask_Failure(t *testing.T) {
name string
task PipelineTask
expectedError apis.FieldError
- configMap map[string]string
}{{
name: "pipeline task - invalid taskSpec",
task: PipelineTask{
@@ -604,58 +612,15 @@ func TestPipelineTask_ValidateRegularTask_Failure(t *testing.T) {
Paths: []string{"taskRef.name"},
},
}, {
- name: "pipeline task - taskRef with resolver and k8s style name",
+ name: "pipeline task - taskRef with resolver and name",
task: PipelineTask{
Name: "foo",
TaskRef: &TaskRef{Name: "foo", ResolverRef: ResolverRef{Resolver: "git"}},
},
- expectedError: apis.FieldError{
- Message: `invalid value: invalid URI for request`,
- Paths: []string{"taskRef.name"},
- },
- configMap: map[string]string{"enable-concise-resolver-syntax": "true"},
- }, {
- name: "pipeline task - taskRef with url-like name without enable-concise-resolver-syntax",
- task: PipelineTask{
- Name: "foo",
- TaskRef: &TaskRef{Name: "https://foo.com/bar"},
- },
- expectedError: *apis.ErrMissingField("taskRef.resolver").Also(&apis.FieldError{
- Message: `feature flag enable-concise-resolver-syntax should be set to true to use concise resolver syntax`,
- Paths: []string{"taskRef"},
- }),
- }, {
- name: "pipeline task - taskRef without enable-concise-resolver-syntax",
- task: PipelineTask{
- Name: "foo",
- TaskRef: &TaskRef{Name: "https://foo.com/bar", ResolverRef: ResolverRef{Resolver: "git"}},
- },
- expectedError: apis.FieldError{
- Message: `feature flag enable-concise-resolver-syntax should be set to true to use concise resolver syntax`,
- Paths: []string{"taskRef"},
- },
- }, {
- name: "pipeline task - taskRef with url-like name without resolver",
- task: PipelineTask{
- Name: "foo",
- TaskRef: &TaskRef{Name: "https://foo.com/bar"},
- },
- expectedError: apis.FieldError{
- Message: `missing field(s)`,
- Paths: []string{"taskRef.resolver"},
- },
- configMap: map[string]string{"enable-concise-resolver-syntax": "true"},
- }, {
- name: "pipeline task - taskRef with name and params",
- task: PipelineTask{
- Name: "foo",
- TaskRef: &TaskRef{Name: "https://foo/bar", ResolverRef: ResolverRef{Resolver: "git", Params: Params{{Name: "foo", Value: ParamValue{StringVal: "bar"}}}}},
- },
expectedError: apis.FieldError{
Message: `expected exactly one, got both`,
- Paths: []string{"taskRef.name", "taskRef.params"},
+ Paths: []string{"taskRef.name", "taskRef.resolver"},
},
- configMap: map[string]string{"enable-concise-resolver-syntax": "true"},
}, {
name: "pipeline task - taskRef with resolver params but no resolver",
task: PipelineTask{
@@ -669,8 +634,7 @@ func TestPipelineTask_ValidateRegularTask_Failure(t *testing.T) {
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- ctx := cfgtesting.SetFeatureFlags(context.Background(), t, tt.configMap)
- err := tt.task.validateTask(ctx)
+ err := tt.task.validateTask(context.Background())
if err == nil {
t.Error("PipelineTask.validateTask() did not return error for invalid pipeline task")
}
diff --git a/upstream/pkg/apis/pipeline/v1/pipeline_validation.go b/upstream/pkg/apis/pipeline/v1/pipeline_validation.go
index c17293a02b9..35c0509435b 100644
--- a/upstream/pkg/apis/pipeline/v1/pipeline_validation.go
+++ b/upstream/pkg/apis/pipeline/v1/pipeline_validation.go
@@ -22,7 +22,6 @@ import (
"slices"
"strings"
- "github.com/tektoncd/pipeline/internal/artifactref"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/validate"
"github.com/tektoncd/pipeline/pkg/internal/resultref"
@@ -90,7 +89,6 @@ func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
errs = errs.Also(validateTasksAndFinallySection(ps))
errs = errs.Also(validateFinalTasks(ps.Tasks, ps.Finally))
errs = errs.Also(validateWhenExpressions(ctx, ps.Tasks, ps.Finally))
- errs = errs.Also(validateArtifactReference(ctx, ps.Tasks, ps.Finally))
errs = errs.Also(validateMatrix(ctx, ps.Tasks).ViaField("tasks"))
errs = errs.Also(validateMatrix(ctx, ps.Finally).ViaField("finally"))
return errs
@@ -153,10 +151,10 @@ func (l PipelineTaskList) Validate(ctx context.Context, taskNames sets.String, p
}
// validateUsageOfDeclaredPipelineTaskParameters validates that all parameters referenced in the pipeline Task are declared by the pipeline Task.
-func (l PipelineTaskList) validateUsageOfDeclaredPipelineTaskParameters(ctx context.Context, additionalParams []ParamSpec, path string) (errs *apis.FieldError) {
+func (l PipelineTaskList) validateUsageOfDeclaredPipelineTaskParameters(ctx context.Context, path string) (errs *apis.FieldError) {
for i, t := range l {
if t.TaskSpec != nil {
- errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, t.TaskSpec.Steps, append(t.TaskSpec.Params, additionalParams...)).ViaFieldIndex(path, i))
+ errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, t.TaskSpec.Steps, t.TaskSpec.Params).ViaFieldIndex(path, i))
}
}
return errs
@@ -192,7 +190,7 @@ func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) {
}
if pt.OnError != "" {
- errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "OnError", config.BetaAPIFields))
+ errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "OnError", config.AlphaAPIFields))
if pt.OnError != PipelineTaskContinue && pt.OnError != PipelineTaskStopAndFail {
errs = errs.Also(apis.ErrInvalidValue(pt.OnError, "OnError", "PipelineTask OnError must be either \"continue\" or \"stopAndFail\""))
}
@@ -387,8 +385,8 @@ func validatePipelineWorkspacesDeclarations(wss []PipelineWorkspaceDeclaration)
// validatePipelineParameterUsage validates that parameters referenced in the Pipeline are declared by the Pipeline
func (ps *PipelineSpec) validatePipelineParameterUsage(ctx context.Context) (errs *apis.FieldError) {
- errs = errs.Also(PipelineTaskList(ps.Tasks).validateUsageOfDeclaredPipelineTaskParameters(ctx, ps.Params, "tasks"))
- errs = errs.Also(PipelineTaskList(ps.Finally).validateUsageOfDeclaredPipelineTaskParameters(ctx, ps.Params, "finally"))
+ errs = errs.Also(PipelineTaskList(ps.Tasks).validateUsageOfDeclaredPipelineTaskParameters(ctx, "tasks"))
+ errs = errs.Also(PipelineTaskList(ps.Finally).validateUsageOfDeclaredPipelineTaskParameters(ctx, "finally"))
errs = errs.Also(validatePipelineTaskParameterUsage(ps.Tasks, ps.Params).ViaField("tasks"))
errs = errs.Also(validatePipelineTaskParameterUsage(ps.Finally, ps.Params).ViaField("finally"))
return errs
@@ -513,13 +511,9 @@ func (pt *PipelineTask) GetVarSubstitutionExpressions() []string {
return allExpressions
}
-// containsExecutionStatusRef checks if a specified param has a reference to execution status or reason
-// $(tasks..status), $(tasks.status), or $(tasks..reason)
func containsExecutionStatusRef(p string) bool {
- if strings.HasPrefix(p, "tasks.") {
- if strings.HasSuffix(p, ".status") || strings.HasSuffix(p, ".reason") {
- return true
- }
+ if strings.HasPrefix(p, "tasks.") && strings.HasSuffix(p, ".status") {
+ return true
}
return false
}
@@ -593,7 +587,7 @@ func containsExecutionStatusReferences(expressions []string) bool {
if !LooksLikeContainsResultRefs(expressions) {
for _, e := range expressions {
// check if it contains context variable accessing execution status - $(tasks.taskname.status)
- // or an aggregate status - $(tasks.status) or reason - $(tasks.taskname.reason)
+ // or an aggregate status - $(tasks.status)
if containsExecutionStatusRef(e) {
return true
}
@@ -610,17 +604,10 @@ func validateExecutionStatusVariablesExpressions(expressions []string, ptNames s
if expression == PipelineTasksAggregateStatus {
continue
}
- // check if it contains context variable accessing execution status - $(tasks.taskname.status) | $(tasks.taskname.reason)
+ // check if it contains context variable accessing execution status - $(tasks.taskname.status)
if containsExecutionStatusRef(expression) {
- var pt string
- if strings.HasSuffix(expression, ".status") {
- // strip tasks. and .status from tasks.taskname.status to further verify task name
- pt = strings.TrimSuffix(strings.TrimPrefix(expression, "tasks."), ".status")
- }
- if strings.HasSuffix(expression, ".reason") {
- // strip tasks. and .reason from tasks.taskname.reason to further verify task name
- pt = strings.TrimSuffix(strings.TrimPrefix(expression, "tasks."), ".reason")
- }
+ // strip tasks. and .status from tasks.taskname.status to further verify task name
+ pt := strings.TrimSuffix(strings.TrimPrefix(expression, "tasks."), ".status")
// report an error if the task name does not exist in the list of dag tasks
if !ptNames.Has(pt) {
errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("pipeline task %s is not defined in the pipeline", pt), fieldPath))
@@ -628,7 +615,6 @@ func validateExecutionStatusVariablesExpressions(expressions []string, ptNames s
}
}
}
-
return errs
}
@@ -900,28 +886,6 @@ func validateStringResults(results []TaskResult, resultName string) (errs *apis.
return errs
}
-// validateArtifactReference ensure that the feature flag enableArtifacts is set to true when using artifacts
-func validateArtifactReference(ctx context.Context, tasks []PipelineTask, finalTasks []PipelineTask) (errs *apis.FieldError) {
- if config.FromContextOrDefaults(ctx).FeatureFlags.EnableArtifacts {
- return errs
- }
- for i, t := range tasks {
- for _, v := range t.Params.extractValues() {
- if len(artifactref.TaskArtifactRegex.FindAllStringSubmatch(v, -1)) > 0 {
- return errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use artifacts feature.", config.EnableArtifacts), "").ViaField("params").ViaFieldIndex("tasks", i))
- }
- }
- }
- for i, t := range finalTasks {
- for _, v := range t.Params.extractValues() {
- if len(artifactref.TaskArtifactRegex.FindAllStringSubmatch(v, -1)) > 0 {
- return errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use artifacts feature.", config.EnableArtifacts), "").ViaField("params").ViaFieldIndex("finally", i))
- }
- }
- }
- return errs
-}
-
// GetIndexingReferencesToArrayParams returns all strings referencing indices of PipelineRun array parameters
// from parameters, workspaces, and when expressions defined in the Pipeline's Tasks and Finally Tasks.
// For example, if a Task in the Pipeline has a parameter with a value "$(params.array-param-name[1])",
diff --git a/upstream/pkg/apis/pipeline/v1/pipeline_validation_test.go b/upstream/pkg/apis/pipeline/v1/pipeline_validation_test.go
index 2dd9b22d55b..609aba85a1d 100644
--- a/upstream/pkg/apis/pipeline/v1/pipeline_validation_test.go
+++ b/upstream/pkg/apis/pipeline/v1/pipeline_validation_test.go
@@ -58,8 +58,7 @@ func TestPipeline_Validate_Success(t *testing.T) {
p: &Pipeline{
ObjectMeta: metav1.ObjectMeta{Name: "pipeline"},
Spec: PipelineSpec{
- Tasks: []PipelineTask{{
- Name: "foo",
+ Tasks: []PipelineTask{{Name: "foo",
TaskSpec: &EmbeddedTask{
TypeMeta: runtime.TypeMeta{
APIVersion: "example.dev/v0",
@@ -67,8 +66,7 @@ func TestPipeline_Validate_Success(t *testing.T) {
},
Spec: runtime.RawExtension{
Raw: []byte(`{"field1":123,"field2":"value"}`),
- },
- },
+ }},
}},
},
},
@@ -116,65 +114,6 @@ func TestPipeline_Validate_Success(t *testing.T) {
},
},
},
- }, {
- name: "propagating params into Step",
- p: &Pipeline{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pipelinelinename",
- },
- Spec: PipelineSpec{
- Params: ParamSpecs{{
- Name: "pipeline-words",
- Type: ParamTypeArray,
- Default: &ParamValue{
- Type: ParamTypeArray,
- ArrayVal: []string{"hello", "pipeline"},
- },
- }},
- Tasks: []PipelineTask{{
- Name: "echoit",
- TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
- Steps: []Step{{
- Name: "echo",
- Image: "ubuntu",
- Command: []string{"echo"},
- Args: []string{"$(params.pipeline-words[*])"},
- }},
- }},
- }},
- },
- },
- }, {
- name: "propagating object params with pipelinespec and taskspec",
- p: &Pipeline{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pipelinelinename",
- },
- Spec: PipelineSpec{
- Params: ParamSpecs{{
- Name: "pipeline-words",
- Default: &ParamValue{
- Type: ParamTypeObject,
- ObjectVal: map[string]string{"hello": "pipeline"},
- },
- Type: ParamTypeObject,
- Properties: map[string]PropertySpec{
- "hello": {Type: ParamTypeString},
- },
- }},
- Tasks: []PipelineTask{{
- Name: "echoit",
- TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
- Steps: []Step{{
- Name: "echo",
- Image: "ubuntu",
- Command: []string{"echo"},
- Args: []string{"$(params.pipeline-words.hello)"},
- }},
- }},
- }},
- },
- },
}, {
name: "param with different type of values without matrix",
p: &Pipeline{
@@ -200,7 +139,7 @@ func TestPipeline_Validate_Success(t *testing.T) {
Name: "echo",
Image: "ubuntu",
Command: []string{"echo"},
- Args: []string{"$(params.pipeline-words.hello)"},
+ Args: []string{"hello"},
}},
}},
Params: Params{
@@ -227,7 +166,7 @@ func TestPipeline_Validate_Success(t *testing.T) {
Name: "echo",
Image: "ubuntu",
Command: []string{"echo"},
- Args: []string{"$(params.pipeline-words.hello)"},
+ Args: []string{"hello"},
}},
}},
}},
@@ -258,7 +197,7 @@ func TestPipeline_Validate_Success(t *testing.T) {
Name: "echo",
Image: "ubuntu",
Command: []string{"echo"},
- Args: []string{"$(params.pipeline-words.hello)"},
+ Args: []string{"hello"},
}},
}},
Params: Params{
@@ -285,7 +224,7 @@ func TestPipeline_Validate_Success(t *testing.T) {
Name: "echo",
Image: "ubuntu",
Command: []string{"echo"},
- Args: []string{"$(params.pipeline-words.hello)"},
+ Args: []string{"hello"},
}},
}},
Matrix: &Matrix{
@@ -298,31 +237,6 @@ func TestPipeline_Validate_Success(t *testing.T) {
}},
},
},
- }, {
- name: "valid pipeline with pipeline task and final task referencing artifacts in task params with enable-artifacts flag true",
- p: &Pipeline{
- ObjectMeta: metav1.ObjectMeta{Name: "pipeline"},
- Spec: PipelineSpec{
- Description: "this is an invalid pipeline referencing artifacts with enable-artifacts flag false",
- Tasks: []PipelineTask{{
- Name: "pre-task",
- TaskRef: &TaskRef{Name: "foo-task"},
- }, {
- Name: "consume-artifacts-task",
- Params: Params{{Name: "aaa", Value: ParamValue{
- Type: ParamTypeString,
- StringVal: "$(tasks.produce-artifacts-task.outputs.image)",
- }}},
- TaskSpec: &EmbeddedTask{TaskSpec: getTaskSpec()},
- }},
- },
- },
- wc: func(ctx context.Context) context.Context {
- return cfgtesting.SetFeatureFlags(ctx, t,
- map[string]string{
- "enable-artifacts": "true",
- "enable-api-fields": "alpha"})
- },
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -512,8 +426,7 @@ func TestPipeline_Validate_Failure(t *testing.T) {
return cfgtesting.SetFeatureFlags(ctx, t,
map[string]string{
"disable-inline-spec": "pipeline",
- "enable-api-fields": "alpha",
- })
+ "enable-api-fields": "alpha"})
},
}, {
name: "pipelineSpec when disable-inline-spec all",
@@ -533,8 +446,7 @@ func TestPipeline_Validate_Failure(t *testing.T) {
return cfgtesting.SetFeatureFlags(ctx, t,
map[string]string{
"disable-inline-spec": "pipeline,taskrun,pipelinerun",
- "enable-api-fields": "alpha",
- })
+ "enable-api-fields": "alpha"})
},
}, {
name: "taskSpec when disable-inline-spec",
@@ -576,78 +488,6 @@ func TestPipeline_Validate_Failure(t *testing.T) {
},
})
},
- }, {
- name: "propagating params with pipelinespec and taskspec",
- p: &Pipeline{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pipelinename",
- },
- Spec: PipelineSpec{
- Params: ParamSpecs{{
- Name: "pipeline-words",
- Type: ParamTypeArray,
- Default: &ParamValue{
- Type: ParamTypeArray,
- ArrayVal: []string{"hello", "pipeline"},
- },
- }},
- Tasks: []PipelineTask{{
- Name: "echoit",
- TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
- Steps: []Step{{
- Name: "echo",
- Image: "ubuntu",
- Command: []string{"echo"},
- Args: []string{"$(params.random-words[*])"},
- }},
- }},
- }},
- },
- },
- expectedError: apis.FieldError{
- Message: `non-existent variable in "$(params.random-words[*])"`,
- Paths: []string{"spec.tasks[0].steps[0].args[0]"},
- },
- }, {
- name: "propagating params to taskRef",
- p: &Pipeline{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pipelinename",
- },
- Spec: PipelineSpec{
- Params: ParamSpecs{{
- Name: "hello",
- Type: ParamTypeString,
- Default: &ParamValue{
- Type: ParamTypeString,
- StringVal: "hi",
- },
- }},
- Tasks: []PipelineTask{{
- Name: "echoit",
- TaskRef: &TaskRef{
- Name: "remote-task",
- },
- Params: Params{{
- Name: "param1",
- Value: ParamValue{
- Type: ParamTypeString,
- StringVal: "$(params.param1)",
- },
- }, {
- Name: "holla",
- Value: ParamValue{
- Type: ParamTypeString,
- StringVal: "$(params.hello)",
- },
- }},
- }},
- },
- },
- expectedError: apis.FieldError{
- Message: `non-existent variable in "$(params.param1)"`,
- Paths: []string{"spec.tasks[0].params[param1]"},
- },
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -1306,59 +1146,6 @@ func TestPipelineSpec_Validate_Failure(t *testing.T) {
Message: `missing field(s)`,
Paths: []string{"tasks[1].when[0]", "finally[0].when[0]"},
},
- }, {
- name: "invalid pipeline with one pipeline task referencing artifacts in task params with enable-artifacts flag false",
- ps: &PipelineSpec{
- Description: "this is an invalid pipeline referencing artifacts with enable-artifacts flag false",
- Tasks: []PipelineTask{{
- Name: "pre-task",
- TaskRef: &TaskRef{Name: "foo-task"},
- }, {
- Name: "consume-artifacts-task",
- Params: Params{{Name: "aaa", Value: ParamValue{
- Type: ParamTypeString,
- StringVal: "$(tasks.produce-artifacts-task.outputs.image)",
- }}},
- TaskSpec: &EmbeddedTask{TaskSpec: getTaskSpec()},
- }},
- },
- expectedError: apis.FieldError{
- Message: `feature flag enable-artifacts should be set to true to use artifacts feature.`,
- Paths: []string{"tasks[1].params"},
- },
- wc: func(ctx context.Context) context.Context {
- return cfgtesting.SetFeatureFlags(ctx, t,
- map[string]string{
- "enable-artifacts": "false",
- "enable-api-fields": "alpha"})
- },
- }, {
- name: "invalid pipeline with one final pipeline task referencing artifacts in params with enable-artifacts flag false",
- ps: &PipelineSpec{
- Description: "this is an invalid pipeline referencing artifacts with enable-artifacts flag false",
- Tasks: []PipelineTask{{
- Name: "pre-task",
- TaskRef: &TaskRef{Name: "foo-task"},
- }},
- Finally: []PipelineTask{{
- Name: "consume-artifacts-task",
- Params: Params{{Name: "aaa", Value: ParamValue{
- Type: ParamTypeString,
- StringVal: "$(tasks.produce-artifacts-task.outputs.image)",
- }}},
- TaskSpec: &EmbeddedTask{TaskSpec: getTaskSpec()},
- }},
- },
- wc: func(ctx context.Context) context.Context {
- return cfgtesting.SetFeatureFlags(ctx, t,
- map[string]string{
- "enable-artifacts": "false",
- "enable-api-fields": "alpha"})
- },
- expectedError: apis.FieldError{
- Message: `feature flag enable-artifacts should be set to true to use artifacts feature.`,
- Paths: []string{"finally[0].params"},
- },
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -1578,66 +1365,64 @@ func TestFinallyTaskResultsToPipelineResults_Success(t *testing.T) {
name string
p *Pipeline
wc func(context.Context) context.Context
- }{
- {
- name: "valid pipeline with pipeline results",
- p: &Pipeline{
- ObjectMeta: metav1.ObjectMeta{Name: "pipeline"},
- Spec: PipelineSpec{
- Results: []PipelineResult{{
- Name: "initialized",
- Value: *NewStructuredValues("$(tasks.clone-app-repo.results.initialized)"),
- }},
- Tasks: []PipelineTask{{
- Name: "clone-app-repo",
- TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
- Results: []TaskResult{{
- Name: "initialized",
- Type: "string",
- }},
- Steps: []Step{{
- Name: "foo", Image: "bar",
- }},
+ }{{
+ name: "valid pipeline with pipeline results",
+ p: &Pipeline{
+ ObjectMeta: metav1.ObjectMeta{Name: "pipeline"},
+ Spec: PipelineSpec{
+ Results: []PipelineResult{{
+ Name: "initialized",
+ Value: *NewStructuredValues("$(tasks.clone-app-repo.results.initialized)"),
+ }},
+ Tasks: []PipelineTask{{
+ Name: "clone-app-repo",
+ TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
+ Results: []TaskResult{{
+ Name: "initialized",
+ Type: "string",
+ }},
+ Steps: []Step{{
+ Name: "foo", Image: "bar",
}},
}},
- },
+ }},
},
- }, {
- name: "referencing existent finally task result",
- p: &Pipeline{
- ObjectMeta: metav1.ObjectMeta{Name: "pipeline"},
- Spec: PipelineSpec{
- Results: []PipelineResult{{
- Name: "initialized",
- Value: *NewStructuredValues("$(finally.check-git-commit.results.init)"),
- }},
- Tasks: []PipelineTask{{
- Name: "clone-app-repo",
- TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
- Results: []TaskResult{{
- Name: "current-date-unix-timestamp",
- Type: "string",
- }},
- Steps: []Step{{
- Name: "foo", Image: "bar",
- }},
+ }}, {
+ name: "referencing existent finally task result",
+ p: &Pipeline{
+ ObjectMeta: metav1.ObjectMeta{Name: "pipeline"},
+ Spec: PipelineSpec{
+ Results: []PipelineResult{{
+ Name: "initialized",
+ Value: *NewStructuredValues("$(finally.check-git-commit.results.init)"),
+ }},
+ Tasks: []PipelineTask{{
+ Name: "clone-app-repo",
+ TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
+ Results: []TaskResult{{
+ Name: "current-date-unix-timestamp",
+ Type: "string",
+ }},
+ Steps: []Step{{
+ Name: "foo", Image: "bar",
}},
}},
- Finally: []PipelineTask{{
- Name: "check-git-commit",
- TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
- Results: []TaskResult{{
- Name: "init",
- Type: "string",
- }},
- Steps: []Step{{
- Name: "foo2", Image: "bar",
- }},
+ }},
+ Finally: []PipelineTask{{
+ Name: "check-git-commit",
+ TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
+ Results: []TaskResult{{
+ Name: "init",
+ Type: "string",
+ }},
+ Steps: []Step{{
+ Name: "foo2", Image: "bar",
}},
}},
- },
+ }},
},
},
+ },
}
for _, tt := range tests {
@@ -1881,8 +1666,7 @@ func TestValidatePipelineParameterVariables_Success(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.baz)", "and", "$(params.foo-is-baz)"}},
- }},
- },
+ }}},
}},
}, {
name: "valid star array parameter variables in matrix",
@@ -1897,8 +1681,7 @@ func TestValidatePipelineParameterVariables_Success(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.baz[*])", "and", "$(params.foo-is-baz[*])"}},
- }},
- },
+ }}},
}},
}, {
name: "array param - using the whole variable as a param's value that is intended to be array type",
@@ -1924,13 +1707,9 @@ func TestValidatePipelineParameterVariables_Success(t *testing.T) {
Matrix: &Matrix{
Include: IncludeParamsList{{
Name: "build-1",
- Params: Params{
- {
- Name: "a-param", Value: ParamValue{Type: ParamTypeString, StringVal: "$(params.baz)"},
- },
- },
- }},
- },
+ Params: Params{{
+ Name: "a-param", Value: ParamValue{Type: ParamTypeString, StringVal: "$(params.baz)"}},
+ }}}},
}},
}, {
name: "object param - using single individual variable in string param",
@@ -2022,8 +1801,7 @@ func TestValidatePipelineParameterVariables_Success(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.myObject.key1)", "and", "$(params.myObject.key2)"}},
- }},
- },
+ }}},
}},
}, {
name: "object param - using the whole variable as a param's value that is intended to be object type",
@@ -2205,8 +1983,7 @@ func TestValidatePipelineDeclaredParameterUsage_Failure(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.does-not-exist)"}},
- }},
- },
+ }}},
}},
expectedError: apis.FieldError{
Message: `non-existent variable in "$(params.does-not-exist)"`,
@@ -2223,8 +2000,7 @@ func TestValidatePipelineDeclaredParameterUsage_Failure(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.foo)", "and", "$(params.does-not-exist)"}},
- }},
- },
+ }}},
}},
expectedError: apis.FieldError{
Message: `non-existent variable in "$(params.does-not-exist)"`,
@@ -2242,9 +2018,7 @@ func TestValidatePipelineDeclaredParameterUsage_Failure(t *testing.T) {
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.foo)"}},
}, {
- Name: "b-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.does-not-exist)"}},
- }},
- },
+ Name: "b-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.does-not-exist)"}}}}},
}},
expectedError: apis.FieldError{
Message: `non-existent variable in "$(params.does-not-exist)"`,
@@ -2265,8 +2039,7 @@ func TestValidatePipelineDeclaredParameterUsage_Failure(t *testing.T) {
}, {
Name: "b-param", Value: ParamValue{Type: ParamTypeString, StringVal: "$(params.does-not-exist)"},
}},
- }},
- },
+ }}},
}},
expectedError: apis.FieldError{
Message: `non-existent variable in "$(params.does-not-exist)"`,
@@ -2405,8 +2178,7 @@ func TestValidatePipelineDeclaredParameterUsage_Failure(t *testing.T) {
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.myObject.key1)"}},
}, {
Name: "b-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.myObject.non-exist-key)"}},
- }},
- },
+ }}},
}},
expectedError: apis.FieldError{
Message: `non-existent variable in "$(params.myObject.non-exist-key)"`,
@@ -3547,7 +3319,6 @@ func TestValidateFinalTasks_Failure(t *testing.T) {
})
}
}
-
func TestContextValid(t *testing.T) {
tests := []struct {
name string
@@ -3563,8 +3334,7 @@ func TestContextValid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param-mat", Value: ParamValue{ArrayVal: []string{"$(context.pipeline.name)"}},
- }},
- },
+ }}},
}},
}, {
name: "valid string context variable for PipelineRun name",
@@ -3577,8 +3347,7 @@ func TestContextValid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param-mat", Value: ParamValue{ArrayVal: []string{"$(context.pipelineRun.name)"}},
- }},
- },
+ }}},
}},
}, {
name: "valid string context variable for PipelineRun namespace",
@@ -3591,8 +3360,7 @@ func TestContextValid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param-mat", Value: ParamValue{ArrayVal: []string{"$(context.pipelineRun.namespace)"}},
- }},
- },
+ }}},
}},
}, {
name: "valid string context variable for PipelineRun uid",
@@ -3605,8 +3373,7 @@ func TestContextValid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param-mat", Value: ParamValue{ArrayVal: []string{"$(context.pipelineRun.uid)"}},
- }},
- },
+ }}},
}},
}, {
name: "valid array context variables for Pipeline and PipelineRun names",
@@ -3619,8 +3386,7 @@ func TestContextValid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param-mat", Value: ParamValue{ArrayVal: []string{"$(context.pipeline.name)", "and", "$(context.pipelineRun.name)"}},
- }},
- },
+ }}},
}},
}, {
name: "valid string context variable for PipelineTask retries",
@@ -3633,8 +3399,7 @@ func TestContextValid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{StringVal: "$(context.pipelineTask.retries)"},
- }},
- },
+ }}},
}},
}, {
name: "valid array context variable for PipelineTask retries",
@@ -3647,8 +3412,7 @@ func TestContextValid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param-mat", Value: ParamValue{ArrayVal: []string{"$(context.pipelineTask.retries)"}},
- }},
- },
+ }}},
}},
}, {
name: "valid string context variable for Pipeline name in include params",
@@ -3662,10 +3426,8 @@ func TestContextValid(t *testing.T) {
Include: IncludeParamsList{{
Name: "build-1",
Params: Params{{
- Name: "a-param-mat", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipeline.name)"},
- }},
- }},
- },
+ Name: "a-param-mat", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipeline.name)"}}},
+ }}},
}},
}, {
name: "valid string context variable for PipelineTask retries in matrix include",
@@ -3679,10 +3441,8 @@ func TestContextValid(t *testing.T) {
Include: IncludeParamsList{{
Name: "build-1",
Params: Params{{
- Name: "a-param-mat", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipelineTask.retries)"},
- }},
- }},
- },
+ Name: "a-param-mat", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipelineTask.retries)"}}},
+ }}},
}},
}}
for _, tt := range tests {
@@ -3710,8 +3470,7 @@ func TestContextInvalid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param-foo", Value: ParamValue{ArrayVal: []string{"$(context.pipeline.missing-foo)"}},
- }},
- },
+ }}},
}},
expectedError: *apis.ErrGeneric("").Also(&apis.FieldError{
Message: `non-existent variable in "$(context.pipeline.missing)"`,
@@ -3731,8 +3490,7 @@ func TestContextInvalid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param-foo", Value: ParamValue{ArrayVal: []string{"$(context.pipelineRun.missing-foo)"}},
- }},
- },
+ }}},
}},
expectedError: *apis.ErrGeneric("").Also(&apis.FieldError{
Message: `non-existent variable in "$(context.pipelineRun.missing)"`,
@@ -3752,8 +3510,7 @@ func TestContextInvalid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param-foo", Value: ParamValue{ArrayVal: []string{"$(context.pipelineTask.missing-foo)"}},
- }},
- },
+ }}},
}},
expectedError: *apis.ErrGeneric("").Also(&apis.FieldError{
Message: `non-existent variable in "$(context.pipelineTask.missing)"`,
@@ -3773,8 +3530,7 @@ func TestContextInvalid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{ArrayVal: []string{"$(context.pipeline.missing-foo)", "$(context.pipelineTask.missing-foo)", "$(context.pipelineRun.missing-foo)"}},
- }},
- },
+ }}},
}},
expectedError: *apis.ErrGeneric(`non-existent variable in "$(context.pipeline.missing)"`, "value").
Also(apis.ErrGeneric(`non-existent variable in "$(context.pipelineRun.missing)"`, "value")).
@@ -3791,10 +3547,8 @@ func TestContextInvalid(t *testing.T) {
Include: IncludeParamsList{{
Name: "build-1",
Params: Params{{
- Name: "a-param-foo", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipeline.missing)"},
- }},
- }},
- },
+ Name: "a-param-foo", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipeline.missing)"}}},
+ }}},
}},
expectedError: *apis.ErrGeneric("").Also(&apis.FieldError{
Message: `non-existent variable in "$(context.pipeline.missing)"`,
@@ -3809,10 +3563,8 @@ func TestContextInvalid(t *testing.T) {
Include: IncludeParamsList{{
Name: "build-1",
Params: Params{{
- Name: "a-param-foo", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipelineRun.missing)"},
- }},
- }},
- },
+ Name: "a-param-foo", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipelineRun.missing)"}}},
+ }}},
}},
expectedError: *apis.ErrGeneric("").Also(&apis.FieldError{
Message: `non-existent variable in "$(context.pipelineRun.missing)"`,
@@ -3827,10 +3579,8 @@ func TestContextInvalid(t *testing.T) {
Include: IncludeParamsList{{
Name: "build-1",
Params: Params{{
- Name: "a-param-foo", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipelineTask.missing)"},
- }},
- }},
- },
+ Name: "a-param-foo", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipelineTask.missing)"}}},
+ }}},
}},
expectedError: *apis.ErrGeneric("").Also(&apis.FieldError{
Message: `non-existent variable in "$(context.pipelineTask.missing)"`,
@@ -3866,8 +3616,6 @@ func TestPipelineTasksExecutionStatus(t *testing.T) {
TaskRef: &TaskRef{Name: "bar-task"},
Params: Params{{
Name: "foo-status", Value: ParamValue{Type: ParamTypeString, StringVal: "$(tasks.foo.status)"},
- }, {
- Name: "foo-reason", Value: ParamValue{Type: ParamTypeString, StringVal: "$(tasks.foo.reason)"},
}, {
Name: "tasks-status", Value: ParamValue{Type: ParamTypeString, StringVal: "$(tasks.status)"},
}},
@@ -3875,10 +3623,6 @@ func TestPipelineTasksExecutionStatus(t *testing.T) {
Input: "$(tasks.foo.status)",
Operator: selection.In,
Values: []string{"Failure"},
- }, {
- Input: "$(tasks.foo.reason)",
- Operator: selection.In,
- Values: []string{"Failed"},
}, {
Input: "$(tasks.status)",
Operator: selection.In,
@@ -4116,8 +3860,7 @@ func TestMatrixIncompatibleAPIVersions(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
}
tests := []struct {
name string
@@ -4180,8 +3923,7 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "foobar", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
Params: Params{{
Name: "foobar", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
}},
@@ -4195,8 +3937,7 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "foobar", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
Params: Params{{
Name: "barfoo", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"bar", "foo"}},
}},
@@ -4209,16 +3950,14 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(tasks.foo-task.results.a-result)"}},
- }},
- },
+ }}},
}, {
Name: "b-task",
TaskRef: &TaskRef{Name: "b-task"},
Matrix: &Matrix{
Params: Params{{
Name: "b-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(tasks.bar-task.results.b-result)"}},
- }},
- },
+ }}},
}},
}, {
name: "parameters in matrix contain whole array results references",
@@ -4228,8 +3967,7 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(tasks.foo-task.results.a-task-results[*])"}},
- }},
- },
+ }}},
}},
}, {
name: "results from matrixed task consumed in tasks through parameters",
@@ -4239,8 +3977,7 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
}, {
Name: "b-task",
TaskRef: &TaskRef{Name: "b-task"},
@@ -4256,8 +3993,7 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
}},
finally: PipelineTaskList{{
Name: "b-task",
@@ -4274,8 +4010,7 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
}, {
Name: "b-task",
TaskRef: &TaskRef{Name: "b-task"},
@@ -4298,8 +4033,7 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
}, {
Name: "b-task",
TaskRef: &TaskRef{Name: "b-task"},
@@ -4317,8 +4051,7 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
}},
finally: PipelineTaskList{{
Name: "b-task",
@@ -4337,8 +4070,7 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
}, {
Name: "b-task",
TaskRef: &TaskRef{Name: "b-task"},
@@ -4367,8 +4099,7 @@ func Test_validateMatrix(t *testing.T) {
Name: "platform", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"linux", "mac"}},
}, {
Name: "browser", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"chrome", "safari"}},
- }},
- },
+ }}},
}, {
Name: "echoarrayurl",
TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
@@ -4390,8 +4121,7 @@ func Test_validateMatrix(t *testing.T) {
Params: ParamSpecs{{
Name: "platform",
}, {
- Name: "browser",
- }},
+ Name: "browser"}},
Results: []TaskResult{{
Name: "report-url",
Type: ResultsTypeString,
@@ -4400,8 +4130,7 @@ func Test_validateMatrix(t *testing.T) {
Name: "produce-report-url",
Image: "alpine",
Script: ` |
- echo -n "https://api.example/get-report/$(params.platform)-$(params.browser)" | tee $(results.report-url.path)`,
- }},
+ echo -n "https://api.example/get-report/$(params.platform)-$(params.browser)" | tee $(results.report-url.path)`}},
}},
}, {
Name: "task-consuming-results",
@@ -4435,8 +4164,7 @@ func Test_validateMatrix(t *testing.T) {
Name: "platform", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"linux", "mac"}},
}, {
Name: "browser", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"chrome", "safari"}},
- }},
- },
+ }}},
}, {
Name: "task-consuming-results",
TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
@@ -4466,16 +4194,14 @@ func Test_validateMatrix(t *testing.T) {
Name: "platform", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"linux", "mac"}},
}, {
Name: "browser", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"chrome", "safari"}},
- }},
- },
+ }}},
}, {
Name: "taskwithresult",
TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
Params: ParamSpecs{{
Name: "platform",
}, {
- Name: "browser",
- }},
+ Name: "browser"}},
Results: []TaskResult{{
Name: "report-url",
Type: ResultsTypeString,
@@ -4484,8 +4210,7 @@ func Test_validateMatrix(t *testing.T) {
Name: "produce-report-url",
Image: "alpine",
Script: ` |
- echo -n "https://api.example/get-report/$(params.platform)-$(params.browser)" | tee $(results.report-url.path)`,
- }},
+ echo -n "https://api.example/get-report/$(params.platform)-$(params.browser)" | tee $(results.report-url.path)`}},
}},
}, {
Name: "task-consuming-results",
@@ -4518,8 +4243,7 @@ func Test_validateMatrix(t *testing.T) {
Params: ParamSpecs{{
Name: "platform",
}, {
- Name: "browser",
- }},
+ Name: "browser"}},
Results: []TaskResult{{
Name: "array-result",
Type: ResultsTypeArray,
@@ -4528,24 +4252,21 @@ func Test_validateMatrix(t *testing.T) {
Name: "produce-array-result",
Image: "alpine",
Script: ` |
- echo -n "[\"${params.platform}\",\"${params.browser}\"]" | tee $(results.array-result.path)`,
- }},
+ echo -n "[\"${params.platform}\",\"${params.browser}\"]" | tee $(results.array-result.path)`}},
}},
Matrix: &Matrix{
Params: Params{{
Name: "platform", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"linux", "mac"}},
}, {
Name: "browser", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"chrome", "safari"}},
- }},
- },
+ }}},
}, {
Name: "taskwithresult",
TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
Params: ParamSpecs{{
Name: "platform",
}, {
- Name: "browser",
- }},
+ Name: "browser"}},
Results: []TaskResult{{
Name: "array-result",
Type: ResultsTypeArray,
@@ -4554,8 +4275,7 @@ func Test_validateMatrix(t *testing.T) {
Name: "produce-array-result",
Image: "alpine",
Script: ` |
- echo -n "https://api.example/get-report/$(params.platform)-$(params.browser)" | tee $(results.array-result.path)`,
- }},
+ echo -n "https://api.example/get-report/$(params.platform)-$(params.browser)" | tee $(results.array-result.path)`}},
}},
}, {
Name: "task-consuming-results",
@@ -4588,8 +4308,7 @@ func Test_validateMatrix(t *testing.T) {
Params: ParamSpecs{{
Name: "platform",
}, {
- Name: "browser",
- }},
+ Name: "browser"}},
Results: []TaskResult{{
Name: "array-result",
Type: ResultsTypeArray,
@@ -4598,24 +4317,21 @@ func Test_validateMatrix(t *testing.T) {
Name: "produce-array-result",
Image: "alpine",
Script: ` |
- echo -n "[\"${params.platform}\",\"${params.browser}\"]" | tee $(results.array-result.path)`,
- }},
+ echo -n "[\"${params.platform}\",\"${params.browser}\"]" | tee $(results.array-result.path)`}},
}},
Matrix: &Matrix{
Params: Params{{
Name: "platform", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"linux", "mac"}},
}, {
Name: "browser", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"chrome", "safari"}},
- }},
- },
+ }}},
}, {
Name: "task-consuming-results",
TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
Params: ParamSpecs{{
Name: "platform",
}, {
- Name: "browser",
- }},
+ Name: "browser"}},
Results: []TaskResult{{
Name: "report-url",
Type: ResultsTypeString,
@@ -4624,8 +4340,7 @@ func Test_validateMatrix(t *testing.T) {
Name: "produce-report-url",
Image: "alpine",
Script: ` |
- echo -n "https://api.example/get-report/$(params.platform)-$(params.browser)" | tee $(results.report-url.path)`,
- }},
+ echo -n "https://api.example/get-report/$(params.platform)-$(params.browser)" | tee $(results.report-url.path)`}},
}},
Params: Params{{
Name: "b-param", Value: ParamValue{Type: ParamTypeString, StringVal: "$(tasks.matrix-emitting-results-embedded.results.report-url[0])"},
@@ -4642,16 +4357,14 @@ func Test_validateMatrix(t *testing.T) {
Name: "platform", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"linux", "mac"}},
}, {
Name: "browser", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"chrome", "safari"}},
- }},
- },
+ }}},
}, {
Name: "taskwithresult",
TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
Params: ParamSpecs{{
Name: "platform",
}, {
- Name: "browser",
- }},
+ Name: "browser"}},
Results: []TaskResult{{
Name: "array-result",
Type: ResultsTypeArray,
@@ -4660,8 +4373,7 @@ func Test_validateMatrix(t *testing.T) {
Name: "produce-array-result",
Image: "alpine",
Script: ` |
- echo -n "[\"${params.platform}\",\"${params.browser}\"]" | tee $(results.array-result.path)`,
- }},
+ echo -n "[\"${params.platform}\",\"${params.browser}\"]" | tee $(results.array-result.path)`}},
}},
}, {
Name: "task-consuming-results",
@@ -4767,144 +4479,144 @@ func TestGetIndexingReferencesToArrayParams(t *testing.T) {
name string
spec PipelineSpec
want sets.String
- }{
- {
- name: "references in task params",
- spec: PipelineSpec{
- Params: []ParamSpec{
- {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
- {Name: "second-param", Type: ParamTypeString},
- },
- Tasks: []PipelineTask{{
- Params: Params{
- {Name: "first-task-first-param", Value: *NewStructuredValues("$(params.first-param[1])")},
- {Name: "first-task-second-param", Value: *NewStructuredValues("$(params.second-param[0])")},
- {Name: "first-task-third-param", Value: *NewStructuredValues("static value")},
- },
- }},
+ }{{
+ name: "references in task params",
+ spec: PipelineSpec{
+ Params: []ParamSpec{
+ {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
+ {Name: "second-param", Type: ParamTypeString},
},
- want: sets.NewString("$(params.first-param[1])", "$(params.second-param[0])"),
- }, {
- name: "references in when expression",
- spec: PipelineSpec{
- Params: []ParamSpec{
- {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
- {Name: "second-param", Type: ParamTypeString},
+ Tasks: []PipelineTask{{
+ Params: Params{
+ {Name: "first-task-first-param", Value: *NewStructuredValues("$(params.first-param[1])")},
+ {Name: "first-task-second-param", Value: *NewStructuredValues("$(params.second-param[0])")},
+ {Name: "first-task-third-param", Value: *NewStructuredValues("static value")},
},
- Tasks: []PipelineTask{{
- When: []WhenExpression{{
- Input: "$(params.first-param[1])",
- Operator: selection.In,
- Values: []string{"$(params.second-param[0])"},
- }},
- }},
+ }},
+ },
+ want: sets.NewString("$(params.first-param[1])", "$(params.second-param[0])"),
+ }, {
+ name: "references in when expression",
+ spec: PipelineSpec{
+ Params: []ParamSpec{
+ {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
+ {Name: "second-param", Type: ParamTypeString},
},
- want: sets.NewString("$(params.first-param[1])", "$(params.second-param[0])"),
- }, {
- name: "nested references in task params",
- spec: PipelineSpec{
- Params: []ParamSpec{
- {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
- {Name: "second-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
- },
- Tasks: []PipelineTask{{
- Params: Params{
- {Name: "first-task-first-param", Value: *NewStructuredValues("$(input.workspace.$(params.first-param[0]))")},
- {Name: "first-task-second-param", Value: *NewStructuredValues("$(input.workspace.$(params.second-param[1]))")},
- },
+ Tasks: []PipelineTask{{
+ When: []WhenExpression{{
+ Input: "$(params.first-param[1])",
+ Operator: selection.In,
+ Values: []string{"$(params.second-param[0])"},
}},
+ }},
+ },
+ want: sets.NewString("$(params.first-param[1])", "$(params.second-param[0])"),
+ }, {
+ name: "nested references in task params",
+ spec: PipelineSpec{
+ Params: []ParamSpec{
+ {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
+ {Name: "second-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
},
- want: sets.NewString("$(params.first-param[0])", "$(params.second-param[1])"),
- }, {
- name: "array parameter",
- spec: PipelineSpec{
- Params: []ParamSpec{
- {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default", "array", "value")},
- {Name: "second-param", Type: ParamTypeArray},
+ Tasks: []PipelineTask{{
+ Params: Params{
+ {Name: "first-task-first-param", Value: *NewStructuredValues("$(input.workspace.$(params.first-param[0]))")},
+ {Name: "first-task-second-param", Value: *NewStructuredValues("$(input.workspace.$(params.second-param[1]))")},
},
- Tasks: []PipelineTask{{
- Params: Params{
- {Name: "first-task-first-param", Value: *NewStructuredValues("firstelement", "$(params.first-param)")},
- {Name: "first-task-second-param", Value: *NewStructuredValues("firstelement", "$(params.second-param[0])")},
- },
- }},
+ }},
+ },
+ want: sets.NewString("$(params.first-param[0])", "$(params.second-param[1])"),
+ }, {
+ name: "array parameter",
+ spec: PipelineSpec{
+ Params: []ParamSpec{
+ {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default", "array", "value")},
+ {Name: "second-param", Type: ParamTypeArray},
},
- want: sets.NewString("$(params.second-param[0])"),
- }, {
- name: "references in finally params",
- spec: PipelineSpec{
- Params: []ParamSpec{
- {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
- {Name: "second-param", Type: ParamTypeArray},
+ Tasks: []PipelineTask{{
+ Params: Params{
+ {Name: "first-task-first-param", Value: *NewStructuredValues("firstelement", "$(params.first-param)")},
+ {Name: "first-task-second-param", Value: *NewStructuredValues("firstelement", "$(params.second-param[0])")},
},
- Finally: []PipelineTask{{
- Params: Params{
- {Name: "final-task-first-param", Value: *NewStructuredValues("$(params.first-param[0])")},
- {Name: "final-task-second-param", Value: *NewStructuredValues("$(params.second-param[1])")},
- },
- }},
+ }},
+ },
+ want: sets.NewString("$(params.second-param[0])"),
+ }, {
+ name: "references in finally params",
+ spec: PipelineSpec{
+ Params: []ParamSpec{
+ {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
+ {Name: "second-param", Type: ParamTypeArray},
},
- want: sets.NewString("$(params.first-param[0])", "$(params.second-param[1])"),
- }, {
- name: "references in finally when expressions",
- spec: PipelineSpec{
- Params: []ParamSpec{
- {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
- {Name: "second-param", Type: ParamTypeArray},
+ Finally: []PipelineTask{{
+ Params: Params{
+ {Name: "final-task-first-param", Value: *NewStructuredValues("$(params.first-param[0])")},
+ {Name: "final-task-second-param", Value: *NewStructuredValues("$(params.second-param[1])")},
},
- Finally: []PipelineTask{{
- When: WhenExpressions{{
- Input: "$(params.first-param[0])",
- Operator: selection.In,
- Values: []string{"$(params.second-param[1])"},
- }},
+ }},
+ },
+ want: sets.NewString("$(params.first-param[0])", "$(params.second-param[1])"),
+ }, {
+ name: "references in finally when expressions",
+ spec: PipelineSpec{
+ Params: []ParamSpec{
+ {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
+ {Name: "second-param", Type: ParamTypeArray},
+ },
+ Finally: []PipelineTask{{
+ When: WhenExpressions{{
+ Input: "$(params.first-param[0])",
+ Operator: selection.In,
+ Values: []string{"$(params.second-param[1])"},
}},
+ }},
+ },
+ want: sets.NewString("$(params.first-param[0])", "$(params.second-param[1])"),
+ }, {
+ name: "parameter references with bracket notation and special characters",
+ spec: PipelineSpec{
+ Params: []ParamSpec{
+ {Name: "first.param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
+ {Name: "second/param", Type: ParamTypeArray},
+ {Name: "third.param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
+ {Name: "fourth/param", Type: ParamTypeArray},
},
- want: sets.NewString("$(params.first-param[0])", "$(params.second-param[1])"),
- }, {
- name: "parameter references with bracket notation and special characters",
- spec: PipelineSpec{
- Params: []ParamSpec{
- {Name: "first.param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
- {Name: "second/param", Type: ParamTypeArray},
- {Name: "third.param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
- {Name: "fourth/param", Type: ParamTypeArray},
+ Tasks: []PipelineTask{{
+ Params: Params{
+ {Name: "first-task-first-param", Value: *NewStructuredValues(`$(params["first.param"][0])`)},
+ {Name: "first-task-second-param", Value: *NewStructuredValues(`$(params["second.param"][0])`)},
+ {Name: "first-task-third-param", Value: *NewStructuredValues(`$(params['third.param'][1])`)},
+ {Name: "first-task-fourth-param", Value: *NewStructuredValues(`$(params['fourth/param'][1])`)},
+ {Name: "first-task-fifth-param", Value: *NewStructuredValues("static value")},
},
- Tasks: []PipelineTask{{
- Params: Params{
- {Name: "first-task-first-param", Value: *NewStructuredValues(`$(params["first.param"][0])`)},
- {Name: "first-task-second-param", Value: *NewStructuredValues(`$(params["second.param"][0])`)},
- {Name: "first-task-third-param", Value: *NewStructuredValues(`$(params['third.param'][1])`)},
- {Name: "first-task-fourth-param", Value: *NewStructuredValues(`$(params['fourth/param'][1])`)},
- {Name: "first-task-fifth-param", Value: *NewStructuredValues("static value")},
- },
- }},
+ }},
+ },
+ want: sets.NewString(`$(params["first.param"][0])`, `$(params["second.param"][0])`, `$(params['third.param'][1])`, `$(params['fourth/param'][1])`),
+ }, {
+ name: "single parameter in workspace subpath",
+ spec: PipelineSpec{
+ Params: []ParamSpec{
+ {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
+ {Name: "second-param", Type: ParamTypeArray},
},
- want: sets.NewString(`$(params["first.param"][0])`, `$(params["second.param"][0])`, `$(params['third.param'][1])`, `$(params['fourth/param'][1])`),
- }, {
- name: "single parameter in workspace subpath",
- spec: PipelineSpec{
- Params: []ParamSpec{
- {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
- {Name: "second-param", Type: ParamTypeArray},
+ Tasks: []PipelineTask{{
+ Params: Params{
+ {Name: "first-task-first-param", Value: *NewStructuredValues("$(params.first-param[0])")},
+ {Name: "first-task-second-param", Value: *NewStructuredValues("static value")},
},
- Tasks: []PipelineTask{{
- Params: Params{
- {Name: "first-task-first-param", Value: *NewStructuredValues("$(params.first-param[0])")},
- {Name: "first-task-second-param", Value: *NewStructuredValues("static value")},
- },
- Workspaces: []WorkspacePipelineTaskBinding{
- {
- Name: "first-workspace",
- Workspace: "first-workspace",
- SubPath: "$(params.second-param[1])",
- },
+ Workspaces: []WorkspacePipelineTaskBinding{
+ {
+ Name: "first-workspace",
+ Workspace: "first-workspace",
+ SubPath: "$(params.second-param[1])",
},
- }},
- },
- want: sets.NewString("$(params.first-param[0])", "$(params.second-param[1])"),
+ },
+ }},
},
+ want: sets.NewString("$(params.first-param[0])", "$(params.second-param[1])"),
+ },
} {
+ tt := tt // capture range variable
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got := tt.spec.GetIndexingReferencesToArrayParams()
diff --git a/upstream/pkg/apis/pipeline/v1/pipelineref_validation.go b/upstream/pkg/apis/pipeline/v1/pipelineref_validation.go
index c23db32a50a..9fa7c9894d0 100644
--- a/upstream/pkg/apis/pipeline/v1/pipelineref_validation.go
+++ b/upstream/pkg/apis/pipeline/v1/pipelineref_validation.go
@@ -19,6 +19,7 @@ package v1
import (
"context"
+ "github.com/tektoncd/pipeline/pkg/apis/config"
"knative.dev/pkg/apis"
)
@@ -26,7 +27,28 @@ import (
// correctly. No errors are returned for a nil PipelineRef.
func (ref *PipelineRef) Validate(ctx context.Context) (errs *apis.FieldError) {
if ref == nil {
- return errs
+ return
}
- return validateRef(ctx, ref.Name, ref.Resolver, ref.Params)
+
+ if ref.Resolver != "" || ref.Params != nil {
+ if ref.Resolver != "" {
+ errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver", config.BetaAPIFields).ViaField("resolver"))
+ if ref.Name != "" {
+ errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver"))
+ }
+ }
+ if ref.Params != nil {
+ errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver params", config.BetaAPIFields).ViaField("params"))
+ if ref.Name != "" {
+ errs = errs.Also(apis.ErrMultipleOneOf("name", "params"))
+ }
+ if ref.Resolver == "" {
+ errs = errs.Also(apis.ErrMissingField("resolver"))
+ }
+ errs = errs.Also(ValidateParameters(ctx, ref.Params))
+ }
+ } else if ref.Name == "" {
+ errs = errs.Also(apis.ErrMissingField("name"))
+ }
+ return
}
diff --git a/upstream/pkg/apis/pipeline/v1/pipelineref_validation_test.go b/upstream/pkg/apis/pipeline/v1/pipelineref_validation_test.go
index 06bceb31fc9..114eb6a0f10 100644
--- a/upstream/pkg/apis/pipeline/v1/pipelineref_validation_test.go
+++ b/upstream/pkg/apis/pipeline/v1/pipelineref_validation_test.go
@@ -37,13 +37,6 @@ func TestPipelineRef_Invalid(t *testing.T) {
name: "pipelineRef without Pipeline Name",
ref: &v1.PipelineRef{},
wantErr: apis.ErrMissingField("name"),
- }, {
- name: "invalid pipelineref name",
- ref: &v1.PipelineRef{Name: "_foo"},
- wantErr: &apis.FieldError{
- Message: `invalid value: name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')`,
- Paths: []string{"name"},
- },
}, {
name: "pipelineref resolver disallowed without beta feature gate",
ref: &v1.PipelineRef{
@@ -72,45 +65,31 @@ func TestPipelineRef_Invalid(t *testing.T) {
wantErr: apis.ErrMissingField("resolver"),
withContext: cfgtesting.EnableBetaAPIFields,
}, {
- name: "pipelineRef with resolver and k8s style name",
+ name: "pipelineref resolver disallowed in conjunction with pipelineref name",
ref: &v1.PipelineRef{
Name: "foo",
ResolverRef: v1.ResolverRef{
- Resolver: "git",
+ Resolver: "bar",
},
},
- wantErr: apis.ErrInvalidValue(`invalid URI for request`, "name"),
- withContext: enableConciseResolverSyntax,
- }, {
- name: "pipelineRef with url-like name without resolver",
- ref: &v1.PipelineRef{
- Name: "https://foo.com/bar",
- },
- wantErr: apis.ErrMissingField("resolver"),
- withContext: enableConciseResolverSyntax,
+ wantErr: apis.ErrMultipleOneOf("name", "resolver"),
+ withContext: cfgtesting.EnableBetaAPIFields,
}, {
- name: "pipelineRef params disallowed in conjunction with pipelineref name",
+ name: "pipelineref params disallowed in conjunction with pipelineref name",
ref: &v1.PipelineRef{
- Name: "https://foo/bar",
+ Name: "bar",
ResolverRef: v1.ResolverRef{
- Resolver: "git",
- Params: v1.Params{{Name: "foo", Value: v1.ParamValue{StringVal: "bar"}}},
+ Params: v1.Params{{
+ Name: "foo",
+ Value: v1.ParamValue{
+ Type: v1.ParamTypeString,
+ StringVal: "bar",
+ },
+ }},
},
},
- wantErr: apis.ErrMultipleOneOf("name", "params"),
- withContext: enableConciseResolverSyntax,
- }, {
- name: "pipelineRef with url-like name without enable-concise-resolver-syntax",
- ref: &v1.PipelineRef{Name: "https://foo.com/bar"},
- wantErr: apis.ErrMissingField("resolver").Also(&apis.FieldError{
- Message: `feature flag enable-concise-resolver-syntax should be set to true to use concise resolver syntax`,
- }),
- }, {
- name: "pipelineRef without enable-concise-resolver-syntax",
- ref: &v1.PipelineRef{Name: "https://foo.com/bar", ResolverRef: v1.ResolverRef{Resolver: "git"}},
- wantErr: &apis.FieldError{
- Message: `feature flag enable-concise-resolver-syntax should be set to true to use concise resolver syntax`,
- },
+ wantErr: apis.ErrMultipleOneOf("name", "params").Also(apis.ErrMissingField("resolver")),
+ withContext: cfgtesting.EnableBetaAPIFields,
}}
for _, tc := range tests {
diff --git a/upstream/pkg/apis/pipeline/v1/pipelinerun_validation.go b/upstream/pkg/apis/pipeline/v1/pipelinerun_validation.go
index 16330aa2153..d45b00ab8e3 100644
--- a/upstream/pkg/apis/pipeline/v1/pipelinerun_validation.go
+++ b/upstream/pkg/apis/pipeline/v1/pipelinerun_validation.go
@@ -27,7 +27,6 @@ import (
"github.com/tektoncd/pipeline/pkg/apis/validate"
"github.com/tektoncd/pipeline/pkg/internal/resultref"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
- "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
"knative.dev/pkg/webhook/resourcesemantics"
@@ -56,9 +55,6 @@ func (pr *PipelineRun) Validate(ctx context.Context) *apis.FieldError {
// Validate pipelinerun spec
func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
- // Validate the spec changes
- errs = errs.Also(ps.ValidateUpdate(ctx))
-
// Must have exactly one of pipelineRef and pipelineSpec.
if ps.PipelineRef == nil && ps.PipelineSpec == nil {
errs = errs.Also(apis.ErrMissingOneOf("pipelineRef", "pipelineSpec"))
@@ -128,31 +124,6 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError)
return errs
}
-// ValidateUpdate validates the update of a PipelineRunSpec
-func (ps *PipelineRunSpec) ValidateUpdate(ctx context.Context) (errs *apis.FieldError) {
- if !apis.IsInUpdate(ctx) {
- return
- }
- oldObj, ok := apis.GetBaseline(ctx).(*PipelineRun)
- if !ok || oldObj == nil {
- return
- }
- old := &oldObj.Spec
-
- // If already in the done state, the spec cannot be modified. Otherwise, only the status field can be modified.
- tips := "Once the PipelineRun is complete, no updates are allowed"
- if !oldObj.IsDone() {
- old = old.DeepCopy()
- old.Status = ps.Status
- tips = "Once the PipelineRun has started, only status updates are allowed"
- }
- if !equality.Semantic.DeepEqual(old, ps) {
- errs = errs.Also(apis.ErrInvalidValue(tips, ""))
- }
-
- return
-}
-
func (ps *PipelineRunSpec) validatePipelineRunParameters(ctx context.Context) (errs *apis.FieldError) {
if len(ps.Params) == 0 {
return errs
@@ -315,11 +286,11 @@ func (ps *PipelineRunSpec) validatePipelineTimeout(timeout time.Duration, errorM
func validateTaskRunSpec(ctx context.Context, trs PipelineTaskRunSpec) (errs *apis.FieldError) {
if trs.StepSpecs != nil {
- errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "stepSpecs", config.BetaAPIFields).ViaField("stepSpecs"))
+ errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "stepSpecs", config.AlphaAPIFields).ViaField("stepSpecs"))
errs = errs.Also(validateStepSpecs(trs.StepSpecs).ViaField("stepSpecs"))
}
if trs.SidecarSpecs != nil {
- errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "sidecarSpecs", config.BetaAPIFields).ViaField("sidecarSpecs"))
+ errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "sidecarSpecs", config.AlphaAPIFields).ViaField("sidecarSpecs"))
errs = errs.Also(validateSidecarSpecs(trs.SidecarSpecs).ViaField("sidecarSpecs"))
}
if trs.ComputeResources != nil {
diff --git a/upstream/pkg/apis/pipeline/v1/pipelinerun_validation_test.go b/upstream/pkg/apis/pipeline/v1/pipelinerun_validation_test.go
index 42a38ed7c3f..8e7f24a0e87 100644
--- a/upstream/pkg/apis/pipeline/v1/pipelinerun_validation_test.go
+++ b/upstream/pkg/apis/pipeline/v1/pipelinerun_validation_test.go
@@ -22,7 +22,6 @@ import (
"time"
"github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
"github.com/tektoncd/pipeline/pkg/apis/config"
cfgtesting "github.com/tektoncd/pipeline/pkg/apis/config/testing"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
@@ -32,7 +31,6 @@ import (
corev1resources "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
- duckv1 "knative.dev/pkg/apis/duck/v1"
)
func TestPipelineRun_Invalid(t *testing.T) {
@@ -972,7 +970,7 @@ func TestPipelineRunSpec_Invalidate(t *testing.T) {
wantErr: apis.ErrMultipleOneOf("taskRunSpecs[0].stepSpecs[1].name"),
withContext: cfgtesting.EnableAlphaAPIFields,
}, {
- name: "stepSpecs disallowed without beta feature gate",
+ name: "stepSpecs disallowed without alpha feature gate",
spec: v1.PipelineRunSpec{
PipelineRef: &v1.PipelineRef{Name: "foo"},
TaskRunSpecs: []v1.PipelineTaskRunSpec{
@@ -988,9 +986,9 @@ func TestPipelineRunSpec_Invalidate(t *testing.T) {
},
},
withContext: cfgtesting.EnableStableAPIFields,
- wantErr: apis.ErrGeneric("stepSpecs requires \"enable-api-fields\" feature gate to be \"alpha\" or \"beta\" but it is \"stable\"").ViaIndex(0).ViaField("taskRunSpecs"),
+ wantErr: apis.ErrGeneric("stepSpecs requires \"enable-api-fields\" feature gate to be \"alpha\" but it is \"stable\"").ViaIndex(0).ViaField("taskRunSpecs"),
}, {
- name: "sidecarSpec disallowed without beta feature gate",
+ name: "sidecarSpec disallowed without alpha feature gate",
spec: v1.PipelineRunSpec{
PipelineRef: &v1.PipelineRef{Name: "foo"},
TaskRunSpecs: []v1.PipelineTaskRunSpec{
@@ -1006,7 +1004,7 @@ func TestPipelineRunSpec_Invalidate(t *testing.T) {
},
},
withContext: cfgtesting.EnableStableAPIFields,
- wantErr: apis.ErrGeneric("sidecarSpecs requires \"enable-api-fields\" feature gate to be \"alpha\" or \"beta\" but it is \"stable\"").ViaIndex(0).ViaField("taskRunSpecs"),
+ wantErr: apis.ErrGeneric("sidecarSpecs requires \"enable-api-fields\" feature gate to be \"alpha\" but it is \"stable\"").ViaIndex(0).ViaField("taskRunSpecs"),
}, {
name: "missing stepSpecs name",
spec: v1.PipelineRunSpec{
@@ -1079,7 +1077,7 @@ func TestPipelineRunSpec_Invalidate(t *testing.T) {
"taskRunSpecs[0].stepSpecs.resources",
"taskRunSpecs[0].computeResources",
),
- withContext: cfgtesting.EnableBetaAPIFields,
+ withContext: cfgtesting.EnableAlphaAPIFields,
}, {
name: "computeResources disallowed without beta feature gate",
spec: v1.PipelineRunSpec{
@@ -1142,7 +1140,7 @@ func TestPipelineRunSpec_Validate(t *testing.T) {
},
}},
},
- withContext: cfgtesting.EnableBetaAPIFields,
+ withContext: cfgtesting.EnableAlphaAPIFields,
}, {
name: "valid sidecar and task-level (taskRunSpecs.resources) resource requirements configured",
spec: v1.PipelineRunSpec{
@@ -1165,7 +1163,7 @@ func TestPipelineRunSpec_Validate(t *testing.T) {
}},
}},
},
- withContext: cfgtesting.EnableBetaAPIFields,
+ withContext: cfgtesting.EnableAlphaAPIFields,
}}
for _, ps := range tests {
@@ -1513,180 +1511,3 @@ func TestPipelineRunSpecBetaFeatures(t *testing.T) {
})
}
}
-
-func TestPipelineRunSpec_ValidateUpdate(t *testing.T) {
- tests := []struct {
- name string
- isCreate bool
- isUpdate bool
- baselinePipelineRun *v1.PipelineRun
- pipelineRun *v1.PipelineRun
- expectedError apis.FieldError
- }{
- {
- name: "is create ctx",
- pipelineRun: &v1.PipelineRun{
- Spec: v1.PipelineRunSpec{},
- },
- isCreate: true,
- isUpdate: false,
- expectedError: apis.FieldError{},
- }, {
- name: "is update ctx, no changes",
- baselinePipelineRun: &v1.PipelineRun{
- Spec: v1.PipelineRunSpec{
- Status: "",
- },
- },
- pipelineRun: &v1.PipelineRun{
- Spec: v1.PipelineRunSpec{
- Status: "",
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{},
- }, {
- name: "is update ctx, baseline is nil, skip validation",
- baselinePipelineRun: nil,
- pipelineRun: &v1.PipelineRun{
- Spec: v1.PipelineRunSpec{
- Timeouts: &v1.TimeoutFields{
- Pipeline: &metav1.Duration{Duration: 1},
- },
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{},
- }, {
- name: "is update ctx, baseline is unknown, status changes from Empty to Cancelled",
- baselinePipelineRun: &v1.PipelineRun{
- Spec: v1.PipelineRunSpec{
- Status: "",
- },
- Status: v1.PipelineRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{
- {Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown},
- },
- },
- },
- },
- pipelineRun: &v1.PipelineRun{
- Spec: v1.PipelineRunSpec{
- Status: "Cancelled",
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{},
- }, {
- name: "is update ctx, baseline is unknown, timeouts changes",
- baselinePipelineRun: &v1.PipelineRun{
- Spec: v1.PipelineRunSpec{
- Status: "",
- Timeouts: &v1.TimeoutFields{
- Pipeline: &metav1.Duration{Duration: 0},
- },
- },
- Status: v1.PipelineRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{
- {Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown},
- },
- },
- },
- },
- pipelineRun: &v1.PipelineRun{
- Spec: v1.PipelineRunSpec{
- Timeouts: &v1.TimeoutFields{
- Pipeline: &metav1.Duration{Duration: 1},
- },
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{
- Message: `invalid value: Once the PipelineRun has started, only status updates are allowed`,
- Paths: []string{""},
- },
- }, {
- name: "is update ctx, baseline is unknown, status changes from PipelineRunPending to Empty, and timeouts changes",
- baselinePipelineRun: &v1.PipelineRun{
- Spec: v1.PipelineRunSpec{
- Status: "PipelineRunPending",
- Timeouts: &v1.TimeoutFields{
- Pipeline: &metav1.Duration{Duration: 0},
- },
- },
- Status: v1.PipelineRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{
- {Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown},
- },
- },
- },
- },
- pipelineRun: &v1.PipelineRun{
- Spec: v1.PipelineRunSpec{
- Status: "",
- Timeouts: &v1.TimeoutFields{
- Pipeline: &metav1.Duration{Duration: 1},
- },
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{
- Message: `invalid value: Once the PipelineRun has started, only status updates are allowed`,
- Paths: []string{""},
- },
- }, {
- name: "is update ctx, baseline is done, status changes",
- baselinePipelineRun: &v1.PipelineRun{
- Spec: v1.PipelineRunSpec{
- Status: "PipelineRunPending",
- },
- Status: v1.PipelineRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{
- {Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue},
- },
- },
- },
- },
- pipelineRun: &v1.PipelineRun{
- Spec: v1.PipelineRunSpec{
- Status: "TaskRunCancelled",
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{
- Message: `invalid value: Once the PipelineRun is complete, no updates are allowed`,
- Paths: []string{""},
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- ctx := config.ToContext(context.Background(), &config.Config{
- FeatureFlags: &config.FeatureFlags{},
- Defaults: &config.Defaults{},
- })
- if tt.isCreate {
- ctx = apis.WithinCreate(ctx)
- }
- if tt.isUpdate {
- ctx = apis.WithinUpdate(ctx, tt.baselinePipelineRun)
- }
- pr := tt.pipelineRun
- err := pr.Spec.ValidateUpdate(ctx)
- if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" {
- t.Errorf("PipelineRunSpec.ValidateUpdate() errors diff %s", diff.PrintWantGot(d))
- }
- })
- }
-}
diff --git a/upstream/pkg/apis/pipeline/v1/result_types.go b/upstream/pkg/apis/pipeline/v1/result_types.go
index b36bf6fc658..6361d7a362d 100644
--- a/upstream/pkg/apis/pipeline/v1/result_types.go
+++ b/upstream/pkg/apis/pipeline/v1/result_types.go
@@ -40,7 +40,7 @@ type TaskResult struct {
// StepResult used to describe the Results of a Step.
//
-// This is field is at an BETA stability level and gated by "enable-step-actions" feature flag.
+// This is field is at an ALPHA stability level and gated by "enable-step-actions" feature flag.
type StepResult struct {
// Name the given name
Name string `json:"name"`
diff --git a/upstream/pkg/apis/pipeline/v1/result_validation_test.go b/upstream/pkg/apis/pipeline/v1/result_validation_test.go
index e927230f037..ed71596b90b 100644
--- a/upstream/pkg/apis/pipeline/v1/result_validation_test.go
+++ b/upstream/pkg/apis/pipeline/v1/result_validation_test.go
@@ -297,10 +297,10 @@ func TestExtractStepResultName(t *testing.T) {
t.Errorf("Did not expect an error but got: %v", err)
}
if d := cmp.Diff(tt.wantStep, gotStep); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
if d := cmp.Diff(tt.wantResult, gotResult); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
@@ -320,7 +320,7 @@ func TestExtractStepResultNameError(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
gotStep, gotResult, err := v1.ExtractStepResultName(tt.value)
if d := cmp.Diff(tt.wantErr.Error(), err.Error()); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
if gotStep != "" {
t.Errorf("Expected an empty string but got: %v", gotStep)
diff --git a/upstream/pkg/apis/pipeline/v1/resultref_test.go b/upstream/pkg/apis/pipeline/v1/resultref_test.go
index 914f4bff5a1..a5790b290fb 100644
--- a/upstream/pkg/apis/pipeline/v1/resultref_test.go
+++ b/upstream/pkg/apis/pipeline/v1/resultref_test.go
@@ -761,7 +761,7 @@ func TestHasResultReferenceWhenExpression(t *testing.T) {
}
got := v1.NewResultRefs(expressions)
if d := cmp.Diff(tt.wantRef, got); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
@@ -870,8 +870,7 @@ func TestPipelineTaskResultRefs(t *testing.T) {
Value: *v1.NewStructuredValues("$(tasks.pt5.results.r5)", "$(tasks.pt6.results.r6)"),
}, {
Value: *v1.NewStructuredValues("$(tasks.pt7.results.r7)", "$(tasks.pt8.results.r8)"),
- }},
- },
+ }}},
TaskSpec: &v1.EmbeddedTask{
TaskSpec: v1.TaskSpec{
Steps: []v1.Step{
@@ -951,12 +950,11 @@ func TestParseResultName(t *testing.T) {
name string
input string
want []string
- }{
- {
- name: "array indexing",
- input: "anArrayResult[1]",
- want: []string{"anArrayResult", "1"},
- },
+ }{{
+ name: "array indexing",
+ input: "anArrayResult[1]",
+ want: []string{"anArrayResult", "1"},
+ },
{
name: "array star reference",
input: "anArrayResult[*]",
@@ -978,38 +976,37 @@ func TestGetVarSubstitutionExpressionsForPipelineResult(t *testing.T) {
name string
result v1.PipelineResult
want []string
- }{
- {
- name: "get string result expressions",
- result: v1.PipelineResult{
- Name: "string result",
- Type: v1.ResultsTypeString,
- Value: *v1.NewStructuredValues("$(tasks.task1.results.result1) and $(tasks.task2.results.result2)"),
- },
- want: []string{"tasks.task1.results.result1", "tasks.task2.results.result2"},
- }, {
- name: "get array result expressions",
- result: v1.PipelineResult{
- Name: "array result",
- Type: v1.ResultsTypeString,
- Value: *v1.NewStructuredValues("$(tasks.task1.results.result1)", "$(tasks.task2.results.result2)"),
- },
- want: []string{"tasks.task1.results.result1", "tasks.task2.results.result2"},
- }, {
- name: "get object result expressions",
- result: v1.PipelineResult{
- Name: "object result",
- Type: v1.ResultsTypeString,
- Value: *v1.NewObject(map[string]string{
- "key1": "$(tasks.task1.results.result1)",
- "key2": "$(tasks.task2.results.result2) and another one $(tasks.task3.results.result3)",
- "key3": "no ref here",
- }),
- },
- want: []string{"tasks.task1.results.result1", "tasks.task2.results.result2", "tasks.task3.results.result3"},
+ }{{
+ name: "get string result expressions",
+ result: v1.PipelineResult{
+ Name: "string result",
+ Type: v1.ResultsTypeString,
+ Value: *v1.NewStructuredValues("$(tasks.task1.results.result1) and $(tasks.task2.results.result2)"),
+ },
+ want: []string{"tasks.task1.results.result1", "tasks.task2.results.result2"},
+ }, {
+ name: "get array result expressions",
+ result: v1.PipelineResult{
+ Name: "array result",
+ Type: v1.ResultsTypeString,
+ Value: *v1.NewStructuredValues("$(tasks.task1.results.result1)", "$(tasks.task2.results.result2)"),
+ },
+ want: []string{"tasks.task1.results.result1", "tasks.task2.results.result2"},
+ }, {
+ name: "get object result expressions",
+ result: v1.PipelineResult{
+ Name: "object result",
+ Type: v1.ResultsTypeString,
+ Value: *v1.NewObject(map[string]string{
+ "key1": "$(tasks.task1.results.result1)",
+ "key2": "$(tasks.task2.results.result2) and another one $(tasks.task3.results.result3)",
+ "key3": "no ref here",
+ }),
},
+ want: []string{"tasks.task1.results.result1", "tasks.task2.results.result2", "tasks.task3.results.result3"},
+ },
}
- sortStrings := func(x, y string) bool {
+ var sortStrings = func(x, y string) bool {
return x < y
}
for _, tt := range tests {
diff --git a/upstream/pkg/apis/pipeline/v1/swagger.json b/upstream/pkg/apis/pipeline/v1/swagger.json
index 73544e75d84..8993af2ffff 100644
--- a/upstream/pkg/apis/pipeline/v1/swagger.json
+++ b/upstream/pkg/apis/pipeline/v1/swagger.json
@@ -28,14 +28,6 @@
"default": ""
}
},
- "priorityClassName": {
- "description": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
- "type": "string"
- },
- "securityContext": {
- "description": "SecurityContext sets the security context for the pod",
- "$ref": "#/definitions/v1.PodSecurityContext"
- },
"tolerations": {
"description": "If specified, the pod's tolerations.",
"type": "array",
@@ -163,16 +155,11 @@
"description": "TaskRunStepArtifact represents an artifact produced or used by a step within a task run. It directly uses the Artifact type for its structure.",
"type": "object",
"properties": {
- "buildOutput": {
- "description": "Indicate if the artifact is a build output or a by-product",
- "type": "boolean"
- },
"name": {
- "description": "The artifact's identifying category name",
"type": "string"
},
"values": {
- "description": "A collection of values related to the artifact",
+ "description": "The artifact's identifying category name",
"type": "array",
"items": {
"default": {},
@@ -302,6 +289,7 @@
},
"spec": {
"description": "Spec is a specification of a custom task",
+ "default": {},
"$ref": "#/definitions/k8s.io.apimachinery.pkg.runtime.RawExtension"
},
"stepTemplate": {
@@ -393,6 +381,7 @@
"default": ""
},
"value": {
+ "default": {},
"$ref": "#/definitions/v1.ParamValue"
}
}
@@ -564,6 +553,7 @@
},
"value": {
"description": "Value the expression used to retrieve the value",
+ "default": {},
"$ref": "#/definitions/v1.ParamValue"
}
}
@@ -634,6 +624,7 @@
},
"value": {
"description": "Value is the result returned from the execution of this PipelineRun",
+ "default": {},
"$ref": "#/definitions/v1.ParamValue"
}
}
@@ -1338,10 +1329,6 @@
"description": "Periodic probe of Sidecar service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
"$ref": "#/definitions/v1.Probe"
},
- "restartPolicy": {
- "description": "RestartPolicy refers to kubernetes RestartPolicy. It can only be set for an initContainer and must have it's policy set to \"Always\". It is currently left optional to help support Kubernetes versions prior to 1.29 when this feature was introduced.",
- "type": "string"
- },
"script": {
"description": "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command or Args.",
"type": "string"
@@ -1598,14 +1585,6 @@
"x-kubernetes-patch-merge-key": "mountPath",
"x-kubernetes-patch-strategy": "merge"
},
- "when": {
- "description": "When is a list of when expressions that need to be true for the task to run",
- "type": "array",
- "items": {
- "default": {},
- "$ref": "#/definitions/v1.WhenExpression"
- }
- },
"workingDir": {
"description": "Step's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
"type": "string"
@@ -1632,7 +1611,7 @@
}
},
"v1.StepResult": {
- "description": "StepResult used to describe the Results of a Step.\n\nThis is field is at an BETA stability level and gated by \"enable-step-actions\" feature flag.",
+ "description": "StepResult used to describe the Results of a Step.\n\nThis is field is at an ALPHA stability level and gated by \"enable-step-actions\" feature flag.",
"type": "object",
"required": [
"name"
@@ -1688,9 +1667,6 @@
"$ref": "#/definitions/v1.Artifact"
}
},
- "provenance": {
- "$ref": "#/definitions/v1.Provenance"
- },
"results": {
"type": "array",
"items": {
@@ -1829,14 +1805,6 @@
"description": "TaskBreakpoints defines the breakpoint config for a particular Task",
"type": "object",
"properties": {
- "beforeSteps": {
- "type": "array",
- "items": {
- "type": "string",
- "default": ""
- },
- "x-kubernetes-list-type": "atomic"
- },
"onFailure": {
"description": "if enabled, pause TaskRun on failure of a step failed step will not exit",
"type": "string"
@@ -2019,6 +1987,7 @@
},
"value": {
"description": "Value the given value of the result",
+ "default": {},
"$ref": "#/definitions/v1.ParamValue"
}
}
@@ -2139,11 +2108,6 @@
"default": ""
}
},
- "artifacts": {
- "description": "Artifacts are the list of artifacts written out by the task's containers",
- "$ref": "#/definitions/v1.Artifacts",
- "x-kubernetes-list-type": "atomic"
- },
"completionTime": {
"description": "CompletionTime is the time the build completed.",
"$ref": "#/definitions/v1.Time"
@@ -2233,11 +2197,6 @@
"podName"
],
"properties": {
- "artifacts": {
- "description": "Artifacts are the list of artifacts written out by the task's containers",
- "$ref": "#/definitions/v1.Artifacts",
- "x-kubernetes-list-type": "atomic"
- },
"completionTime": {
"description": "CompletionTime is the time the build completed.",
"$ref": "#/definitions/v1.Time"
diff --git a/upstream/pkg/apis/pipeline/v1/task_validation.go b/upstream/pkg/apis/pipeline/v1/task_validation.go
index 4232d295d2b..95708574409 100644
--- a/upstream/pkg/apis/pipeline/v1/task_validation.go
+++ b/upstream/pkg/apis/pipeline/v1/task_validation.go
@@ -260,9 +260,6 @@ func validateSteps(ctx context.Context, steps []Step) (errs *apis.FieldError) {
errs = errs.Also(ValidateStepResultsVariables(ctx, s.Results, s.Script).ViaIndex(idx))
errs = errs.Also(ValidateStepResults(ctx, s.Results).ViaIndex(idx).ViaField("results"))
}
- if len(s.When) > 0 {
- errs = errs.Also(s.When.validate(ctx).ViaIndex(idx))
- }
}
return errs
}
@@ -315,10 +312,7 @@ func errorIfStepResultReferenceinField(value, fieldName string) (errs *apis.Fiel
}
func stepArtifactReferenceExists(src string) bool {
- return len(artifactref.StepArtifactRegex.FindAllStringSubmatch(src, -1)) > 0 || strings.Contains(src, "$("+artifactref.StepArtifactPathPattern+")")
-}
-func taskArtifactReferenceExists(src string) bool {
- return len(artifactref.TaskArtifactRegex.FindAllStringSubmatch(src, -1)) > 0 || strings.Contains(src, "$("+artifactref.TaskArtifactPathPattern+")")
+ return len(artifactref.StepArtifactRegex.FindAllStringSubmatch(src, -1)) > 0 || strings.Contains(src, "$(step.artifacts.path)")
}
func errorIfStepArtifactReferencedInField(value, fieldName string) (errs *apis.FieldError) {
if stepArtifactReferenceExists(value) {
@@ -384,8 +378,17 @@ func validateStepResultReference(s Step) (errs *apis.FieldError) {
}
func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.FieldError) {
- if err := validateArtifactsReferencesInStep(ctx, s); err != nil {
- return err
+ if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableArtifacts {
+ var t []string
+ t = append(t, s.Script)
+ t = append(t, s.Command...)
+ t = append(t, s.Args...)
+ for _, e := range s.Env {
+ t = append(t, e.Value)
+ }
+ if slices.ContainsFunc(t, stepArtifactReferenceExists) {
+ return errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use artifacts feature.", config.EnableArtifacts), ""))
+ }
}
if s.Ref != nil {
@@ -453,11 +456,6 @@ func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.Fi
return apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true in order to use Results in Steps.", config.EnableStepActions), "")
}
}
- if len(s.When) > 0 {
- if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableStepActions && isCreateOrUpdateAndDiverged(ctx, s) {
- return apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true in order to use When in Steps.", config.EnableStepActions), "")
- }
- }
if s.Image == "" {
errs = errs.Also(apis.ErrMissingField("Image"))
}
@@ -540,22 +538,6 @@ func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.Fi
return errs
}
-func validateArtifactsReferencesInStep(ctx context.Context, s Step) *apis.FieldError {
- if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableArtifacts {
- var t []string
- t = append(t, s.Script)
- t = append(t, s.Command...)
- t = append(t, s.Args...)
- for _, e := range s.Env {
- t = append(t, e.Value)
- }
- if slices.ContainsFunc(t, stepArtifactReferenceExists) || slices.ContainsFunc(t, taskArtifactReferenceExists) {
- return apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use artifacts feature.", config.EnableArtifacts), "")
- }
- }
- return nil
-}
-
// ValidateParameterTypes validates all the types within a slice of ParamSpecs
func ValidateParameterTypes(ctx context.Context, params []ParamSpec) (errs *apis.FieldError) {
for _, p := range params {
diff --git a/upstream/pkg/apis/pipeline/v1/task_validation_test.go b/upstream/pkg/apis/pipeline/v1/task_validation_test.go
index 0ea6993648a..8f3c02b221a 100644
--- a/upstream/pkg/apis/pipeline/v1/task_validation_test.go
+++ b/upstream/pkg/apis/pipeline/v1/task_validation_test.go
@@ -29,9 +29,7 @@ import (
"github.com/tektoncd/pipeline/test/diff"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/sets"
- "k8s.io/utils/pointer"
"knative.dev/pkg/apis"
)
@@ -335,41 +333,6 @@ func TestTaskSpecValidate(t *testing.T) {
hello world`,
}},
},
- }, {
- name: "step template included in validation with stepaction",
- fields: fields{
- Steps: []v1.Step{{
- Name: "astep",
- Ref: &v1.Ref{
- Name: "stepAction",
- },
- }},
- StepTemplate: &v1.StepTemplate{
- Image: "some-image",
- SecurityContext: &corev1.SecurityContext{
- RunAsNonRoot: pointer.Bool(true),
- },
- VolumeMounts: []corev1.VolumeMount{{
- Name: "data",
- MountPath: "/workspace/data",
- }},
- Env: []corev1.EnvVar{{
- Name: "KEEP_THIS",
- Value: "A_VALUE",
- }, {
- Name: "SOME_KEY_1",
- ValueFrom: &corev1.EnvVarSource{
- SecretKeyRef: &corev1.SecretKeySelector{
- Key: "A_KEY",
- LocalObjectReference: corev1.LocalObjectReference{Name: "A_NAME"},
- },
- },
- }, {
- Name: "SOME_KEY_2",
- Value: "VALUE_2",
- }},
- },
- },
}, {
name: "valid step with parameterized script",
fields: fields{
@@ -1787,7 +1750,7 @@ func TestTaskSpecValidateSuccessWithArtifactsRefFlagEnabled(t *testing.T) {
name: "reference step artifacts in Env",
Steps: []v1.Step{{
Image: "busybox",
- Env: []corev1.EnvVar{{Name: "AAA", Value: "$(steps.aaa.outputs.image)"}},
+ Env: []corev1.EnvVar{{Name: "AAA", Value: "$(steps.aaa.outputs)"}},
}},
},
{
@@ -1869,7 +1832,7 @@ func TestTaskSpecValidateErrorWithArtifactsRefFlagNotEnabled(t *testing.T) {
{
name: "Cannot reference step artifacts in Env without setting enable-artifacts to true",
Steps: []v1.Step{{
- Env: []corev1.EnvVar{{Name: "AAA", Value: "$(steps.aaa.outputs.image)"}},
+ Env: []corev1.EnvVar{{Name: "AAA", Value: "$(steps.aaa.outputs)"}},
}},
expectedError: apis.FieldError{
Message: fmt.Sprintf("feature flag %s should be set to true to use artifacts feature.", config.EnableArtifacts),
@@ -3128,85 +3091,3 @@ func TestTaskSpecValidate_StepResults_Error(t *testing.T) {
})
}
}
-
-func TestTaskSpecValidate_StepWhen_Error(t *testing.T) {
- tests := []struct {
- name string
- ts *v1.TaskSpec
- isCreate bool
- Results []v1.StepResult
- isUpdate bool
- baselineTaskRun *v1.TaskRun
- expectedError apis.FieldError
- EnableStepAction bool
- EnableCEL bool
- }{
- {
- name: "step when not allowed without enable step actions - create event",
- ts: &v1.TaskSpec{Steps: []v1.Step{{
- Image: "my-image",
- When: v1.StepWhenExpressions{{Input: "foo", Operator: selection.In, Values: []string{"foo"}}},
- }}},
- isCreate: true,
- expectedError: apis.FieldError{
- Message: "feature flag enable-step-actions should be set to true in order to use When in Steps.",
- Paths: []string{"steps[0]"},
- },
- },
- {
- name: "step when not allowed without enable step actions - update and diverged event",
- ts: &v1.TaskSpec{Steps: []v1.Step{{
- Image: "my-image",
- When: v1.StepWhenExpressions{{Input: "foo", Operator: selection.In, Values: []string{"foo"}}},
- }}},
- isUpdate: true,
- baselineTaskRun: &v1.TaskRun{
- Spec: v1.TaskRunSpec{
- TaskSpec: &v1.TaskSpec{
- Steps: []v1.Step{{
- Image: "my-image",
- Results: []v1.StepResult{{Name: "a-result"}},
- }},
- },
- },
- },
- expectedError: apis.FieldError{
- Message: "feature flag enable-step-actions should be set to true in order to use When in Steps.",
- Paths: []string{"steps[0]"},
- },
- },
- {
- name: "cel not allowed if EnableCELInWhenExpression is false",
- ts: &v1.TaskSpec{Steps: []v1.Step{{
- Image: "my-image",
- When: v1.StepWhenExpressions{{CEL: "'d'=='d'"}},
- }}},
- EnableStepAction: true,
- expectedError: apis.FieldError{
- Message: `feature flag enable-cel-in-whenexpression should be set to true to use CEL: 'd'=='d' in WhenExpression`,
- Paths: []string{"steps[0].when[0]"},
- },
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- ctx := config.ToContext(context.Background(), &config.Config{
- FeatureFlags: &config.FeatureFlags{
- EnableStepActions: tt.EnableStepAction,
- EnableCELInWhenExpression: tt.EnableCEL,
- },
- })
- if tt.isCreate {
- ctx = apis.WithinCreate(ctx)
- }
- if tt.isUpdate {
- ctx = apis.WithinUpdate(ctx, tt.baselineTaskRun)
- }
- tt.ts.SetDefaults(ctx)
- err := tt.ts.Validate(ctx)
- if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" {
- t.Errorf("StepActionSpec.Validate() errors diff %s", diff.PrintWantGot(d))
- }
- })
- }
-}
diff --git a/upstream/pkg/apis/pipeline/v1/taskref_validation.go b/upstream/pkg/apis/pipeline/v1/taskref_validation.go
index bbc5fbbfc48..4f4e0330350 100644
--- a/upstream/pkg/apis/pipeline/v1/taskref_validation.go
+++ b/upstream/pkg/apis/pipeline/v1/taskref_validation.go
@@ -18,7 +18,10 @@ package v1
import (
"context"
+ "strings"
+ "github.com/tektoncd/pipeline/pkg/apis/config"
+ "k8s.io/apimachinery/pkg/util/validation"
"knative.dev/pkg/apis"
)
@@ -28,5 +31,32 @@ func (ref *TaskRef) Validate(ctx context.Context) (errs *apis.FieldError) {
if ref == nil {
return errs
}
- return validateRef(ctx, ref.Name, ref.Resolver, ref.Params)
+
+ switch {
+ case ref.Resolver != "" || ref.Params != nil:
+ if ref.Resolver != "" {
+ errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver", config.BetaAPIFields).ViaField("resolver"))
+ if ref.Name != "" {
+ errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver"))
+ }
+ }
+ if ref.Params != nil {
+ errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver params", config.BetaAPIFields).ViaField("params"))
+ if ref.Name != "" {
+ errs = errs.Also(apis.ErrMultipleOneOf("name", "params"))
+ }
+ if ref.Resolver == "" {
+ errs = errs.Also(apis.ErrMissingField("resolver"))
+ }
+ errs = errs.Also(ValidateParameters(ctx, ref.Params))
+ }
+ case ref.Name != "":
+ // TaskRef name must be a valid k8s name
+ if errSlice := validation.IsQualifiedName(ref.Name); len(errSlice) != 0 {
+ errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name"))
+ }
+ default:
+ errs = errs.Also(apis.ErrMissingField("name"))
+ }
+ return errs
}
diff --git a/upstream/pkg/apis/pipeline/v1/taskref_validation_test.go b/upstream/pkg/apis/pipeline/v1/taskref_validation_test.go
index a98827470e5..d2c721bc6a1 100644
--- a/upstream/pkg/apis/pipeline/v1/taskref_validation_test.go
+++ b/upstream/pkg/apis/pipeline/v1/taskref_validation_test.go
@@ -120,45 +120,31 @@ func TestTaskRef_Invalid(t *testing.T) {
wantErr: apis.ErrMissingField("resolver"),
wc: cfgtesting.EnableBetaAPIFields,
}, {
- name: "taskRef with resolver and k8s style name",
+ name: "taskref resolver disallowed in conjunction with taskref name",
taskRef: &v1.TaskRef{
Name: "foo",
ResolverRef: v1.ResolverRef{
Resolver: "git",
},
},
- wantErr: apis.ErrInvalidValue(`invalid URI for request`, "name"),
- wc: enableConciseResolverSyntax,
- }, {
- name: "taskRef with url-like name without resolver",
- taskRef: &v1.TaskRef{
- Name: "https://foo.com/bar",
- },
- wantErr: apis.ErrMissingField("resolver"),
- wc: enableConciseResolverSyntax,
+ wantErr: apis.ErrMultipleOneOf("name", "resolver"),
+ wc: cfgtesting.EnableBetaAPIFields,
}, {
- name: "taskRef params disallowed in conjunction with pipelineref name",
+ name: "taskref params disallowed in conjunction with taskref name",
taskRef: &v1.TaskRef{
- Name: "https://foo/bar",
+ Name: "bar",
ResolverRef: v1.ResolverRef{
- Resolver: "git",
- Params: v1.Params{{Name: "foo", Value: v1.ParamValue{StringVal: "bar"}}},
+ Params: v1.Params{{
+ Name: "foo",
+ Value: v1.ParamValue{
+ Type: v1.ParamTypeString,
+ StringVal: "bar",
+ },
+ }},
},
},
- wantErr: apis.ErrMultipleOneOf("name", "params"),
- wc: enableConciseResolverSyntax,
- }, {
- name: "taskRef with url-like name without enable-concise-resolver-syntax",
- taskRef: &v1.TaskRef{Name: "https://foo.com/bar"},
- wantErr: apis.ErrMissingField("resolver").Also(&apis.FieldError{
- Message: `feature flag enable-concise-resolver-syntax should be set to true to use concise resolver syntax`,
- }),
- }, {
- name: "taskRef without enable-concise-resolver-syntax",
- taskRef: &v1.TaskRef{Name: "https://foo.com/bar", ResolverRef: v1.ResolverRef{Resolver: "git"}},
- wantErr: &apis.FieldError{
- Message: `feature flag enable-concise-resolver-syntax should be set to true to use concise resolver syntax`,
- },
+ wantErr: apis.ErrMultipleOneOf("name", "params").Also(apis.ErrMissingField("resolver")),
+ wc: cfgtesting.EnableBetaAPIFields,
}}
for _, ts := range tests {
t.Run(ts.name, func(t *testing.T) {
diff --git a/upstream/pkg/apis/pipeline/v1/taskrun_types.go b/upstream/pkg/apis/pipeline/v1/taskrun_types.go
index b5305937260..615eaaa788c 100644
--- a/upstream/pkg/apis/pipeline/v1/taskrun_types.go
+++ b/upstream/pkg/apis/pipeline/v1/taskrun_types.go
@@ -26,7 +26,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/clock"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
@@ -122,9 +121,6 @@ type TaskBreakpoints struct {
// failed step will not exit
// +optional
OnFailure string `json:"onFailure,omitempty"`
- // +optional
- // +listType=atomic
- BeforeSteps []string `json:"beforeSteps,omitempty"`
}
// NeedsDebugOnFailure return true if the TaskRun is configured to debug on failure
@@ -135,28 +131,14 @@ func (trd *TaskRunDebug) NeedsDebugOnFailure() bool {
return trd.Breakpoints.OnFailure == EnabledOnFailureBreakpoint
}
-// NeedsDebugBeforeStep return true if the step is configured to debug before execution
-func (trd *TaskRunDebug) NeedsDebugBeforeStep(stepName string) bool {
- if trd.Breakpoints == nil {
- return false
- }
- beforeStepSets := sets.NewString(trd.Breakpoints.BeforeSteps...)
- return beforeStepSets.Has(stepName)
-}
-
// StepNeedsDebug return true if the step is configured to debug
func (trd *TaskRunDebug) StepNeedsDebug(stepName string) bool {
- return trd.NeedsDebugOnFailure() || trd.NeedsDebugBeforeStep(stepName)
+ return trd.NeedsDebugOnFailure()
}
// NeedsDebug return true if defined onfailure or have any before, after steps
func (trd *TaskRunDebug) NeedsDebug() bool {
- return trd.NeedsDebugOnFailure() || trd.HaveBeforeSteps()
-}
-
-// HaveBeforeSteps return true if have any before steps
-func (trd *TaskRunDebug) HaveBeforeSteps() bool {
- return trd.Breakpoints != nil && len(trd.Breakpoints.BeforeSteps) > 0
+ return trd.NeedsDebugOnFailure()
}
// TaskRunInputs holds the input values that this task was invoked with.
@@ -297,11 +279,6 @@ type TaskRunStatusFields struct {
// +listType=atomic
Results []TaskRunResult `json:"results,omitempty"`
- // Artifacts are the list of artifacts written out by the task's containers
- // +optional
- // +listType=atomic
- Artifacts *Artifacts `json:"artifacts,omitempty"`
-
// The list has one entry per sidecar in the manifest. Each entry is
// represents the imageid of the corresponding sidecar.
// +listType=atomic
@@ -382,7 +359,6 @@ type StepState struct {
Container string `json:"container,omitempty"`
ImageID string `json:"imageID,omitempty"`
Results []TaskRunStepResult `json:"results,omitempty"`
- Provenance *Provenance `json:"provenance,omitempty"`
TerminationReason string `json:"terminationReason,omitempty"`
Inputs []TaskRunStepArtifact `json:"inputs,omitempty"`
Outputs []TaskRunStepArtifact `json:"outputs,omitempty"`
diff --git a/upstream/pkg/apis/pipeline/v1/taskrun_types_test.go b/upstream/pkg/apis/pipeline/v1/taskrun_types_test.go
index a4b84be3549..63c16ae2a40 100644
--- a/upstream/pkg/apis/pipeline/v1/taskrun_types_test.go
+++ b/upstream/pkg/apis/pipeline/v1/taskrun_types_test.go
@@ -31,10 +31,8 @@ import (
duckv1 "knative.dev/pkg/apis/duck/v1"
)
-var (
- now = time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC)
- testClock = clock.NewFakePassiveClock(now)
-)
+var now = time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC)
+var testClock = clock.NewFakePassiveClock(now)
func TestTaskRun_GetPipelineRunPVCName(t *testing.T) {
tests := []struct {
@@ -369,7 +367,7 @@ func TestHasTimedOut(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
result := tc.taskRun.HasTimedOut(context.Background(), testClock)
if d := cmp.Diff(tc.expectedStatus, result); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
})
}
@@ -409,57 +407,6 @@ func TestInitializeTaskRunConditions(t *testing.T) {
}
}
-func TestIsDebugBeforeStep(t *testing.T) {
- type args struct {
- stepName string
- trd *v1.TaskRunDebug
- }
- testCases := []struct {
- name string
- args args
- want bool
- }{
- {
- name: "empty breakpoints",
- args: args{
- stepName: "step1",
- trd: &v1.TaskRunDebug{},
- },
- want: false,
- }, {
- name: "breakpoint before step",
- args: args{
- stepName: "step1",
- trd: &v1.TaskRunDebug{
- Breakpoints: &v1.TaskBreakpoints{
- BeforeSteps: []string{"step1", "step2"},
- },
- },
- },
- want: true,
- }, {
- name: "step not in before step breakpoint",
- args: args{
- stepName: "step3",
- trd: &v1.TaskRunDebug{
- Breakpoints: &v1.TaskBreakpoints{
- BeforeSteps: []string{"step1", "step2"},
- },
- },
- },
- want: false,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- result := tc.args.trd.NeedsDebugBeforeStep(tc.args.stepName)
- if d := cmp.Diff(result, tc.want); d != "" {
- t.Fatal(diff.PrintWantGot(d))
- }
- })
- }
-}
-
func TestIsStepNeedDebug(t *testing.T) {
type args struct {
stepName string
@@ -488,24 +435,13 @@ func TestIsStepNeedDebug(t *testing.T) {
},
},
want: true,
- }, {
- name: "breakpoint before step",
- args: args{
- stepName: "step1",
- trd: &v1.TaskRunDebug{
- Breakpoints: &v1.TaskBreakpoints{
- BeforeSteps: []string{"step1"},
- },
- },
- },
- want: true,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
result := tc.args.trd.StepNeedsDebug(tc.args.stepName)
if d := cmp.Diff(tc.want, result); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
})
}
@@ -536,23 +472,13 @@ func TestIsNeedDebug(t *testing.T) {
},
},
want: true,
- }, {
- name: "breakpoint before step",
- args: args{
- trd: &v1.TaskRunDebug{
- Breakpoints: &v1.TaskBreakpoints{
- BeforeSteps: []string{"step1"},
- },
- },
- },
- want: true,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
result := tc.args.trd.NeedsDebug()
if d := cmp.Diff(tc.want, result); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
})
}
@@ -585,7 +511,7 @@ func TestTaskRunIsRetriable(t *testing.T) {
wantIsRetriable: false,
}} {
retriesStatus := []v1.TaskRunStatus{}
- for range tc.numRetriesStatus {
+ for i := 0; i < tc.numRetriesStatus; i++ {
retriesStatus = append(retriesStatus, retryStatus)
}
t.Run(tc.name, func(t *testing.T) {
diff --git a/upstream/pkg/apis/pipeline/v1/taskrun_validation.go b/upstream/pkg/apis/pipeline/v1/taskrun_validation.go
index e162672a666..771d684d40c 100644
--- a/upstream/pkg/apis/pipeline/v1/taskrun_validation.go
+++ b/upstream/pkg/apis/pipeline/v1/taskrun_validation.go
@@ -26,7 +26,6 @@ import (
"github.com/tektoncd/pipeline/pkg/apis/validate"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/strings/slices"
"knative.dev/pkg/apis"
@@ -51,9 +50,6 @@ func (tr *TaskRun) Validate(ctx context.Context) *apis.FieldError {
// Validate taskrun spec
func (ts *TaskRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
- // Validate the spec changes
- errs = errs.Also(ts.ValidateUpdate(ctx))
-
// Must have exactly one of taskRef and taskSpec.
if ts.TaskRef == nil && ts.TaskSpec == nil {
errs = errs.Also(apis.ErrMissingOneOf("taskRef", "taskSpec"))
@@ -84,11 +80,11 @@ func (ts *TaskRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
errs = errs.Also(validateDebug(ts.Debug).ViaField("debug"))
}
if ts.StepSpecs != nil {
- errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "stepSpecs", config.BetaAPIFields).ViaField("stepSpecs"))
+ errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "stepSpecs", config.AlphaAPIFields).ViaField("stepSpecs"))
errs = errs.Also(validateStepSpecs(ts.StepSpecs).ViaField("stepSpecs"))
}
if ts.SidecarSpecs != nil {
- errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "sidecarSpecs", config.BetaAPIFields).ViaField("sidecarSpecs"))
+ errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "sidecarSpecs", config.AlphaAPIFields).ViaField("sidecarSpecs"))
errs = errs.Also(validateSidecarSpecs(ts.SidecarSpecs).ViaField("sidecarSpecs"))
}
if ts.ComputeResources != nil {
@@ -122,34 +118,6 @@ func (ts *TaskRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
return errs
}
-// ValidateUpdate validates the update of a TaskRunSpec
-func (ts *TaskRunSpec) ValidateUpdate(ctx context.Context) (errs *apis.FieldError) {
- if !apis.IsInUpdate(ctx) {
- return
- }
- oldObj, ok := apis.GetBaseline(ctx).(*TaskRun)
- if !ok || oldObj == nil {
- return
- }
- old := &oldObj.Spec
-
- // If already in the done state, the spec cannot be modified.
- // Otherwise, only the status, statusMessage field can be modified.
- tips := "Once the TaskRun is complete, no updates are allowed"
- if !oldObj.IsDone() {
- old = old.DeepCopy()
- old.Status = ts.Status
- old.StatusMessage = ts.StatusMessage
- tips = "Once the TaskRun has started, only status and statusMessage updates are allowed"
- }
-
- if !equality.Semantic.DeepEqual(old, ts) {
- errs = errs.Also(apis.ErrInvalidValue(tips, ""))
- }
-
- return
-}
-
// validateInlineParameters validates that any parameters called in the
// Task spec are declared in the TaskRun.
// This is crucial for propagated parameters because the parameters could
@@ -256,21 +224,9 @@ func validateDebug(db *TaskRunDebug) (errs *apis.FieldError) {
if db == nil || db.Breakpoints == nil {
return errs
}
-
- if db.Breakpoints.OnFailure == "" {
- errs = errs.Also(apis.ErrInvalidValue("onFailure breakpoint is empty, it is only allowed to be set as enabled", "breakpoints.onFailure"))
- }
-
if db.Breakpoints.OnFailure != "" && db.Breakpoints.OnFailure != EnabledOnFailureBreakpoint {
errs = errs.Also(apis.ErrInvalidValue(db.Breakpoints.OnFailure+" is not a valid onFailure breakpoint value, onFailure breakpoint is only allowed to be set as enabled", "breakpoints.onFailure"))
}
- beforeSteps := sets.NewString()
- for i, step := range db.Breakpoints.BeforeSteps {
- if beforeSteps.Has(step) {
- errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("before step must be unique, the same step: %s is defined multiple times at", step), fmt.Sprintf("breakpoints.beforeSteps[%d]", i)))
- }
- beforeSteps.Insert(step)
- }
return errs
}
diff --git a/upstream/pkg/apis/pipeline/v1/taskrun_validation_test.go b/upstream/pkg/apis/pipeline/v1/taskrun_validation_test.go
index 9d3018b7367..978b3164a1c 100644
--- a/upstream/pkg/apis/pipeline/v1/taskrun_validation_test.go
+++ b/upstream/pkg/apis/pipeline/v1/taskrun_validation_test.go
@@ -22,7 +22,6 @@ import (
"time"
"github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
"github.com/tektoncd/pipeline/pkg/apis/config"
cfgtesting "github.com/tektoncd/pipeline/pkg/apis/config/testing"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
@@ -32,7 +31,6 @@ import (
corev1resources "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
- duckv1 "knative.dev/pkg/apis/duck/v1"
)
func TestTaskRun_Invalidate(t *testing.T) {
@@ -418,7 +416,7 @@ func TestTaskRun_Validate(t *testing.T) {
},
wc: cfgtesting.EnableAlphaAPIFields,
}, {
- name: "beta feature: valid step and sidecar specs",
+ name: "alpha feature: valid step and sidecar specs",
taskRun: &v1.TaskRun{
ObjectMeta: metav1.ObjectMeta{Name: "tr"},
Spec: v1.TaskRunSpec{
@@ -437,7 +435,7 @@ func TestTaskRun_Validate(t *testing.T) {
}},
},
},
- wc: cfgtesting.EnableBetaAPIFields,
+ wc: cfgtesting.EnableAlphaAPIFields,
}}
for _, ts := range tests {
t.Run(ts.name, func(t *testing.T) {
@@ -706,36 +704,7 @@ func TestTaskRunSpec_Invalidate(t *testing.T) {
wantErr: apis.ErrInvalidValue("turnOn is not a valid onFailure breakpoint value, onFailure breakpoint is only allowed to be set as enabled", "debug.breakpoints.onFailure"),
wc: cfgtesting.EnableAlphaAPIFields,
}, {
- name: "invalid breakpoint duplicate before steps",
- spec: v1.TaskRunSpec{
- TaskRef: &v1.TaskRef{
- Name: "my-task",
- },
- Debug: &v1.TaskRunDebug{
- Breakpoints: &v1.TaskBreakpoints{
- BeforeSteps: []string{"step-1", "step-1"},
- OnFailure: "enabled",
- },
- },
- },
- wantErr: apis.ErrGeneric("before step must be unique, the same step: step-1 is defined multiple times at", "debug.breakpoints.beforeSteps[1]"),
- wc: cfgtesting.EnableAlphaAPIFields,
- }, {
- name: "empty onFailure breakpoint",
- spec: v1.TaskRunSpec{
- TaskRef: &v1.TaskRef{
- Name: "my-task",
- },
- Debug: &v1.TaskRunDebug{
- Breakpoints: &v1.TaskBreakpoints{
- OnFailure: "",
- },
- },
- },
- wantErr: apis.ErrInvalidValue("onFailure breakpoint is empty, it is only allowed to be set as enabled", "debug.breakpoints.onFailure"),
- wc: cfgtesting.EnableAlphaAPIFields,
- }, {
- name: "stepSpecs disallowed without beta feature gate",
+ name: "stepSpecs disallowed without alpha feature gate",
spec: v1.TaskRunSpec{
TaskRef: &v1.TaskRef{
Name: "foo",
@@ -748,9 +717,9 @@ func TestTaskRunSpec_Invalidate(t *testing.T) {
}},
},
wc: cfgtesting.EnableStableAPIFields,
- wantErr: apis.ErrGeneric("stepSpecs requires \"enable-api-fields\" feature gate to be \"alpha\" or \"beta\" but it is \"stable\""),
+ wantErr: apis.ErrGeneric("stepSpecs requires \"enable-api-fields\" feature gate to be \"alpha\" but it is \"stable\""),
}, {
- name: "sidecarSpec disallowed without beta feature gate",
+ name: "sidecarSpec disallowed without alpha feature gate",
spec: v1.TaskRunSpec{
TaskRef: &v1.TaskRef{
Name: "foo",
@@ -763,7 +732,7 @@ func TestTaskRunSpec_Invalidate(t *testing.T) {
}},
},
wc: cfgtesting.EnableStableAPIFields,
- wantErr: apis.ErrGeneric("sidecarSpecs requires \"enable-api-fields\" feature gate to be \"alpha\" or \"beta\" but it is \"stable\""),
+ wantErr: apis.ErrGeneric("sidecarSpecs requires \"enable-api-fields\" feature gate to be \"alpha\" but it is \"stable\""),
}, {
name: "duplicate stepSpecs names",
spec: v1.TaskRunSpec{
@@ -846,7 +815,7 @@ func TestTaskRunSpec_Invalidate(t *testing.T) {
"stepSpecs.resources",
"computeResources",
),
- wc: cfgtesting.EnableBetaAPIFields,
+ wc: cfgtesting.EnableAlphaAPIFields,
}, {
name: "computeResources disallowed without beta feature gate",
spec: v1.TaskRunSpec{
@@ -985,148 +954,3 @@ func TestTaskRunSpec_Validate(t *testing.T) {
})
}
}
-
-func TestTaskRunSpec_ValidateUpdate(t *testing.T) {
- tests := []struct {
- name string
- isCreate bool
- isUpdate bool
- baselineTaskRun *v1.TaskRun
- taskRun *v1.TaskRun
- expectedError apis.FieldError
- }{
- {
- name: "is create ctx",
- taskRun: &v1.TaskRun{
- Spec: v1.TaskRunSpec{},
- },
- isCreate: true,
- isUpdate: false,
- expectedError: apis.FieldError{},
- }, {
- name: "is update ctx, no changes",
- baselineTaskRun: &v1.TaskRun{
- Spec: v1.TaskRunSpec{
- Status: "TaskRunCancelled",
- },
- },
- taskRun: &v1.TaskRun{
- Spec: v1.TaskRunSpec{
- Status: "TaskRunCancelled",
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{},
- }, {
- name: "is update ctx, baseline is nil, skip validation",
- baselineTaskRun: nil,
- taskRun: &v1.TaskRun{
- Spec: v1.TaskRunSpec{
- Timeout: &metav1.Duration{Duration: 1},
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{},
- }, {
- name: "is update ctx, baseline is unknown, only status changes",
- baselineTaskRun: &v1.TaskRun{
- Spec: v1.TaskRunSpec{
- Status: "",
- StatusMessage: "",
- },
- Status: v1.TaskRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{
- {Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown},
- },
- },
- },
- },
- taskRun: &v1.TaskRun{
- Spec: v1.TaskRunSpec{
- Status: "TaskRunCancelled",
- StatusMessage: "TaskRun is cancelled",
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{},
- }, {
- name: "is update ctx, baseline is unknown, status and timeout changes",
- baselineTaskRun: &v1.TaskRun{
- Spec: v1.TaskRunSpec{
- Status: "",
- StatusMessage: "",
- Timeout: &metav1.Duration{Duration: 0},
- },
- Status: v1.TaskRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{
- {Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown},
- },
- },
- },
- },
- taskRun: &v1.TaskRun{
- Spec: v1.TaskRunSpec{
- Status: "TaskRunCancelled",
- StatusMessage: "TaskRun is cancelled",
- Timeout: &metav1.Duration{Duration: 1},
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{
- Message: `invalid value: Once the TaskRun has started, only status and statusMessage updates are allowed`,
- Paths: []string{""},
- },
- }, {
- name: "is update ctx, baseline is done, status changes",
- baselineTaskRun: &v1.TaskRun{
- Spec: v1.TaskRunSpec{
- Status: "",
- },
- Status: v1.TaskRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{
- {Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue},
- },
- },
- },
- },
- taskRun: &v1.TaskRun{
- Spec: v1.TaskRunSpec{
- Status: "TaskRunCancelled",
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{
- Message: `invalid value: Once the TaskRun is complete, no updates are allowed`,
- Paths: []string{""},
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- ctx := config.ToContext(context.Background(), &config.Config{
- FeatureFlags: &config.FeatureFlags{},
- Defaults: &config.Defaults{},
- })
- if tt.isCreate {
- ctx = apis.WithinCreate(ctx)
- }
- if tt.isUpdate {
- ctx = apis.WithinUpdate(ctx, tt.baselineTaskRun)
- }
- tr := tt.taskRun
- err := tr.Spec.ValidateUpdate(ctx)
- if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" {
- t.Errorf("TaskRunSpec.ValidateUpdate() errors diff %s", diff.PrintWantGot(d))
- }
- })
- }
-}
diff --git a/upstream/pkg/apis/pipeline/v1/when_types.go b/upstream/pkg/apis/pipeline/v1/when_types.go
index 0a128d8f9e5..66e7164a779 100644
--- a/upstream/pkg/apis/pipeline/v1/when_types.go
+++ b/upstream/pkg/apis/pipeline/v1/when_types.go
@@ -98,8 +98,6 @@ func (we *WhenExpression) GetVarSubstitutionExpressions() ([]string, bool) {
// All of them need to evaluate to True for a guarded Task to be executed.
type WhenExpressions []WhenExpression
-type StepWhenExpressions = WhenExpressions
-
// AllowsExecution evaluates an Input's relationship to an array of Values, based on the Operator,
// to determine whether all the When Expressions are True. If they are all True, the guarded Task is
// executed, otherwise it is skipped.
diff --git a/upstream/pkg/apis/pipeline/v1/when_validation.go b/upstream/pkg/apis/pipeline/v1/when_validation.go
index a62621a69ff..2a058299a7d 100644
--- a/upstream/pkg/apis/pipeline/v1/when_validation.go
+++ b/upstream/pkg/apis/pipeline/v1/when_validation.go
@@ -48,7 +48,7 @@ func (wes WhenExpressions) validateWhenExpressionsFields(ctx context.Context) (e
func (we *WhenExpression) validateWhenExpressionFields(ctx context.Context) *apis.FieldError {
if we.CEL != "" {
if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableCELInWhenExpression {
- return apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use CEL: %s in WhenExpression", config.EnableCELInWhenExpression, we.CEL), "")
+ return apis.ErrGeneric("feature flag %s should be set to true to use CEL: %s in WhenExpression", config.EnableCELInWhenExpression, we.CEL)
}
if we.Input != "" || we.Operator != "" || len(we.Values) != 0 {
return apis.ErrGeneric(fmt.Sprintf("cel and input+operator+values cannot be set in one WhenExpression: %v", we))
diff --git a/upstream/pkg/apis/pipeline/v1/workspace_validation_test.go b/upstream/pkg/apis/pipeline/v1/workspace_validation_test.go
index f588e710bef..cd93c678500 100644
--- a/upstream/pkg/apis/pipeline/v1/workspace_validation_test.go
+++ b/upstream/pkg/apis/pipeline/v1/workspace_validation_test.go
@@ -50,7 +50,7 @@ func TestWorkspaceBindingValidateValid(t *testing.T) {
},
Spec: corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
- Resources: corev1.VolumeResourceRequirements{
+ Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
"storage": resource.MustParse("1Gi"),
},
diff --git a/upstream/pkg/apis/pipeline/v1/zz_generated.deepcopy.go b/upstream/pkg/apis/pipeline/v1/zz_generated.deepcopy.go
index c3057d33561..16e483fcec1 100644
--- a/upstream/pkg/apis/pipeline/v1/zz_generated.deepcopy.go
+++ b/upstream/pkg/apis/pipeline/v1/zz_generated.deepcopy.go
@@ -1248,11 +1248,6 @@ func (in *Sidecar) DeepCopyInto(out *Sidecar) {
*out = make([]WorkspaceUsage, len(*in))
copy(*out, *in)
}
- if in.RestartPolicy != nil {
- in, out := &in.RestartPolicy, &out.RestartPolicy
- *out = new(corev1.ContainerRestartPolicy)
- **out = **in
- }
return
}
@@ -1390,13 +1385,6 @@ func (in *Step) DeepCopyInto(out *Step) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- if in.When != nil {
- in, out := &in.When, &out.When
- *out = make(WhenExpressions, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
return
}
@@ -1460,11 +1448,6 @@ func (in *StepState) DeepCopyInto(out *StepState) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- if in.Provenance != nil {
- in, out := &in.Provenance, &out.Provenance
- *out = new(Provenance)
- (*in).DeepCopyInto(*out)
- }
if in.Inputs != nil {
in, out := &in.Inputs, &out.Inputs
*out = make([]Artifact, len(*in))
@@ -1580,11 +1563,6 @@ func (in *Task) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskBreakpoints) DeepCopyInto(out *TaskBreakpoints) {
*out = *in
- if in.BeforeSteps != nil {
- in, out := &in.BeforeSteps, &out.BeforeSteps
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
return
}
@@ -1710,7 +1688,7 @@ func (in *TaskRunDebug) DeepCopyInto(out *TaskRunDebug) {
if in.Breakpoints != nil {
in, out := &in.Breakpoints, &out.Breakpoints
*out = new(TaskBreakpoints)
- (*in).DeepCopyInto(*out)
+ **out = **in
}
return
}
@@ -1939,11 +1917,6 @@ func (in *TaskRunStatusFields) DeepCopyInto(out *TaskRunStatusFields) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- if in.Artifacts != nil {
- in, out := &in.Artifacts, &out.Artifacts
- *out = new(Artifacts)
- (*in).DeepCopyInto(*out)
- }
if in.Sidecars != nil {
in, out := &in.Sidecars, &out.Sidecars
*out = make([]SidecarState, len(*in))
diff --git a/upstream/pkg/apis/pipeline/v1alpha1/openapi_generated.go b/upstream/pkg/apis/pipeline/v1alpha1/openapi_generated.go
index cbcdc1ddf82..93704735c87 100644
--- a/upstream/pkg/apis/pipeline/v1alpha1/openapi_generated.go
+++ b/upstream/pkg/apis/pipeline/v1alpha1/openapi_generated.go
@@ -109,24 +109,11 @@ func schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref common.Reference
},
},
},
- "securityContext": {
- SchemaProps: spec.SchemaProps{
- Description: "SecurityContext sets the security context for the pod",
- Ref: ref("k8s.io/api/core/v1.PodSecurityContext"),
- },
- },
- "priorityClassName": {
- SchemaProps: spec.SchemaProps{
- Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
- Type: []string{"string"},
- Format: "",
- },
- },
},
},
},
Dependencies: []string{
- "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"},
+ "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.Toleration"},
}
}
@@ -404,6 +391,7 @@ func schema_pkg_apis_pipeline_v1alpha1_EmbeddedRunSpec(ref common.ReferenceCallb
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec is a specification of a custom task",
+ Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
},
},
diff --git a/upstream/pkg/apis/pipeline/v1alpha1/run_types_test.go b/upstream/pkg/apis/pipeline/v1alpha1/run_types_test.go
index 50d8cd59689..f805882352d 100644
--- a/upstream/pkg/apis/pipeline/v1alpha1/run_types_test.go
+++ b/upstream/pkg/apis/pipeline/v1alpha1/run_types_test.go
@@ -34,10 +34,8 @@ import (
duckv1 "knative.dev/pkg/apis/duck/v1"
)
-var (
- now = time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC)
- testClock = clock.NewFakePassiveClock(now)
-)
+var now = time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC)
+var testClock = clock.NewFakePassiveClock(now)
func TestGetParams(t *testing.T) {
for _, c := range []struct {
@@ -345,7 +343,7 @@ func TestRunGetTimeOut(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
result := tc.run.GetTimeout()
if d := cmp.Diff(tc.expectedValue, result); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
})
}
@@ -368,8 +366,7 @@ func TestRunHasTimedOut(t *testing.T) {
RunStatusFields: v1alpha1.RunStatusFields{
StartTime: &metav1.Time{Time: now},
},
- },
- },
+ }},
expectedValue: false,
}, {
name: "runWithStartTimeNoTimeout2",
@@ -379,8 +376,7 @@ func TestRunHasTimedOut(t *testing.T) {
RunStatusFields: v1alpha1.RunStatusFields{
StartTime: &metav1.Time{Time: now.Add(-1 * (apisconfig.DefaultTimeoutMinutes + 1) * time.Minute)},
},
- },
- },
+ }},
expectedValue: true,
}, {
name: "runWithStartTimeAndTimeout",
@@ -389,8 +385,7 @@ func TestRunHasTimedOut(t *testing.T) {
Spec: v1alpha1.RunSpec{Timeout: &metav1.Duration{Duration: 10 * time.Second}},
Status: v1alpha1.RunStatus{RunStatusFields: v1alpha1.RunStatusFields{
StartTime: &metav1.Time{Time: now.Add(-1 * (apisconfig.DefaultTimeoutMinutes + 1) * time.Minute)},
- }},
- },
+ }}},
expectedValue: true,
}, {
name: "runWithNoStartTimeAndTimeout",
@@ -406,8 +401,7 @@ func TestRunHasTimedOut(t *testing.T) {
Spec: v1alpha1.RunSpec{Timeout: &metav1.Duration{Duration: 10 * time.Second}},
Status: v1alpha1.RunStatus{RunStatusFields: v1alpha1.RunStatusFields{
StartTime: &metav1.Time{Time: now},
- }},
- },
+ }}},
expectedValue: false,
}}
@@ -415,7 +409,7 @@ func TestRunHasTimedOut(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
result := tc.run.HasTimedOut(testClock)
if d := cmp.Diff(tc.expectedValue, result); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
})
}
diff --git a/upstream/pkg/apis/pipeline/v1alpha1/stepaction_conversion.go b/upstream/pkg/apis/pipeline/v1alpha1/stepaction_conversion.go
index b2896d0de8f..a02bf76096a 100644
--- a/upstream/pkg/apis/pipeline/v1alpha1/stepaction_conversion.go
+++ b/upstream/pkg/apis/pipeline/v1alpha1/stepaction_conversion.go
@@ -15,9 +15,7 @@ package v1alpha1
import (
"context"
- "fmt"
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"knative.dev/pkg/apis"
)
@@ -25,63 +23,20 @@ var _ apis.Convertible = (*StepAction)(nil)
// ConvertTo implements apis.Convertible
func (s *StepAction) ConvertTo(ctx context.Context, to apis.Convertible) error {
- if apis.IsInDelete(ctx) {
- return nil
- }
- switch sink := to.(type) {
- case *v1beta1.StepAction:
- sink.ObjectMeta = s.ObjectMeta
- return s.Spec.ConvertTo(ctx, &sink.Spec)
- default:
- return fmt.Errorf("unknown version, got: %T", sink)
- }
+ return nil
}
// ConvertTo implements apis.Convertible
-func (ss *StepActionSpec) ConvertTo(ctx context.Context, sink *v1beta1.StepActionSpec) error {
- sink.Description = ss.Description
- sink.Image = ss.Image
- sink.Command = ss.Command
- sink.Args = ss.Args
- sink.Env = ss.Env
- sink.Script = ss.Script
- sink.WorkingDir = ss.WorkingDir
- sink.Params = ss.Params
- sink.Results = ss.Results
- sink.SecurityContext = ss.SecurityContext
- sink.VolumeMounts = ss.VolumeMounts
-
+func (ss *StepActionSpec) ConvertTo(ctx context.Context, sink *StepActionSpec) error {
return nil
}
// ConvertFrom implements apis.Convertible
func (s *StepAction) ConvertFrom(ctx context.Context, from apis.Convertible) error {
- if apis.IsInDelete(ctx) {
- return nil
- }
- switch source := from.(type) {
- case *v1beta1.StepAction:
- s.ObjectMeta = source.ObjectMeta
- return s.Spec.ConvertFrom(ctx, &source.Spec)
- default:
- return fmt.Errorf("unknown version, got: %T", source)
- }
+ return nil
}
// ConvertFrom implements apis.Convertible
-func (ss *StepActionSpec) ConvertFrom(ctx context.Context, source *v1beta1.StepActionSpec) error {
- ss.Description = source.Description
- ss.Image = source.Image
- ss.Command = source.Command
- ss.Args = source.Args
- ss.Env = source.Env
- ss.Script = source.Script
- ss.WorkingDir = source.WorkingDir
-
- ss.Params = source.Params
- ss.Results = source.Results
- ss.SecurityContext = source.SecurityContext
- ss.VolumeMounts = source.VolumeMounts
-
+func (ss *StepActionSpec) ConvertFrom(ctx context.Context, source *StepActionSpec) error {
return nil
}
diff --git a/upstream/pkg/apis/pipeline/v1alpha1/stepaction_conversion_test.go b/upstream/pkg/apis/pipeline/v1alpha1/stepaction_conversion_test.go
deleted file mode 100644
index 9193b6e7b32..00000000000
--- a/upstream/pkg/apis/pipeline/v1alpha1/stepaction_conversion_test.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package v1alpha1_test
-
-import (
- "context"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
- "github.com/tektoncd/pipeline/test/diff"
- "github.com/tektoncd/pipeline/test/parse"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func TestPipelineConversionBadType(t *testing.T) {
- good, bad := &v1alpha1.StepAction{}, &v1beta1.Task{}
-
- if err := good.ConvertTo(context.Background(), bad); err == nil {
- t.Errorf("ConvertTo() = %#v, wanted error", bad)
- }
-
- if err := good.ConvertFrom(context.Background(), bad); err == nil {
- t.Errorf("ConvertFrom() = %#v, wanted error", good)
- }
-}
-
-func TestStepActionConversion(t *testing.T) {
- stepActionWithAllFieldsYaml := `
-metadata:
- name: foo
- namespace: bar
-spec:
- name: all-fields
- image: foo
- command: ["hello"]
- args: ["world"]
- results:
- - name: result1
- - name: result2
- script: |
- echo "I am a Step Action!!!" >> $(step.results.result1.path)
- echo "I am a hidden step action!!!" >> $(step.results.result2.path)
- workingDir: "/dir"
- envFrom:
- - prefix: prefix
- params:
- - name: string-param
- type: string
- default: "a string param"
- - name: array-param
- type: array
- default:
- - an
- - array
- - param
- - name: object-param
- type: object
- properties:
- key1:
- type: string
- key2:
- type: string
- key3:
- type: string
- default:
- key1: "step-action default key1"
- key2: "step-action default key2"
- key3: "step-action default key3"
- volumeMounts:
- - name: data
- mountPath: /data
- securityContext:
- privileged: true
-`
-
- stepActionV1alpha1 := parse.MustParseV1alpha1StepAction(t, stepActionWithAllFieldsYaml)
- stepActionV1beta1 := parse.MustParseV1beta1StepAction(t, stepActionWithAllFieldsYaml)
-
- var ignoreTypeMeta = cmpopts.IgnoreFields(metav1.TypeMeta{}, "Kind", "APIVersion")
-
- tests := []struct {
- name string
- v1AlphaStepAction *v1alpha1.StepAction
- v1Beta1StepAction *v1beta1.StepAction
- }{{
- name: "stepAction conversion with all fields",
- v1AlphaStepAction: stepActionV1alpha1,
- v1Beta1StepAction: stepActionV1beta1,
- }}
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- v1Beta1StepAction := &v1beta1.StepAction{}
- if err := test.v1AlphaStepAction.ConvertTo(context.Background(), v1Beta1StepAction); err != nil {
- t.Errorf("ConvertTo() = %v", err)
- return
- }
- t.Logf("ConvertTo() = %#v", v1Beta1StepAction)
- if d := cmp.Diff(test.v1Beta1StepAction, v1Beta1StepAction, ignoreTypeMeta); d != "" {
- t.Errorf("expected v1Task is different from what's converted: %s", d)
- }
- gotV1alpha1 := &v1alpha1.StepAction{}
- if err := gotV1alpha1.ConvertFrom(context.Background(), v1Beta1StepAction); err != nil {
- t.Errorf("ConvertFrom() = %v", err)
- }
- t.Logf("ConvertFrom() = %#v", gotV1alpha1)
- if d := cmp.Diff(test.v1AlphaStepAction, gotV1alpha1, ignoreTypeMeta); d != "" {
- t.Errorf("roundtrip %s", diff.PrintWantGot(d))
- }
- })
- }
-}
diff --git a/upstream/pkg/apis/pipeline/v1alpha1/swagger.json b/upstream/pkg/apis/pipeline/v1alpha1/swagger.json
index 12ecda6393c..4da8930fc32 100644
--- a/upstream/pkg/apis/pipeline/v1alpha1/swagger.json
+++ b/upstream/pkg/apis/pipeline/v1alpha1/swagger.json
@@ -28,14 +28,6 @@
"default": ""
}
},
- "priorityClassName": {
- "description": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
- "type": "string"
- },
- "securityContext": {
- "description": "SecurityContext sets the security context for the pod",
- "$ref": "#/definitions/v1.PodSecurityContext"
- },
"tolerations": {
"description": "If specified, the pod's tolerations.",
"type": "array",
@@ -193,6 +185,7 @@
},
"spec": {
"description": "Spec is a specification of a custom task",
+ "default": {},
"$ref": "#/definitions/k8s.io.apimachinery.pkg.runtime.RawExtension"
}
}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/artifact_types.go b/upstream/pkg/apis/pipeline/v1beta1/artifact_types.go
index 23778a90bbc..ec50bde8a16 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/artifact_types.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/artifact_types.go
@@ -6,12 +6,8 @@ type Algorithm string
// Artifact represents an artifact within a system, potentially containing multiple values
// associated with it.
type Artifact struct {
- // The artifact's identifying category name
- Name string `json:"name,omitempty"`
- // A collection of values related to the artifact
- Values []ArtifactValue `json:"values,omitempty"`
- // Indicate if the artifact is a build output or a by-product
- BuildOutput bool `json:"buildOutput,omitempty"`
+ Name string `json:"name,omitempty"` // The artifact's identifying category name
+ Values []ArtifactValue `json:"values,omitempty"` // A collection of values related to the artifact
}
// ArtifactValue represents a specific value or data element within an Artifact.
diff --git a/upstream/pkg/apis/pipeline/v1beta1/container_conversion.go b/upstream/pkg/apis/pipeline/v1beta1/container_conversion.go
index 5b61377bcc8..2e828bc5add 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/container_conversion.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/container_conversion.go
@@ -72,11 +72,6 @@ func (s Step) convertTo(ctx context.Context, sink *v1.Step) {
sink.Params = append(sink.Params, new)
}
sink.Results = s.Results
- for _, w := range s.When {
- new := v1.WhenExpression{}
- w.convertTo(ctx, &new)
- sink.When = append(sink.When, new)
- }
}
func (s *Step) convertFrom(ctx context.Context, source v1.Step) {
@@ -116,11 +111,6 @@ func (s *Step) convertFrom(ctx context.Context, source v1.Step) {
s.Params = append(s.Params, new)
}
s.Results = source.Results
- for _, w := range source.When {
- new := WhenExpression{}
- new.convertFrom(ctx, w)
- s.When = append(s.When, new)
- }
}
func (s StepTemplate) convertTo(ctx context.Context, sink *v1.StepTemplate) {
diff --git a/upstream/pkg/apis/pipeline/v1beta1/container_types.go b/upstream/pkg/apis/pipeline/v1beta1/container_types.go
index 95f852bf496..4494184d7aa 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/container_types.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/container_types.go
@@ -247,8 +247,6 @@ type Step struct {
// +optional
// +listType=atomic
Results []v1.StepResult `json:"results,omitempty"`
-
- When StepWhenExpressions `json:"when,omitempty"`
}
// Ref can be used to refer to a specific instance of a StepAction.
@@ -747,43 +745,10 @@ type Sidecar struct {
// +optional
// +listType=atomic
Workspaces []WorkspaceUsage `json:"workspaces,omitempty"`
-
- // RestartPolicy refers to kubernetes RestartPolicy. It can only be set for an
- // initContainer and must have it's policy set to "Always". It is currently
- // left optional to help support Kubernetes versions prior to 1.29 when this feature
- // was introduced.
- // +optional
- RestartPolicy *corev1.ContainerRestartPolicy `json:"restartPolicy,omitempty"`
}
// ToK8sContainer converts the Sidecar to a Kubernetes Container struct
func (s *Sidecar) ToK8sContainer() *corev1.Container {
- if s.RestartPolicy == nil {
- return &corev1.Container{
- Name: s.Name,
- Image: s.Image,
- Command: s.Command,
- Args: s.Args,
- WorkingDir: s.WorkingDir,
- Ports: s.Ports,
- EnvFrom: s.EnvFrom,
- Env: s.Env,
- Resources: s.Resources,
- VolumeMounts: s.VolumeMounts,
- VolumeDevices: s.VolumeDevices,
- LivenessProbe: s.LivenessProbe,
- ReadinessProbe: s.ReadinessProbe,
- StartupProbe: s.StartupProbe,
- Lifecycle: s.Lifecycle,
- TerminationMessagePath: s.TerminationMessagePath,
- TerminationMessagePolicy: s.TerminationMessagePolicy,
- ImagePullPolicy: s.ImagePullPolicy,
- SecurityContext: s.SecurityContext,
- Stdin: s.Stdin,
- StdinOnce: s.StdinOnce,
- TTY: s.TTY,
- }
- }
return &corev1.Container{
Name: s.Name,
Image: s.Image,
@@ -798,7 +763,6 @@ func (s *Sidecar) ToK8sContainer() *corev1.Container {
VolumeDevices: s.VolumeDevices,
LivenessProbe: s.LivenessProbe,
ReadinessProbe: s.ReadinessProbe,
- RestartPolicy: s.RestartPolicy,
StartupProbe: s.StartupProbe,
Lifecycle: s.Lifecycle,
TerminationMessagePath: s.TerminationMessagePath,
@@ -835,5 +799,4 @@ func (s *Sidecar) SetContainerFields(c corev1.Container) {
s.Stdin = c.Stdin
s.StdinOnce = c.StdinOnce
s.TTY = c.TTY
- s.RestartPolicy = c.RestartPolicy
}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/container_validation.go b/upstream/pkg/apis/pipeline/v1beta1/container_validation.go
index dc1b60d15ba..bab6f8bc4d5 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/container_validation.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/container_validation.go
@@ -18,9 +18,6 @@ package v1beta1
import (
"context"
- "errors"
- "fmt"
- "regexp"
"strings"
"github.com/tektoncd/pipeline/pkg/apis/config"
@@ -28,72 +25,38 @@ import (
"knative.dev/pkg/apis"
)
-func validateRef(ctx context.Context, refName string, refResolver ResolverName, refParams Params) (errs *apis.FieldError) {
+// Validate ensures that a supplied Ref field is populated
+// correctly. No errors are returned for a nil Ref.
+func (ref *Ref) Validate(ctx context.Context) (errs *apis.FieldError) {
+ if ref == nil {
+ return errs
+ }
+
switch {
- case refResolver != "" || refParams != nil:
- if refParams != nil {
- errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver params", config.BetaAPIFields).ViaField("params"))
- if refName != "" {
- errs = errs.Also(apis.ErrMultipleOneOf("name", "params"))
- }
- if refResolver == "" {
- errs = errs.Also(apis.ErrMissingField("resolver"))
- }
- errs = errs.Also(ValidateParameters(ctx, refParams))
- }
- if refResolver != "" {
+ case ref.Resolver != "" || ref.Params != nil:
+ if ref.Resolver != "" {
errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver", config.BetaAPIFields).ViaField("resolver"))
- if refName != "" {
- // make sure that the name is url-like.
- err := RefNameLikeUrl(refName)
- if err == nil && !config.FromContextOrDefaults(ctx).FeatureFlags.EnableConciseResolverSyntax {
- // If name is url-like then concise resolver syntax must be enabled
- errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use concise resolver syntax", config.EnableConciseResolverSyntax), ""))
- }
- if err != nil {
- errs = errs.Also(apis.ErrInvalidValue(err, "name"))
- }
+ if ref.Name != "" {
+ errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver"))
}
}
- case refName != "":
- // ref name can be a Url-like format.
- if err := RefNameLikeUrl(refName); err == nil {
- // If name is url-like then concise resolver syntax must be enabled
- if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableConciseResolverSyntax {
- errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use concise resolver syntax", config.EnableConciseResolverSyntax), ""))
+ if ref.Params != nil {
+ errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver params", config.BetaAPIFields).ViaField("params"))
+ if ref.Name != "" {
+ errs = errs.Also(apis.ErrMultipleOneOf("name", "params"))
}
- // In stage1 of concise remote resolvers syntax, this is a required field.
- // TODO: remove this check when implementing stage 2 where this is optional.
- if refResolver == "" {
+ if ref.Resolver == "" {
errs = errs.Also(apis.ErrMissingField("resolver"))
}
- // Or, it must be a valid k8s name
- } else {
- // ref name must be a valid k8s name
- if errSlice := validation.IsQualifiedName(refName); len(errSlice) != 0 {
- errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name"))
- }
+ errs = errs.Also(ValidateParameters(ctx, ref.Params))
+ }
+ case ref.Name != "":
+ // Ref name must be a valid k8s name
+ if errSlice := validation.IsQualifiedName(ref.Name); len(errSlice) != 0 {
+ errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name"))
}
default:
errs = errs.Also(apis.ErrMissingField("name"))
}
return errs
}
-
-// Validate ensures that a supplied Ref field is populated
-// correctly. No errors are returned for a nil Ref.
-func (ref *Ref) Validate(ctx context.Context) (errs *apis.FieldError) {
- if ref == nil {
- return errs
- }
- return validateRef(ctx, ref.Name, ref.Resolver, ref.Params)
-}
-
-// RefNameLikeUrl checks if the name is url parsable and returns an error if it isn't.
-func RefNameLikeUrl(name string) error {
- schemeRegex := regexp.MustCompile(`[\w-]+:\/\/*`)
- if !schemeRegex.MatchString(name) {
- return errors.New("invalid URI for request")
- }
- return nil
-}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/container_validation_test.go b/upstream/pkg/apis/pipeline/v1beta1/container_validation_test.go
index cc423d3bd52..95e7e4a28c0 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/container_validation_test.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/container_validation_test.go
@@ -21,22 +21,12 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
- "github.com/tektoncd/pipeline/pkg/apis/config"
cfgtesting "github.com/tektoncd/pipeline/pkg/apis/config/testing"
- v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+ "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/test/diff"
"knative.dev/pkg/apis"
)
-func enableConciseResolverSyntax(ctx context.Context) context.Context {
- return config.ToContext(ctx, &config.Config{
- FeatureFlags: &config.FeatureFlags{
- EnableConciseResolverSyntax: true,
- EnableAPIFields: config.BetaAPIFields,
- },
- })
-}
-
func TestRef_Valid(t *testing.T) {
tests := []struct {
name string
@@ -47,10 +37,6 @@ func TestRef_Valid(t *testing.T) {
}, {
name: "simple ref",
ref: &v1beta1.Ref{Name: "refname"},
- }, {
- name: "ref name - consice syntax",
- ref: &v1beta1.Ref{Name: "foo://baz:ver", ResolverRef: v1beta1.ResolverRef{Resolver: "git"}},
- wc: enableConciseResolverSyntax,
}, {
name: "beta feature: valid resolver",
ref: &v1beta1.Ref{ResolverRef: v1beta1.ResolverRef{Resolver: "git"}},
@@ -107,45 +93,29 @@ func TestRef_Invalid(t *testing.T) {
},
wantErr: apis.ErrMissingField("resolver"),
}, {
- name: "ref with resolver and k8s style name",
+ name: "ref resolver disallowed in conjunction with ref name",
ref: &v1beta1.Ref{
Name: "foo",
ResolverRef: v1beta1.ResolverRef{
Resolver: "git",
},
},
- wantErr: apis.ErrInvalidValue(`invalid URI for request`, "name"),
- wc: enableConciseResolverSyntax,
+ wantErr: apis.ErrMultipleOneOf("name", "resolver"),
}, {
- name: "ref with url-like name without resolver",
+ name: "ref params disallowed in conjunction with ref name",
ref: &v1beta1.Ref{
- Name: "https://foo.com/bar",
- },
- wantErr: apis.ErrMissingField("resolver"),
- wc: enableConciseResolverSyntax,
- }, {
- name: "ref params disallowed in conjunction with pipelineref name",
- ref: &v1beta1.Ref{
- Name: "https://foo/bar",
+ Name: "bar",
ResolverRef: v1beta1.ResolverRef{
- Resolver: "git",
- Params: v1beta1.Params{{Name: "foo", Value: v1beta1.ParamValue{StringVal: "bar"}}},
+ Params: v1beta1.Params{{
+ Name: "foo",
+ Value: v1beta1.ParamValue{
+ Type: v1beta1.ParamTypeString,
+ StringVal: "bar",
+ },
+ }},
},
},
- wantErr: apis.ErrMultipleOneOf("name", "params"),
- wc: enableConciseResolverSyntax,
- }, {
- name: "ref with url-like name without enable-concise-resolver-syntax",
- ref: &v1beta1.Ref{Name: "https://foo.com/bar"},
- wantErr: apis.ErrMissingField("resolver").Also(&apis.FieldError{
- Message: `feature flag enable-concise-resolver-syntax should be set to true to use concise resolver syntax`,
- }),
- }, {
- name: "ref without enable-concise-resolver-syntax",
- ref: &v1beta1.Ref{Name: "https://foo.com/bar", ResolverRef: v1beta1.ResolverRef{Resolver: "git"}},
- wantErr: &apis.FieldError{
- Message: `feature flag enable-concise-resolver-syntax should be set to true to use concise resolver syntax`,
- },
+ wantErr: apis.ErrMultipleOneOf("name", "params").Also(apis.ErrMissingField("resolver")),
}, {
name: "invalid ref name",
ref: &v1beta1.Ref{Name: "_foo"},
diff --git a/upstream/pkg/apis/pipeline/v1beta1/customrun_types_test.go b/upstream/pkg/apis/pipeline/v1beta1/customrun_types_test.go
index 011ce02264b..39b66c008e6 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/customrun_types_test.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/customrun_types_test.go
@@ -376,7 +376,7 @@ func TestRunGetTimeOut(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
result := tc.customRun.GetTimeout()
if d := cmp.Diff(tc.expectedValue, result); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
})
}
@@ -399,8 +399,7 @@ func TestRunHasTimedOut(t *testing.T) {
CustomRunStatusFields: v1beta1.CustomRunStatusFields{
StartTime: &metav1.Time{Time: now},
},
- },
- },
+ }},
expectedValue: false,
}, {
name: "runWithStartTimeNoTimeout2",
@@ -410,8 +409,7 @@ func TestRunHasTimedOut(t *testing.T) {
CustomRunStatusFields: v1beta1.CustomRunStatusFields{
StartTime: &metav1.Time{Time: now.Add(-1 * (apisconfig.DefaultTimeoutMinutes + 1) * time.Minute)},
},
- },
- },
+ }},
expectedValue: true,
}, {
name: "runWithStartTimeAndTimeout",
@@ -420,8 +418,7 @@ func TestRunHasTimedOut(t *testing.T) {
Spec: v1beta1.CustomRunSpec{Timeout: &metav1.Duration{Duration: 10 * time.Second}},
Status: v1beta1.CustomRunStatus{CustomRunStatusFields: v1beta1.CustomRunStatusFields{
StartTime: &metav1.Time{Time: now.Add(-1 * (apisconfig.DefaultTimeoutMinutes + 1) * time.Minute)},
- }},
- },
+ }}},
expectedValue: true,
}, {
name: "runWithNoStartTimeAndTimeout",
@@ -437,8 +434,7 @@ func TestRunHasTimedOut(t *testing.T) {
Spec: v1beta1.CustomRunSpec{Timeout: &metav1.Duration{Duration: 10 * time.Second}},
Status: v1beta1.CustomRunStatus{CustomRunStatusFields: v1beta1.CustomRunStatusFields{
StartTime: &metav1.Time{Time: now},
- }},
- },
+ }}},
expectedValue: false,
}}
@@ -446,7 +442,7 @@ func TestRunHasTimedOut(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
result := tc.customRun.HasTimedOut(testClock)
if d := cmp.Diff(tc.expectedValue, result); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
})
}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/merge.go b/upstream/pkg/apis/pipeline/v1beta1/merge.go
index 62111ee7ccb..42d9ffa868c 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/merge.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/merge.go
@@ -47,11 +47,6 @@ func MergeStepsWithStepTemplate(template *StepTemplate, steps []Step) ([]Step, e
}
for i, s := range steps {
- // If the stepaction has not been fetched yet then do not merge.
- // Skip over to the next one
- if s.Ref != nil {
- continue
- }
merged := corev1.Container{}
err := mergeObjWithTemplateBytes(md, s.ToK8sContainer(), &merged)
if err != nil {
@@ -66,7 +61,7 @@ func MergeStepsWithStepTemplate(template *StepTemplate, steps []Step) ([]Step, e
amendConflictingContainerFields(&merged, s)
// Pass through original step Script, for later conversion.
- newStep := Step{Script: s.Script, OnError: s.OnError, Timeout: s.Timeout, StdoutConfig: s.StdoutConfig, StderrConfig: s.StderrConfig, When: s.When}
+ newStep := Step{Script: s.Script, OnError: s.OnError, Timeout: s.Timeout, StdoutConfig: s.StdoutConfig, StderrConfig: s.StderrConfig}
newStep.SetContainerFields(merged)
steps[i] = newStep
}
@@ -82,6 +77,7 @@ func MergeStepsWithOverrides(steps []Step, overrides []TaskRunStepOverride) ([]S
stepNameToOverride[o.Name] = o
}
for i, s := range steps {
+ s := s
o, found := stepNameToOverride[s.Name]
if !found {
continue
@@ -108,6 +104,7 @@ func MergeSidecarsWithOverrides(sidecars []Sidecar, overrides []TaskRunSidecarOv
sidecarNameToOverride[o.Name] = o
}
for i, s := range sidecars {
+ s := s
o, found := sidecarNameToOverride[s.Name]
if !found {
continue
diff --git a/upstream/pkg/apis/pipeline/v1beta1/merge_test.go b/upstream/pkg/apis/pipeline/v1beta1/merge_test.go
index fe177e431b2..08fff766f65 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/merge_test.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/merge_test.go
@@ -20,13 +20,10 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
- v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/test/diff"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
- "k8s.io/apimachinery/pkg/selection"
- "k8s.io/utils/pointer"
)
func TestMergeStepsWithStepTemplate(t *testing.T) {
@@ -130,52 +127,6 @@ func TestMergeStepsWithStepTemplate(t *testing.T) {
MountPath: "/workspace/data",
}},
}},
- }, {
- name: "step-ref-should-not-be-merged-with-steptemplate",
- template: &v1beta1.StepTemplate{
- SecurityContext: &corev1.SecurityContext{
- RunAsNonRoot: pointer.Bool(true),
- },
- VolumeMounts: []corev1.VolumeMount{{
- Name: "data",
- MountPath: "/workspace/data",
- }},
- Env: []corev1.EnvVar{{
- Name: "KEEP_THIS",
- Value: "A_VALUE",
- }, {
- Name: "SOME_KEY_1",
- ValueFrom: &corev1.EnvVarSource{
- SecretKeyRef: &corev1.SecretKeySelector{
- Key: "A_KEY",
- LocalObjectReference: corev1.LocalObjectReference{Name: "A_NAME"},
- },
- },
- }, {
- Name: "SOME_KEY_2",
- Value: "VALUE_2",
- }},
- },
- steps: []v1beta1.Step{{
- Ref: &v1beta1.Ref{Name: "my-step-action"},
- OnError: "foo",
- Results: []v1.StepResult{{
- Name: "result",
- }},
- Params: v1beta1.Params{{
- Name: "param",
- }},
- }},
- expected: []v1beta1.Step{{
- Ref: &v1beta1.Ref{Name: "my-step-action"},
- OnError: "foo",
- Results: []v1.StepResult{{
- Name: "result",
- }},
- Params: v1beta1.Params{{
- Name: "param",
- }},
- }},
}, {
name: "merge-env-by-step",
template: &v1beta1.StepTemplate{
@@ -232,17 +183,6 @@ func TestMergeStepsWithStepTemplate(t *testing.T) {
},
}},
}},
- }, {
- name: "when",
- template: nil,
- steps: []v1beta1.Step{{
- Image: "some-image",
- When: v1beta1.StepWhenExpressions{{Input: "foo", Operator: selection.In, Values: []string{"foo", "bar"}}},
- }},
- expected: []v1beta1.Step{{
- Image: "some-image",
- When: v1beta1.StepWhenExpressions{{Input: "foo", Operator: selection.In, Values: []string{"foo", "bar"}}},
- }},
}} {
t.Run(tc.name, func(t *testing.T) {
result, err := v1beta1.MergeStepsWithStepTemplate(tc.template, tc.steps)
diff --git a/upstream/pkg/apis/pipeline/v1beta1/openapi_generated.go b/upstream/pkg/apis/pipeline/v1beta1/openapi_generated.go
index 145e72b083a..d938565b86d 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/openapi_generated.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/openapi_generated.go
@@ -87,9 +87,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SidecarState": schema_pkg_apis_pipeline_v1beta1_SidecarState(ref),
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SkippedTask": schema_pkg_apis_pipeline_v1beta1_SkippedTask(ref),
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Step": schema_pkg_apis_pipeline_v1beta1_Step(ref),
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepAction": schema_pkg_apis_pipeline_v1beta1_StepAction(ref),
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepActionList": schema_pkg_apis_pipeline_v1beta1_StepActionList(ref),
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepActionSpec": schema_pkg_apis_pipeline_v1beta1_StepActionSpec(ref),
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepOutputConfig": schema_pkg_apis_pipeline_v1beta1_StepOutputConfig(ref),
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepState": schema_pkg_apis_pipeline_v1beta1_StepState(ref),
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepTemplate": schema_pkg_apis_pipeline_v1beta1_StepTemplate(ref),
@@ -189,24 +186,11 @@ func schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref common.Reference
},
},
},
- "securityContext": {
- SchemaProps: spec.SchemaProps{
- Description: "SecurityContext sets the security context for the pod",
- Ref: ref("k8s.io/api/core/v1.PodSecurityContext"),
- },
- },
- "priorityClassName": {
- SchemaProps: spec.SchemaProps{
- Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
- Type: []string{"string"},
- Format: "",
- },
- },
},
},
},
Dependencies: []string{
- "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"},
+ "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.Toleration"},
}
}
@@ -435,14 +419,13 @@ func schema_pkg_apis_pipeline_v1beta1_Artifact(ref common.ReferenceCallback) com
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
- Description: "The artifact's identifying category name",
- Type: []string{"string"},
- Format: "",
+ Type: []string{"string"},
+ Format: "",
},
},
"values": {
SchemaProps: spec.SchemaProps{
- Description: "A collection of values related to the artifact",
+ Description: "The artifact's identifying category name",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
@@ -454,13 +437,6 @@ func schema_pkg_apis_pipeline_v1beta1_Artifact(ref common.ReferenceCallback) com
},
},
},
- "buildOutput": {
- SchemaProps: spec.SchemaProps{
- Description: "Indicate if the artifact is a build output or a by-product",
- Type: []string{"boolean"},
- Format: "",
- },
- },
},
},
},
@@ -1040,6 +1016,7 @@ func schema_pkg_apis_pipeline_v1beta1_EmbeddedCustomRunSpec(ref common.Reference
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec is a specification of a custom task",
+ Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
},
},
@@ -1073,6 +1050,7 @@ func schema_pkg_apis_pipeline_v1beta1_EmbeddedTask(ref common.ReferenceCallback)
"spec": {
SchemaProps: spec.SchemaProps{
Description: "Spec is a specification of a custom task",
+ Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
},
},
@@ -1410,7 +1388,8 @@ func schema_pkg_apis_pipeline_v1beta1_Param(ref common.ReferenceCallback) common
},
"value": {
SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue"),
+ Default: map[string]interface{}{},
+ Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue"),
},
},
},
@@ -1710,7 +1689,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRef(ref common.ReferenceCallback)
},
"bundle": {
SchemaProps: spec.SchemaProps{
- Description: "Bundle url reference to a Tekton Bundle.\n\nDeprecated: Please use ResolverRef with the bundles resolver instead. The field is staying there for go client backward compatibility, but is not used/allowed anymore.",
+ Description: "Bundle url reference to a Tekton Bundle.\n\nDeprecated: Please use ResolverRef with the bundles resolver instead.",
Type: []string{"string"},
Format: "",
},
@@ -1815,6 +1794,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineResult(ref common.ReferenceCallbac
"value": {
SchemaProps: spec.SchemaProps{
Description: "Value the expression used to retrieve the value",
+ Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue"),
},
},
@@ -1940,6 +1920,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRunResult(ref common.ReferenceCall
"value": {
SchemaProps: spec.SchemaProps{
Description: "Value is the result returned from the execution of this PipelineRun",
+ Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue"),
},
},
@@ -3637,13 +3618,6 @@ func schema_pkg_apis_pipeline_v1beta1_Sidecar(ref common.ReferenceCallback) comm
},
},
},
- "restartPolicy": {
- SchemaProps: spec.SchemaProps{
- Description: "RestartPolicy refers to kubernetes RestartPolicy. It can only be set for an initContainer and must have it's policy set to \"Always\". It is currently left optional to help support Kubernetes versions prior to 1.29 when this feature was introduced.",
- Type: []string{"string"},
- Format: "",
- },
- },
},
Required: []string{"name"},
},
@@ -4105,285 +4079,12 @@ func schema_pkg_apis_pipeline_v1beta1_Step(ref common.ReferenceCallback) common.
},
},
},
- "when": {
- SchemaProps: spec.SchemaProps{
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: map[string]interface{}{},
- Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression"),
- },
- },
- },
- },
- },
},
Required: []string{"name"},
},
},
Dependencies: []string{
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Ref", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepOutputConfig", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage", "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"},
- }
-}
-
-func schema_pkg_apis_pipeline_v1beta1_StepAction(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "StepAction represents the actionable components of Step. The Step can only reference it from the cluster or using remote resolution.",
- Type: []string{"object"},
- Properties: map[string]spec.Schema{
- "kind": {
- SchemaProps: spec.SchemaProps{
- Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
- Type: []string{"string"},
- Format: "",
- },
- },
- "apiVersion": {
- SchemaProps: spec.SchemaProps{
- Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
- Type: []string{"string"},
- Format: "",
- },
- },
- "metadata": {
- SchemaProps: spec.SchemaProps{
- Default: map[string]interface{}{},
- Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
- },
- },
- "spec": {
- SchemaProps: spec.SchemaProps{
- Description: "Spec holds the desired state of the Step from the client",
- Default: map[string]interface{}{},
- Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepActionSpec"),
- },
- },
- },
- },
- },
- Dependencies: []string{
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepActionSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
- }
-}
-
-func schema_pkg_apis_pipeline_v1beta1_StepActionList(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "StepActionList contains a list of StepActions",
- Type: []string{"object"},
- Properties: map[string]spec.Schema{
- "kind": {
- SchemaProps: spec.SchemaProps{
- Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
- Type: []string{"string"},
- Format: "",
- },
- },
- "apiVersion": {
- SchemaProps: spec.SchemaProps{
- Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
- Type: []string{"string"},
- Format: "",
- },
- },
- "metadata": {
- SchemaProps: spec.SchemaProps{
- Default: map[string]interface{}{},
- Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
- },
- },
- "items": {
- SchemaProps: spec.SchemaProps{
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: map[string]interface{}{},
- Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepAction"),
- },
- },
- },
- },
- },
- },
- Required: []string{"items"},
- },
- },
- Dependencies: []string{
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepAction", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
- }
-}
-
-func schema_pkg_apis_pipeline_v1beta1_StepActionSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "StepActionSpec contains the actionable components of a step.",
- Type: []string{"object"},
- Properties: map[string]spec.Schema{
- "description": {
- SchemaProps: spec.SchemaProps{
- Description: "Description is a user-facing description of the stepaction that may be used to populate a UI.",
- Type: []string{"string"},
- Format: "",
- },
- },
- "image": {
- SchemaProps: spec.SchemaProps{
- Description: "Image reference name to run for this StepAction. More info: https://kubernetes.io/docs/concepts/containers/images",
- Type: []string{"string"},
- Format: "",
- },
- },
- "command": {
- VendorExtensible: spec.VendorExtensible{
- Extensions: spec.Extensions{
- "x-kubernetes-list-type": "atomic",
- },
- },
- SchemaProps: spec.SchemaProps{
- Description: "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- "args": {
- VendorExtensible: spec.VendorExtensible{
- Extensions: spec.Extensions{
- "x-kubernetes-list-type": "atomic",
- },
- },
- SchemaProps: spec.SchemaProps{
- Description: "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- "env": {
- VendorExtensible: spec.VendorExtensible{
- Extensions: spec.Extensions{
- "x-kubernetes-list-type": "atomic",
- "x-kubernetes-patch-merge-key": "name",
- "x-kubernetes-patch-strategy": "merge",
- },
- },
- SchemaProps: spec.SchemaProps{
- Description: "List of environment variables to set in the container. Cannot be updated.",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: map[string]interface{}{},
- Ref: ref("k8s.io/api/core/v1.EnvVar"),
- },
- },
- },
- },
- },
- "script": {
- SchemaProps: spec.SchemaProps{
- Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.",
- Type: []string{"string"},
- Format: "",
- },
- },
- "workingDir": {
- SchemaProps: spec.SchemaProps{
- Description: "Step's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
- Type: []string{"string"},
- Format: "",
- },
- },
- "params": {
- VendorExtensible: spec.VendorExtensible{
- Extensions: spec.Extensions{
- "x-kubernetes-list-type": "atomic",
- },
- },
- SchemaProps: spec.SchemaProps{
- Description: "Params is a list of input parameters required to run the stepAction. Params must be supplied as inputs in Steps unless they declare a defaultvalue.",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: map[string]interface{}{},
- Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec"),
- },
- },
- },
- },
- },
- "results": {
- VendorExtensible: spec.VendorExtensible{
- Extensions: spec.Extensions{
- "x-kubernetes-list-type": "atomic",
- },
- },
- SchemaProps: spec.SchemaProps{
- Description: "Results are values that this StepAction can output",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: map[string]interface{}{},
- Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepResult"),
- },
- },
- },
- },
- },
- "securityContext": {
- SchemaProps: spec.SchemaProps{
- Description: "SecurityContext defines the security options the Step should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ The value set in StepAction will take precedence over the value from Task.",
- Ref: ref("k8s.io/api/core/v1.SecurityContext"),
- },
- },
- "volumeMounts": {
- VendorExtensible: spec.VendorExtensible{
- Extensions: spec.Extensions{
- "x-kubernetes-list-type": "atomic",
- "x-kubernetes-patch-merge-key": "mountPath",
- "x-kubernetes-patch-strategy": "merge",
- },
- },
- SchemaProps: spec.SchemaProps{
- Description: "Volumes to mount into the Step's filesystem. Cannot be updated.",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: map[string]interface{}{},
- Ref: ref("k8s.io/api/core/v1.VolumeMount"),
- },
- },
- },
- },
- },
- },
- },
- },
- Dependencies: []string{
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepResult", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeMount"},
+ "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Ref", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepOutputConfig", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage", "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"},
}
}
@@ -4463,11 +4164,6 @@ func schema_pkg_apis_pipeline_v1beta1_StepState(ref common.ReferenceCallback) co
},
},
},
- "provenance": {
- SchemaProps: spec.SchemaProps{
- Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance"),
- },
- },
"inputs": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
@@ -4498,7 +4194,7 @@ func schema_pkg_apis_pipeline_v1beta1_StepState(ref common.ReferenceCallback) co
},
},
Dependencies: []string{
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Artifact", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Provenance", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResult", "k8s.io/api/core/v1.ContainerStateRunning", "k8s.io/api/core/v1.ContainerStateTerminated", "k8s.io/api/core/v1.ContainerStateWaiting"},
+ "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Artifact", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResult", "k8s.io/api/core/v1.ContainerStateRunning", "k8s.io/api/core/v1.ContainerStateTerminated", "k8s.io/api/core/v1.ContainerStateWaiting"},
}
}
@@ -4822,25 +4518,6 @@ func schema_pkg_apis_pipeline_v1beta1_TaskBreakpoints(ref common.ReferenceCallba
Format: "",
},
},
- "beforeSteps": {
- VendorExtensible: spec.VendorExtensible{
- Extensions: spec.Extensions{
- "x-kubernetes-list-type": "atomic",
- },
- },
- SchemaProps: spec.SchemaProps{
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
},
},
},
@@ -4926,7 +4603,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRef(ref common.ReferenceCallback) comm
},
"bundle": {
SchemaProps: spec.SchemaProps{
- Description: "Bundle url reference to a Tekton Bundle.\n\nDeprecated: Please use ResolverRef with the bundles resolver instead. The field is staying there for go client backward compatibility, but is not used/allowed anymore.",
+ Description: "Bundle url reference to a Tekton Bundle.\n\nDeprecated: Please use ResolverRef with the bundles resolver instead.",
Type: []string{"string"},
Format: "",
},
@@ -5432,6 +5109,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRunResult(ref common.ReferenceCallback
"value": {
SchemaProps: spec.SchemaProps{
Description: "Value the given value of the result",
+ Default: map[string]interface{}{},
Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue"),
},
},
@@ -6619,13 +6297,6 @@ func schema_pkg_apis_resolution_v1beta1_ResolutionRequestSpec(ref common.Referen
},
},
},
- "url": {
- SchemaProps: spec.SchemaProps{
- Description: "URL is the runtime url passed to the resolver to help it figure out how to resolver the resource being requested. This is currently at an ALPHA stability level and subject to alpha API compatibility policies.",
- Type: []string{"string"},
- Format: "",
- },
- },
},
},
},
diff --git a/upstream/pkg/apis/pipeline/v1beta1/param_types_test.go b/upstream/pkg/apis/pipeline/v1beta1/param_types_test.go
index 244bd27249d..8f15dae7545 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/param_types_test.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/param_types_test.go
@@ -307,6 +307,7 @@ type ParamValuesHolder struct {
AOrS v1beta1.ParamValue `json:"val"`
}
+//nolint:musttag
func TestParamValues_UnmarshalJSON(t *testing.T) {
cases := []struct {
input map[string]interface{}
@@ -397,6 +398,7 @@ func TestParamValues_UnmarshalJSON_Directly(t *testing.T) {
}
}
+//nolint:musttag
func TestParamValues_UnmarshalJSON_Error(t *testing.T) {
cases := []struct {
desc string
@@ -414,6 +416,7 @@ func TestParamValues_UnmarshalJSON_Error(t *testing.T) {
}
}
+//nolint:musttag
func TestParamValues_MarshalJSON(t *testing.T) {
cases := []struct {
input v1beta1.ParamValue
@@ -451,7 +454,7 @@ func TestArrayReference(t *testing.T) {
}}
for _, tt := range tests {
if d := cmp.Diff(tt.expectedResult, v1beta1.ArrayReference(tt.p)); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
}
}
@@ -482,7 +485,7 @@ func TestArrayOrString(t *testing.T) {
}
if d := cmp.Diff(tt.expected, expected); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
}
}
@@ -529,7 +532,7 @@ func TestExtractNames(t *testing.T) {
}}
for _, tt := range tests {
if d := cmp.Diff(tt.want, v1beta1.Params.ExtractNames(tt.params)); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
}
}
@@ -592,7 +595,7 @@ func TestParams_ReplaceVariables(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
got := tt.ps.ReplaceVariables(tt.stringReplacements, tt.arrayReplacements, tt.objectReplacements)
if d := cmp.Diff(tt.want, got); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/pipeline_types_test.go b/upstream/pkg/apis/pipeline/v1beta1/pipeline_types_test.go
index 7d9c130ac7b..d0dc19b4cdf 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/pipeline_types_test.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/pipeline_types_test.go
@@ -86,7 +86,7 @@ func TestPipelineTask_OnError(t *testing.T) {
OnError: PipelineTaskContinue,
TaskRef: &TaskRef{Name: "foo"},
},
- wc: cfgtesting.EnableBetaAPIFields,
+ wc: cfgtesting.EnableAlphaAPIFields,
}, {
name: "valid PipelineTask with onError:stopAndFail",
p: PipelineTask{
@@ -94,7 +94,7 @@ func TestPipelineTask_OnError(t *testing.T) {
OnError: PipelineTaskStopAndFail,
TaskRef: &TaskRef{Name: "foo"},
},
- wc: cfgtesting.EnableBetaAPIFields,
+ wc: cfgtesting.EnableAlphaAPIFields,
}, {
name: "invalid OnError value",
p: PipelineTask{
@@ -103,7 +103,7 @@ func TestPipelineTask_OnError(t *testing.T) {
TaskRef: &TaskRef{Name: "foo"},
},
expectedError: apis.ErrInvalidValue("invalid-val", "OnError", "PipelineTask OnError must be either \"continue\" or \"stopAndFail\""),
- wc: cfgtesting.EnableBetaAPIFields,
+ wc: cfgtesting.EnableAlphaAPIFields,
}, {
name: "OnError:stopAndFail and retries coexist - success",
p: PipelineTask{
@@ -112,7 +112,7 @@ func TestPipelineTask_OnError(t *testing.T) {
Retries: 1,
TaskRef: &TaskRef{Name: "foo"},
},
- wc: cfgtesting.EnableBetaAPIFields,
+ wc: cfgtesting.EnableAlphaAPIFields,
}, {
name: "OnError:continue and retries coexists - failure",
p: PipelineTask{
@@ -122,6 +122,15 @@ func TestPipelineTask_OnError(t *testing.T) {
TaskRef: &TaskRef{Name: "foo"},
},
expectedError: apis.ErrGeneric("PipelineTask OnError cannot be set to \"continue\" when Retries is greater than 0"),
+ wc: cfgtesting.EnableAlphaAPIFields,
+ }, {
+ name: "setting OnError in beta API version - failure",
+ p: PipelineTask{
+ Name: "foo",
+ OnError: PipelineTaskContinue,
+ TaskRef: &TaskRef{Name: "foo"},
+ },
+ expectedError: apis.ErrGeneric("OnError requires \"enable-api-fields\" feature gate to be \"alpha\" but it is \"beta\""),
wc: cfgtesting.EnableBetaAPIFields,
}, {
name: "setting OnError in stable API version - failure",
@@ -130,7 +139,7 @@ func TestPipelineTask_OnError(t *testing.T) {
OnError: PipelineTaskContinue,
TaskRef: &TaskRef{Name: "foo"},
},
- expectedError: apis.ErrGeneric("OnError requires \"enable-api-fields\" feature gate to be \"alpha\" or \"beta\" but it is \"stable\""),
+ expectedError: apis.ErrGeneric("OnError requires \"enable-api-fields\" feature gate to be \"alpha\" but it is \"stable\""),
wc: cfgtesting.EnableStableAPIFields,
}}
for _, tt := range tests {
@@ -497,6 +506,39 @@ func TestPipelineTask_ValidateCustomTask(t *testing.T) {
}
}
+func TestPipelineTask_ValidateBundle_Failure(t *testing.T) {
+ tests := []struct {
+ name string
+ p PipelineTask
+ expectedError apis.FieldError
+ }{{
+ name: "bundle - invalid reference",
+ p: PipelineTask{
+ Name: "foo",
+ TaskRef: &TaskRef{Name: "bar", Bundle: "invalid reference"},
+ },
+ expectedError: *apis.ErrInvalidValue("invalid bundle reference (could not parse reference: invalid reference)", "taskRef.bundle"),
+ }, {
+ name: "bundle - missing taskRef name",
+ p: PipelineTask{
+ Name: "foo",
+ TaskRef: &TaskRef{Bundle: "valid-bundle"},
+ },
+ expectedError: *apis.ErrMissingField("taskRef.name"),
+ }}
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.p.validateBundle()
+ if err == nil {
+ t.Error("PipelineTask.ValidateBundles() did not return error for invalid bundle in a pipelineTask")
+ }
+ if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" {
+ t.Errorf("Pipeline.ValidateBundles() errors diff %s", diff.PrintWantGot(d))
+ }
+ })
+ }
+}
+
func TestPipelineTask_ValidateRegularTask_Success(t *testing.T) {
tests := []struct {
name string
@@ -551,6 +593,13 @@ func TestPipelineTask_ValidateRegularTask_Success(t *testing.T) {
TaskRef: &TaskRef{ResolverRef: ResolverRef{Resolver: "bar", Params: Params{}}},
},
configMap: map[string]string{"enable-api-fields": "beta"},
+ }, {
+ name: "pipeline task - use of bundle with the feature flag set",
+ tasks: PipelineTask{
+ Name: "foo",
+ TaskRef: &TaskRef{Name: "bar", Bundle: "docker.io/foo"},
+ },
+ configMap: map[string]string{"enable-tekton-oci-bundles": "true"},
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -568,7 +617,6 @@ func TestPipelineTask_ValidateRegularTask_Failure(t *testing.T) {
name string
task PipelineTask
expectedError apis.FieldError
- configMap map[string]string
}{{
name: "pipeline task - invalid taskSpec",
task: PipelineTask{
@@ -600,58 +648,22 @@ func TestPipelineTask_ValidateRegularTask_Failure(t *testing.T) {
Paths: []string{"taskRef.name"},
},
}, {
- name: "pipeline task - taskRef with resolver and k8s style name",
- task: PipelineTask{
- Name: "foo",
- TaskRef: &TaskRef{Name: "foo", ResolverRef: ResolverRef{Resolver: "git"}},
- },
- expectedError: apis.FieldError{
- Message: `invalid value: invalid URI for request`,
- Paths: []string{"taskRef.name"},
- },
- configMap: map[string]string{"enable-concise-resolver-syntax": "true"},
- }, {
- name: "pipeline task - taskRef with url-like name without enable-concise-resolver-syntax",
+ name: "pipeline task - use of bundle without the feature flag set",
task: PipelineTask{
Name: "foo",
- TaskRef: &TaskRef{Name: "https://foo.com/bar"},
+ TaskRef: &TaskRef{Name: "bar", Bundle: "docker.io/foo"},
},
- expectedError: *apis.ErrMissingField("taskRef.resolver").Also(&apis.FieldError{
- Message: `feature flag enable-concise-resolver-syntax should be set to true to use concise resolver syntax`,
- Paths: []string{"taskRef"},
- }),
+ expectedError: *apis.ErrGeneric("bundle requires \"enable-tekton-oci-bundles\" feature gate to be true but it is false"),
}, {
- name: "pipeline task - taskRef without enable-concise-resolver-syntax",
+ name: "pipeline task - taskRef with resolver and name",
task: PipelineTask{
Name: "foo",
- TaskRef: &TaskRef{Name: "https://foo.com/bar", ResolverRef: ResolverRef{Resolver: "git"}},
- },
- expectedError: apis.FieldError{
- Message: `feature flag enable-concise-resolver-syntax should be set to true to use concise resolver syntax`,
- Paths: []string{"taskRef"},
- },
- }, {
- name: "pipeline task - taskRef with url-like name without resolver",
- task: PipelineTask{
- Name: "foo",
- TaskRef: &TaskRef{Name: "https://foo.com/bar"},
- },
- expectedError: apis.FieldError{
- Message: `missing field(s)`,
- Paths: []string{"taskRef.resolver"},
- },
- configMap: map[string]string{"enable-concise-resolver-syntax": "true"},
- }, {
- name: "pipeline task - taskRef with name and params",
- task: PipelineTask{
- Name: "foo",
- TaskRef: &TaskRef{Name: "https://foo/bar", ResolverRef: ResolverRef{Resolver: "git", Params: Params{{Name: "foo", Value: ParamValue{StringVal: "bar"}}}}},
+ TaskRef: &TaskRef{Name: "foo", ResolverRef: ResolverRef{Resolver: "git"}},
},
expectedError: apis.FieldError{
Message: `expected exactly one, got both`,
- Paths: []string{"taskRef.name", "taskRef.params"},
+ Paths: []string{"taskRef.name", "taskRef.resolver"},
},
- configMap: map[string]string{"enable-concise-resolver-syntax": "true"},
}, {
name: "pipeline task - taskRef with resolver params but no resolver",
task: PipelineTask{
@@ -665,8 +677,7 @@ func TestPipelineTask_ValidateRegularTask_Failure(t *testing.T) {
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- ctx := cfgtesting.SetFeatureFlags(context.Background(), t, tt.configMap)
- err := tt.task.validateTask(ctx)
+ err := tt.task.validateTask(context.Background())
if err == nil {
t.Error("PipelineTask.validateTask() did not return error for invalid pipeline task")
}
@@ -698,8 +709,7 @@ func TestPipelineTask_Validate_Failure(t *testing.T) {
p: PipelineTask{Name: "foo", TaskSpec: &EmbeddedTask{
TypeMeta: runtime.TypeMeta{
APIVersion: "example.com",
- },
- }},
+ }}},
expectedError: *apis.ErrInvalidValue("custom task spec must specify kind", "taskSpec.kind"),
}, {
name: "custom task reference in taskref missing apiversion",
@@ -710,9 +720,19 @@ func TestPipelineTask_Validate_Failure(t *testing.T) {
p: PipelineTask{Name: "foo", TaskSpec: &EmbeddedTask{
TypeMeta: runtime.TypeMeta{
Kind: "Example",
- },
- }},
+ }}},
expectedError: *apis.ErrInvalidValue("custom task spec must specify apiVersion", "taskSpec.apiVersion"),
+ }, {
+ name: "invalid bundle without bundle name",
+ p: PipelineTask{
+ Name: "invalid-bundle",
+ TaskRef: &TaskRef{Bundle: "bundle"},
+ },
+ expectedError: apis.FieldError{
+ Message: `missing field(s)`,
+ Paths: []string{"taskRef.name"},
+ },
+ wc: enableFeatures(t, []string{"enable-tekton-oci-bundles"}),
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -768,44 +788,35 @@ func TestPipelineTaskList_Deps(t *testing.T) {
},
}, {
name: "valid pipeline with Task Results deps",
- tasks: []PipelineTask{
- {
- Name: "task-1",
- }, {
- Name: "task-2",
- Params: Params{
- {
- Value: ParamValue{
- Type: "string",
- StringVal: "$(tasks.task-1.results.result)",
- },
- },
- },
- },
+ tasks: []PipelineTask{{
+ Name: "task-1",
+ }, {
+ Name: "task-2",
+ Params: Params{{
+ Value: ParamValue{
+ Type: "string",
+ StringVal: "$(tasks.task-1.results.result)",
+ }},
+ }},
},
expectedDeps: map[string][]string{
"task-2": {"task-1"},
},
}, {
name: "valid pipeline with Task Results in Matrix deps",
- tasks: []PipelineTask{
- {
- Name: "task-1",
- }, {
- Name: "task-2",
- Matrix: &Matrix{
- Params: Params{
- {
- Value: ParamValue{
- Type: ParamTypeArray,
- ArrayVal: []string{
- "$(tasks.task-1.results.result)",
- },
- },
+ tasks: []PipelineTask{{
+ Name: "task-1",
+ }, {
+ Name: "task-2",
+ Matrix: &Matrix{
+ Params: Params{{
+ Value: ParamValue{
+ Type: ParamTypeArray,
+ ArrayVal: []string{
+ "$(tasks.task-1.results.result)",
},
- },
- },
- },
+ }},
+ }}},
},
expectedDeps: map[string][]string{
"task-2": {"task-1"},
@@ -843,38 +854,67 @@ func TestPipelineTaskList_Validate(t *testing.T) {
expectedError *apis.FieldError
wc func(context.Context) context.Context
}{{
- name: "validate all valid custom task, and regular task",
+ name: "validate all three valid custom task, bundle, and regular task",
tasks: PipelineTaskList{{
Name: "valid-custom-task",
TaskRef: &TaskRef{APIVersion: "example.com", Kind: "custom"},
+ }, {
+ Name: "valid-bundle",
+ TaskRef: &TaskRef{Bundle: "bundle", Name: "bundle"},
}, {
Name: "valid-task",
TaskRef: &TaskRef{Name: "task"},
}},
path: "tasks",
+ wc: enableFeatures(t, []string{"enable-tekton-oci-bundles"}),
}, {
- name: "validate list of tasks with valid custom task and invalid regular task",
+ name: "validate list of tasks with valid custom task and bundle but invalid regular task",
tasks: PipelineTaskList{{
Name: "valid-custom-task",
TaskRef: &TaskRef{APIVersion: "example.com", Kind: "custom"},
+ }, {
+ Name: "valid-bundle",
+ TaskRef: &TaskRef{Bundle: "bundle", Name: "bundle"},
}, {
Name: "invalid-task-without-name",
TaskRef: &TaskRef{Name: ""},
}},
path: "tasks",
- expectedError: apis.ErrGeneric(`missing field(s)`, "tasks[1].taskRef.name"),
+ expectedError: apis.ErrGeneric(`missing field(s)`, "tasks[2].taskRef.name"),
+ wc: enableFeatures(t, []string{"enable-tekton-oci-bundles"}),
+ }, {
+ name: "validate list of tasks with valid custom task but invalid bundle and invalid regular task",
+ tasks: PipelineTaskList{{
+ Name: "valid-custom-task",
+ TaskRef: &TaskRef{APIVersion: "example.com", Kind: "custom"},
+ }, {
+ Name: "invalid-bundle",
+ TaskRef: &TaskRef{Bundle: "bundle"},
+ }, {
+ Name: "invalid-task-without-name",
+ TaskRef: &TaskRef{Name: ""},
+ }},
+ path: "tasks",
+ expectedError: apis.ErrGeneric(`missing field(s)`, "tasks[2].taskRef.name").Also(
+ apis.ErrGeneric(`missing field(s)`, "tasks[1].taskRef.name")),
+ wc: enableFeatures(t, []string{"enable-tekton-oci-bundles"}),
}, {
- name: "validate all invalid tasks - custom task and regular task",
+ name: "validate all three invalid tasks - custom task, bundle and regular task",
tasks: PipelineTaskList{{
Name: "invalid-custom-task",
TaskRef: &TaskRef{APIVersion: "example.com"},
+ }, {
+ Name: "invalid-bundle",
+ TaskRef: &TaskRef{Bundle: "bundle"},
}, {
Name: "invalid-task",
TaskRef: &TaskRef{Name: ""},
}},
path: "tasks",
- expectedError: apis.ErrGeneric(`missing field(s)`, "tasks[1].taskRef.name").Also(
+ expectedError: apis.ErrGeneric(`missing field(s)`, "tasks[2].taskRef.name").Also(
+ apis.ErrGeneric(`missing field(s)`, "tasks[1].taskRef.name")).Also(
apis.ErrGeneric(`invalid value: custom task ref must specify kind`, "tasks[0].taskRef.kind")),
+ wc: enableFeatures(t, []string{"enable-tekton-oci-bundles"}),
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -907,8 +947,7 @@ func TestPipelineTask_ValidateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "foobar", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
Params: Params{{
Name: "foobar", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
}},
@@ -919,15 +958,12 @@ func TestPipelineTask_ValidateMatrix(t *testing.T) {
pt: &PipelineTask{
Name: "task",
Matrix: &Matrix{
- Include: IncludeParamsList{
- {
- Name: "duplicate-param",
- Params: Params{{
- Name: "duplicate", Value: ParamValue{Type: ParamTypeString, StringVal: "foo"},
- }},
- },
- },
- },
+ Include: IncludeParamsList{{
+ Name: "duplicate-param",
+ Params: Params{{
+ Name: "duplicate", Value: ParamValue{Type: ParamTypeString, StringVal: "foo"},
+ }}},
+ }},
Params: Params{{
Name: "duplicate", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
}},
@@ -942,8 +978,7 @@ func TestPipelineTask_ValidateMatrix(t *testing.T) {
Name: "foobar", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
}, {
Name: "foobar", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo-1", "bar-1"}},
- }},
- },
+ }}},
},
wantErrs: &apis.FieldError{
Message: `parameter names must be unique, the parameter "foobar" is also defined at`,
@@ -956,8 +991,7 @@ func TestPipelineTask_ValidateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "foobar", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
Params: Params{{
Name: "barfoo", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"bar", "foo"}},
}},
@@ -967,17 +1001,14 @@ func TestPipelineTask_ValidateMatrix(t *testing.T) {
pt: &PipelineTask{
Name: "task",
Matrix: &Matrix{
- Include: IncludeParamsList{
- {
- Name: "invalid-include",
- Params: Params{{
- Name: "foobar", Value: ParamValue{Type: ParamTypeString, StringVal: "foo"},
- }, {
- Name: "foobar", Value: ParamValue{Type: ParamTypeString, StringVal: "foo-1"},
- }},
- },
- },
- },
+ Include: IncludeParamsList{{
+ Name: "invalid-include",
+ Params: Params{{
+ Name: "foobar", Value: ParamValue{Type: ParamTypeString, StringVal: "foo"},
+ }, {
+ Name: "foobar", Value: ParamValue{Type: ParamTypeString, StringVal: "foo-1"},
+ }}},
+ }},
},
wantErrs: &apis.FieldError{
Message: `parameter names must be unique, the parameter "foobar" is also defined at`,
@@ -997,8 +1028,7 @@ func TestPipelineTask_ValidateMatrix(t *testing.T) {
Name: "foo", Value: ParamValue{Type: ParamTypeString, StringVal: "$(params.foobar[*])"},
}, {
Name: "bar", Value: ParamValue{Type: ParamTypeString, StringVal: "$(params.barfoo[*])"},
- }},
- },
+ }}},
},
}, {
name: "parameters in matrix contain result references",
@@ -1007,8 +1037,7 @@ func TestPipelineTask_ValidateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(tasks.foo-task.results.a-result)"}},
- }},
- },
+ }}},
},
}, {
name: "count of combinations of parameters in the matrix exceeds the maximum",
@@ -1019,8 +1048,7 @@ func TestPipelineTask_ValidateMatrix(t *testing.T) {
Name: "platform", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"linux", "mac", "windows"}},
}, {
Name: "browser", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"chrome", "firefox", "safari"}},
- }},
- },
+ }}},
},
wantErrs: &apis.FieldError{
Message: "expected 0 <= 9 <= 4",
@@ -1035,8 +1063,7 @@ func TestPipelineTask_ValidateMatrix(t *testing.T) {
Name: "platform", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"linux", "mac"}},
}, {
Name: "browser", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"chrome", "firefox"}},
- }},
- },
+ }}},
},
}, {
name: "valid matrix emitting string results consumed in aggregate by another pipelineTask",
@@ -1209,8 +1236,7 @@ func TestPipelineTask_IsMatrixed(t *testing.T) {
},
},
expected: true,
- },
- {
+ }, {
name: "matrixed with include",
arg: arg{
Matrix: &Matrix{
@@ -1219,14 +1245,12 @@ func TestPipelineTask_IsMatrixed(t *testing.T) {
Params: Params{{
Name: "IMAGE", Value: ParamValue{Type: ParamTypeString, StringVal: "image-1"},
}, {
- Name: "DOCKERFILE", Value: ParamValue{Type: ParamTypeString, StringVal: "path/to/Dockerfile1"},
- }},
+ Name: "DOCKERFILE", Value: ParamValue{Type: ParamTypeString, StringVal: "path/to/Dockerfile1"}}},
}},
},
},
expected: true,
- },
- {
+ }, {
name: "matrixed with params and include",
arg: arg{
Matrix: &Matrix{
@@ -1236,8 +1260,7 @@ func TestPipelineTask_IsMatrixed(t *testing.T) {
Include: IncludeParamsList{{
Name: "common-package",
Params: Params{{
- Name: "package", Value: ParamValue{Type: ParamTypeString, StringVal: "path/to/common/package/"},
- }},
+ Name: "package", Value: ParamValue{Type: ParamTypeString, StringVal: "path/to/common/package/"}}},
}},
},
},
@@ -1262,37 +1285,36 @@ func TestEmbeddedTask_IsCustomTask(t *testing.T) {
name string
et *EmbeddedTask
want bool
- }{
- {
- name: "not a custom task - APIVersion and Kind are not set",
- et: &EmbeddedTask{},
- want: false,
- }, {
- name: "not a custom task - APIVersion is not set",
- et: &EmbeddedTask{
- TypeMeta: runtime.TypeMeta{
- Kind: "Example",
- },
+ }{{
+ name: "not a custom task - APIVersion and Kind are not set",
+ et: &EmbeddedTask{},
+ want: false,
+ }, {
+ name: "not a custom task - APIVersion is not set",
+ et: &EmbeddedTask{
+ TypeMeta: runtime.TypeMeta{
+ Kind: "Example",
},
- want: false,
- }, {
- name: "not a custom task - Kind is not set",
- et: &EmbeddedTask{
- TypeMeta: runtime.TypeMeta{
- APIVersion: "example/v0",
- },
+ },
+ want: false,
+ }, {
+ name: "not a custom task - Kind is not set",
+ et: &EmbeddedTask{
+ TypeMeta: runtime.TypeMeta{
+ APIVersion: "example/v0",
},
- want: false,
- }, {
- name: "custom task - APIVersion and Kind are set",
- et: &EmbeddedTask{
- TypeMeta: runtime.TypeMeta{
- Kind: "Example",
- APIVersion: "example/v0",
- },
+ },
+ want: false,
+ }, {
+ name: "custom task - APIVersion and Kind are set",
+ et: &EmbeddedTask{
+ TypeMeta: runtime.TypeMeta{
+ Kind: "Example",
+ APIVersion: "example/v0",
},
- want: true,
},
+ want: true,
+ },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -1312,8 +1334,7 @@ func TestPipelineChecksum(t *testing.T) {
pipeline: &Pipeline{
TypeMeta: metav1.TypeMeta{
APIVersion: "tekton.dev/v1beta1",
- Kind: "Pipeline",
- },
+ Kind: "Pipeline"},
ObjectMeta: metav1.ObjectMeta{
Name: "pipeline",
Namespace: "pipeline-ns",
@@ -1328,8 +1349,7 @@ func TestPipelineChecksum(t *testing.T) {
pipeline: &Pipeline{
TypeMeta: metav1.TypeMeta{
APIVersion: "tekton.dev/v1beta1",
- Kind: "Pipeline",
- },
+ Kind: "Pipeline"},
ObjectMeta: metav1.ObjectMeta{
Name: "pipeline",
Namespace: "pipeline-ns",
diff --git a/upstream/pkg/apis/pipeline/v1beta1/pipeline_validation.go b/upstream/pkg/apis/pipeline/v1beta1/pipeline_validation.go
index 8f8d6a1f002..f1c34eee5e5 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/pipeline_validation.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/pipeline_validation.go
@@ -21,7 +21,7 @@ import (
"fmt"
"strings"
- "github.com/tektoncd/pipeline/internal/artifactref"
+ "github.com/google/go-containerregistry/pkg/name"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/validate"
"github.com/tektoncd/pipeline/pkg/internal/resultref"
@@ -92,7 +92,6 @@ func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
errs = errs.Also(validateTasksAndFinallySection(ps))
errs = errs.Also(validateFinalTasks(ps.Tasks, ps.Finally))
errs = errs.Also(validateWhenExpressions(ctx, ps.Tasks, ps.Finally))
- errs = errs.Also(validateArtifactReference(ctx, ps.Tasks, ps.Finally))
errs = errs.Also(validateMatrix(ctx, ps.Tasks).ViaField("tasks"))
errs = errs.Also(validateMatrix(ctx, ps.Finally).ViaField("finally"))
return errs
@@ -154,10 +153,10 @@ func (l PipelineTaskList) Validate(ctx context.Context, taskNames sets.String, p
}
// validateUsageOfDeclaredPipelineTaskParameters validates that all parameters referenced in the pipeline Task are declared by the pipeline Task.
-func (l PipelineTaskList) validateUsageOfDeclaredPipelineTaskParameters(ctx context.Context, additionalParams []ParamSpec, path string) (errs *apis.FieldError) {
+func (l PipelineTaskList) validateUsageOfDeclaredPipelineTaskParameters(ctx context.Context, path string) (errs *apis.FieldError) {
for i, t := range l {
if t.TaskSpec != nil {
- errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, t.TaskSpec.Steps, append(t.TaskSpec.Params, additionalParams...)).ViaFieldIndex(path, i))
+ errs = errs.Also(ValidateUsageOfDeclaredParameters(ctx, t.TaskSpec.Steps, t.TaskSpec.Params).ViaFieldIndex(path, i))
}
}
return errs
@@ -197,7 +196,7 @@ func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) {
}
if pt.OnError != "" {
- errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "OnError", config.BetaAPIFields))
+ errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "OnError", config.AlphaAPIFields))
if pt.OnError != PipelineTaskContinue && pt.OnError != PipelineTaskStopAndFail {
errs = errs.Also(apis.ErrInvalidValue(pt.OnError, "OnError", "PipelineTask OnError must be either \"continue\" or \"stopAndFail\""))
}
@@ -206,6 +205,7 @@ func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) {
}
}
+ cfg := config.FromContextOrDefaults(ctx)
// Pipeline task having taskRef/taskSpec with APIVersion is classified as custom task
switch {
case pt.TaskRef != nil && !taskKinds[pt.TaskRef.Kind]:
@@ -216,6 +216,9 @@ func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) {
errs = errs.Also(pt.validateCustomTask())
case pt.TaskSpec != nil && pt.TaskSpec.APIVersion != "":
errs = errs.Also(pt.validateCustomTask())
+ // If EnableTektonOCIBundles feature flag is on, validate bundle specifications
+ case cfg.FeatureFlags.EnableTektonOCIBundles && pt.TaskRef != nil && pt.TaskRef.Bundle != "":
+ errs = errs.Also(pt.validateBundle())
default:
errs = errs.Also(pt.validateTask(ctx))
}
@@ -353,6 +356,21 @@ func (pt PipelineTask) validateCustomTask() (errs *apis.FieldError) {
return errs
}
+// validateBundle validates bundle specifications - checking name and bundle
+func (pt PipelineTask) validateBundle() (errs *apis.FieldError) {
+ // bundle requires a TaskRef to be specified
+ if (pt.TaskRef != nil && pt.TaskRef.Bundle != "") && pt.TaskRef.Name == "" {
+ errs = errs.Also(apis.ErrMissingField("taskRef.name"))
+ }
+ // If a bundle url is specified, ensure it is parsable
+ if pt.TaskRef != nil && pt.TaskRef.Bundle != "" {
+ if _, err := name.ParseReference(pt.TaskRef.Bundle); err != nil {
+ errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("invalid bundle reference (%s)", err.Error()), "taskRef.bundle"))
+ }
+ }
+ return errs
+}
+
// validateTask validates a pipeline task or a final task for taskRef and taskSpec
func (pt PipelineTask) validateTask(ctx context.Context) (errs *apis.FieldError) {
if pt.TaskSpec != nil {
@@ -385,8 +403,8 @@ func validatePipelineWorkspacesDeclarations(wss []PipelineWorkspaceDeclaration)
// validatePipelineParameterUsage validates that parameters referenced in the Pipeline are declared by the Pipeline
func (ps *PipelineSpec) validatePipelineParameterUsage(ctx context.Context) (errs *apis.FieldError) {
- errs = errs.Also(PipelineTaskList(ps.Tasks).validateUsageOfDeclaredPipelineTaskParameters(ctx, ps.Params, "tasks"))
- errs = errs.Also(PipelineTaskList(ps.Finally).validateUsageOfDeclaredPipelineTaskParameters(ctx, ps.Params, "finally"))
+ errs = errs.Also(PipelineTaskList(ps.Tasks).validateUsageOfDeclaredPipelineTaskParameters(ctx, "tasks"))
+ errs = errs.Also(PipelineTaskList(ps.Finally).validateUsageOfDeclaredPipelineTaskParameters(ctx, "finally"))
errs = errs.Also(validatePipelineTaskParameterUsage(ps.Tasks, ps.Params).ViaField("tasks"))
errs = errs.Also(validatePipelineTaskParameterUsage(ps.Finally, ps.Params).ViaField("finally"))
return errs
@@ -495,13 +513,9 @@ func (pt *PipelineTask) extractAllParams() Params {
return allParams
}
-// containsExecutionStatusRef checks if a specified param has a reference to execution status or reason
-// $(tasks..status), $(tasks.status), or $(tasks..reason)
func containsExecutionStatusRef(p string) bool {
- if strings.HasPrefix(p, "tasks.") {
- if strings.HasSuffix(p, ".status") || strings.HasSuffix(p, ".reason") {
- return true
- }
+ if strings.HasPrefix(p, "tasks.") && strings.HasSuffix(p, ".status") {
+ return true
}
return false
}
@@ -592,17 +606,10 @@ func validateExecutionStatusVariablesExpressions(expressions []string, ptNames s
if expression == PipelineTasksAggregateStatus {
continue
}
- // check if it contains context variable accessing execution status - $(tasks.taskname.status) | $(tasks.taskname.reason)
+ // check if it contains context variable accessing execution status - $(tasks.taskname.status)
if containsExecutionStatusRef(expression) {
- var pt string
- if strings.HasSuffix(expression, ".status") {
- // strip tasks. and .status from tasks.taskname.status to further verify task name
- pt = strings.TrimSuffix(strings.TrimPrefix(expression, "tasks."), ".status")
- }
- if strings.HasSuffix(expression, ".reason") {
- // strip tasks. and .reason from tasks.taskname.reason to further verify task name
- pt = strings.TrimSuffix(strings.TrimPrefix(expression, "tasks."), ".reason")
- }
+ // strip tasks. and .status from tasks.taskname.status to further verify task name
+ pt := strings.TrimSuffix(strings.TrimPrefix(expression, "tasks."), ".status")
// report an error if the task name does not exist in the list of dag tasks
if !ptNames.Has(pt) {
errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("pipeline task %s is not defined in the pipeline", pt), fieldPath))
@@ -879,28 +886,6 @@ func validateStringResults(results []TaskResult, resultName string) (errs *apis.
return errs
}
-// validateArtifactReference ensure that the feature flag enableArtifacts is set to true when using artifacts
-func validateArtifactReference(ctx context.Context, tasks []PipelineTask, finalTasks []PipelineTask) (errs *apis.FieldError) {
- if config.FromContextOrDefaults(ctx).FeatureFlags.EnableArtifacts {
- return errs
- }
- for i, t := range tasks {
- for _, v := range t.Params.extractValues() {
- if len(artifactref.TaskArtifactRegex.FindAllStringSubmatch(v, -1)) > 0 {
- return errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use artifacts feature.", config.EnableArtifacts), "").ViaField("params").ViaFieldIndex("tasks", i))
- }
- }
- }
- for i, t := range finalTasks {
- for _, v := range t.Params.extractValues() {
- if len(artifactref.TaskArtifactRegex.FindAllStringSubmatch(v, -1)) > 0 {
- return errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use artifacts feature.", config.EnableArtifacts), "").ViaField("params").ViaFieldIndex("finally", i))
- }
- }
- }
- return errs
-}
-
// GetIndexingReferencesToArrayParams returns all strings referencing indices of PipelineRun array parameters
// from parameters, workspaces, and when expressions defined in the Pipeline's Tasks and Finally Tasks.
// For example, if a Task in the Pipeline has a parameter with a value "$(params.array-param-name[1])",
diff --git a/upstream/pkg/apis/pipeline/v1beta1/pipeline_validation_test.go b/upstream/pkg/apis/pipeline/v1beta1/pipeline_validation_test.go
index 8faf62c9182..adb5810009b 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/pipeline_validation_test.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/pipeline_validation_test.go
@@ -25,11 +25,13 @@ import (
"github.com/tektoncd/pipeline/pkg/apis/config"
cfgtesting "github.com/tektoncd/pipeline/pkg/apis/config/testing"
"github.com/tektoncd/pipeline/test/diff"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/sets"
"knative.dev/pkg/apis"
+ logtesting "knative.dev/pkg/logging/testing"
)
func TestPipeline_Validate_Success(t *testing.T) {
@@ -58,8 +60,7 @@ func TestPipeline_Validate_Success(t *testing.T) {
p: &Pipeline{
ObjectMeta: metav1.ObjectMeta{Name: "pipeline"},
Spec: PipelineSpec{
- Tasks: []PipelineTask{{
- Name: "foo",
+ Tasks: []PipelineTask{{Name: "foo",
TaskSpec: &EmbeddedTask{
TypeMeta: runtime.TypeMeta{
APIVersion: "example.dev/v0",
@@ -67,8 +68,7 @@ func TestPipeline_Validate_Success(t *testing.T) {
},
Spec: runtime.RawExtension{
Raw: []byte(`{"field1":123,"field2":"value"}`),
- },
- },
+ }},
}},
},
},
@@ -116,90 +116,6 @@ func TestPipeline_Validate_Success(t *testing.T) {
},
},
},
- }, {
- name: "propagating params into Step",
- p: &Pipeline{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pipelinelinename",
- },
- Spec: PipelineSpec{
- Params: ParamSpecs{{
- Name: "pipeline-words",
- Type: ParamTypeArray,
- Default: &ParamValue{
- Type: ParamTypeArray,
- ArrayVal: []string{"hello", "pipeline"},
- },
- }},
- Tasks: []PipelineTask{{
- Name: "echoit",
- TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
- Steps: []Step{{
- Name: "echo",
- Image: "ubuntu",
- Command: []string{"echo"},
- Args: []string{"$(params.pipeline-words[*])"},
- }},
- }},
- }},
- },
- },
- }, {
- name: "propagating object params with pipelinespec and taskspec",
- p: &Pipeline{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pipelinelinename",
- },
- Spec: PipelineSpec{
- Params: ParamSpecs{{
- Name: "pipeline-words",
- Default: &ParamValue{
- Type: ParamTypeObject,
- ObjectVal: map[string]string{"hello": "pipeline"},
- },
- Type: ParamTypeObject,
- Properties: map[string]PropertySpec{
- "hello": {Type: ParamTypeString},
- },
- }},
- Tasks: []PipelineTask{{
- Name: "echoit",
- TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
- Steps: []Step{{
- Name: "echo",
- Image: "ubuntu",
- Command: []string{"echo"},
- Args: []string{"$(params.pipeline-words.hello)"},
- }},
- }},
- }},
- },
- },
- }, {
- name: "valid pipeline with pipeline task and final task referencing artifacts in task params with enable-artifacts flag true",
- p: &Pipeline{
- ObjectMeta: metav1.ObjectMeta{Name: "pipeline"},
- Spec: PipelineSpec{
- Description: "this is an invalid pipeline referencing artifacts with enable-artifacts flag true",
- Tasks: []PipelineTask{{
- Name: "pre-task",
- TaskRef: &TaskRef{Name: "foo-task"},
- }, {
- Name: "consume-artifacts-task",
- Params: Params{{Name: "aaa", Value: ParamValue{
- Type: ParamTypeString,
- StringVal: "$(tasks.produce-artifacts-task.outputs.image)",
- }}},
- TaskSpec: &EmbeddedTask{TaskSpec: getTaskSpec()},
- }},
- },
- },
- wc: func(ctx context.Context) context.Context {
- return cfgtesting.SetFeatureFlags(ctx, t,
- map[string]string{
- "enable-artifacts": "true",
- "enable-api-fields": "alpha"})
- },
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -389,8 +305,7 @@ func TestPipeline_Validate_Failure(t *testing.T) {
return cfgtesting.SetFeatureFlags(ctx, t,
map[string]string{
"disable-inline-spec": "pipeline",
- "enable-api-fields": "alpha",
- })
+ "enable-api-fields": "alpha"})
},
}, {
name: "pipelineSpec when disable-inline-spec all",
@@ -410,8 +325,7 @@ func TestPipeline_Validate_Failure(t *testing.T) {
return cfgtesting.SetFeatureFlags(ctx, t,
map[string]string{
"disable-inline-spec": "pipeline,taskrun,pipelinerun",
- "enable-api-fields": "alpha",
- })
+ "enable-api-fields": "alpha"})
},
}, {
name: "taskSpec when disable-inline-spec",
@@ -453,78 +367,6 @@ func TestPipeline_Validate_Failure(t *testing.T) {
},
})
},
- }, {
- name: "propagating params with pipelinespec and taskspec",
- p: &Pipeline{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pipelinename",
- },
- Spec: PipelineSpec{
- Params: ParamSpecs{{
- Name: "pipeline-words",
- Type: ParamTypeArray,
- Default: &ParamValue{
- Type: ParamTypeArray,
- ArrayVal: []string{"hello", "pipeline"},
- },
- }},
- Tasks: []PipelineTask{{
- Name: "echoit",
- TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
- Steps: []Step{{
- Name: "echo",
- Image: "ubuntu",
- Command: []string{"echo"},
- Args: []string{"$(params.random-words[*])"},
- }},
- }},
- }},
- },
- },
- expectedError: apis.FieldError{
- Message: `non-existent variable in "$(params.random-words[*])"`,
- Paths: []string{"spec.tasks[0].steps[0].args[0]"},
- },
- }, {
- name: "propagating params to taskRef",
- p: &Pipeline{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pipelinename",
- },
- Spec: PipelineSpec{
- Params: ParamSpecs{{
- Name: "hello",
- Type: ParamTypeString,
- Default: &ParamValue{
- Type: ParamTypeString,
- StringVal: "hi",
- },
- }},
- Tasks: []PipelineTask{{
- Name: "echoit",
- TaskRef: &TaskRef{
- Name: "remote-task",
- },
- Params: Params{{
- Name: "param1",
- Value: ParamValue{
- Type: ParamTypeString,
- StringVal: "$(params.param1)",
- },
- }, {
- Name: "holla",
- Value: ParamValue{
- Type: ParamTypeString,
- StringVal: "$(params.hello)",
- },
- }},
- }},
- },
- },
- expectedError: apis.FieldError{
- Message: `non-existent variable in "$(params.param1)"`,
- Paths: []string{"spec.tasks[0].params[param1]"},
- },
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -1224,59 +1066,6 @@ func TestPipelineSpec_Validate_Failure(t *testing.T) {
Message: `must not set the field(s)`,
Paths: []string{"finally[0].taskSpec.resources"},
},
- }, {
- name: "invalid pipeline with one pipeline task referencing artifacts in task params with enable-artifacts flag false",
- ps: &PipelineSpec{
- Description: "this is an invalid pipeline referencing artifacts with enable-artifacts flag false",
- Tasks: []PipelineTask{{
- Name: "pre-task",
- TaskRef: &TaskRef{Name: "foo-task"},
- }, {
- Name: "consume-artifacts-task",
- Params: Params{{Name: "aaa", Value: ParamValue{
- Type: ParamTypeString,
- StringVal: "$(tasks.produce-artifacts-task.outputs.image)",
- }}},
- TaskSpec: &EmbeddedTask{TaskSpec: getTaskSpec()},
- }},
- },
- expectedError: apis.FieldError{
- Message: `feature flag enable-artifacts should be set to true to use artifacts feature.`,
- Paths: []string{"tasks[1].params"},
- },
- wc: func(ctx context.Context) context.Context {
- return cfgtesting.SetFeatureFlags(ctx, t,
- map[string]string{
- "enable-artifacts": "false",
- "enable-api-fields": "alpha"})
- },
- }, {
- name: "invalid pipeline with one final pipeline task referencing artifacts in params with enable-artifacts flag false",
- ps: &PipelineSpec{
- Description: "this is an invalid pipeline referencing artifacts with enable-artifacts flag false",
- Tasks: []PipelineTask{{
- Name: "pre-task",
- TaskRef: &TaskRef{Name: "foo-task"},
- }},
- Finally: []PipelineTask{{
- Name: "consume-artifacts-task",
- Params: Params{{Name: "aaa", Value: ParamValue{
- Type: ParamTypeString,
- StringVal: "$(tasks.produce-artifacts-task.outputs.image)",
- }}},
- TaskSpec: &EmbeddedTask{TaskSpec: getTaskSpec()},
- }},
- },
- wc: func(ctx context.Context) context.Context {
- return cfgtesting.SetFeatureFlags(ctx, t,
- map[string]string{
- "enable-artifacts": "false",
- "enable-api-fields": "alpha"})
- },
- expectedError: apis.FieldError{
- Message: `feature flag enable-artifacts should be set to true to use artifacts feature.`,
- Paths: []string{"finally[0].params"},
- },
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -1496,66 +1285,64 @@ func TestFinallyTaskResultsToPipelineResults_Success(t *testing.T) {
name string
p *Pipeline
wc func(context.Context) context.Context
- }{
- {
- name: "valid pipeline with pipeline results",
- p: &Pipeline{
- ObjectMeta: metav1.ObjectMeta{Name: "pipeline"},
- Spec: PipelineSpec{
- Results: []PipelineResult{{
- Name: "initialized",
- Value: *NewStructuredValues("$(tasks.clone-app-repo.results.initialized)"),
- }},
- Tasks: []PipelineTask{{
- Name: "clone-app-repo",
- TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
- Results: []TaskResult{{
- Name: "initialized",
- Type: "string",
- }},
- Steps: []Step{{
- Name: "foo", Image: "bar",
- }},
+ }{{
+ name: "valid pipeline with pipeline results",
+ p: &Pipeline{
+ ObjectMeta: metav1.ObjectMeta{Name: "pipeline"},
+ Spec: PipelineSpec{
+ Results: []PipelineResult{{
+ Name: "initialized",
+ Value: *NewStructuredValues("$(tasks.clone-app-repo.results.initialized)"),
+ }},
+ Tasks: []PipelineTask{{
+ Name: "clone-app-repo",
+ TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
+ Results: []TaskResult{{
+ Name: "initialized",
+ Type: "string",
+ }},
+ Steps: []Step{{
+ Name: "foo", Image: "bar",
}},
}},
- },
+ }},
},
- }, {
- name: "referencing existent finally task result",
- p: &Pipeline{
- ObjectMeta: metav1.ObjectMeta{Name: "pipeline"},
- Spec: PipelineSpec{
- Results: []PipelineResult{{
- Name: "initialized",
- Value: *NewStructuredValues("$(finally.check-git-commit.results.init)"),
- }},
- Tasks: []PipelineTask{{
- Name: "clone-app-repo",
- TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
- Results: []TaskResult{{
- Name: "current-date-unix-timestamp",
- Type: "string",
- }},
- Steps: []Step{{
- Name: "foo", Image: "bar",
- }},
+ }}, {
+ name: "referencing existent finally task result",
+ p: &Pipeline{
+ ObjectMeta: metav1.ObjectMeta{Name: "pipeline"},
+ Spec: PipelineSpec{
+ Results: []PipelineResult{{
+ Name: "initialized",
+ Value: *NewStructuredValues("$(finally.check-git-commit.results.init)"),
+ }},
+ Tasks: []PipelineTask{{
+ Name: "clone-app-repo",
+ TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
+ Results: []TaskResult{{
+ Name: "current-date-unix-timestamp",
+ Type: "string",
+ }},
+ Steps: []Step{{
+ Name: "foo", Image: "bar",
}},
}},
- Finally: []PipelineTask{{
- Name: "check-git-commit",
- TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
- Results: []TaskResult{{
- Name: "init",
- Type: "string",
- }},
- Steps: []Step{{
- Name: "foo2", Image: "bar",
- }},
+ }},
+ Finally: []PipelineTask{{
+ Name: "check-git-commit",
+ TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
+ Results: []TaskResult{{
+ Name: "init",
+ Type: "string",
+ }},
+ Steps: []Step{{
+ Name: "foo2", Image: "bar",
}},
}},
- },
+ }},
},
},
+ },
}
for _, tt := range tests {
@@ -1799,8 +1586,7 @@ func TestValidatePipelineParameterVariables_Success(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.baz)", "and", "$(params.foo-is-baz)"}},
- }},
- },
+ }}},
}},
}, {
name: "valid star array parameter variables in matrix",
@@ -1815,8 +1601,7 @@ func TestValidatePipelineParameterVariables_Success(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.baz[*])", "and", "$(params.foo-is-baz[*])"}},
- }},
- },
+ }}},
}},
}, {
name: "array param - using the whole variable as a param's value that is intended to be array type",
@@ -1842,13 +1627,9 @@ func TestValidatePipelineParameterVariables_Success(t *testing.T) {
Matrix: &Matrix{
Include: IncludeParamsList{{
Name: "build-1",
- Params: Params{
- {
- Name: "a-param", Value: ParamValue{Type: ParamTypeString, StringVal: "$(params.baz)"},
- },
- },
- }},
- },
+ Params: Params{{
+ Name: "a-param", Value: ParamValue{Type: ParamTypeString, StringVal: "$(params.baz)"}},
+ }}}},
}},
}, {
name: "object param - using single individual variable in string param",
@@ -1940,8 +1721,7 @@ func TestValidatePipelineParameterVariables_Success(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.myObject.key1)", "and", "$(params.myObject.key2)"}},
- }},
- },
+ }}},
}},
}, {
name: "object param - using the whole variable as a param's value that is intended to be object type",
@@ -2123,8 +1903,7 @@ func TestValidatePipelineDeclaredParameterUsage_Failure(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.does-not-exist)"}},
- }},
- },
+ }}},
}},
expectedError: apis.FieldError{
Message: `non-existent variable in "$(params.does-not-exist)"`,
@@ -2141,8 +1920,7 @@ func TestValidatePipelineDeclaredParameterUsage_Failure(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.foo)", "and", "$(params.does-not-exist)"}},
- }},
- },
+ }}},
}},
expectedError: apis.FieldError{
Message: `non-existent variable in "$(params.does-not-exist)"`,
@@ -2160,9 +1938,7 @@ func TestValidatePipelineDeclaredParameterUsage_Failure(t *testing.T) {
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.foo)"}},
}, {
- Name: "b-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.does-not-exist)"}},
- }},
- },
+ Name: "b-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.does-not-exist)"}}}}},
}},
expectedError: apis.FieldError{
Message: `non-existent variable in "$(params.does-not-exist)"`,
@@ -2183,8 +1959,7 @@ func TestValidatePipelineDeclaredParameterUsage_Failure(t *testing.T) {
}, {
Name: "b-param", Value: ParamValue{Type: ParamTypeString, StringVal: "$(params.does-not-exist)"},
}},
- }},
- },
+ }}},
}},
expectedError: apis.FieldError{
Message: `non-existent variable in "$(params.does-not-exist)"`,
@@ -2323,8 +2098,7 @@ func TestValidatePipelineDeclaredParameterUsage_Failure(t *testing.T) {
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.myObject.key1)"}},
}, {
Name: "b-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(params.myObject.non-exist-key)"}},
- }},
- },
+ }}},
}},
expectedError: apis.FieldError{
Message: `non-existent variable in "$(params.myObject.non-exist-key)"`,
@@ -2352,8 +2126,7 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) {
expectedError apis.FieldError
configMap map[string]string
}{
- {
- name: "param enum with array type - failure",
+ {name: "param enum with array type - failure",
params: []ParamSpec{{
Name: "param2",
Type: ParamTypeArray,
@@ -2578,8 +2351,7 @@ func TestValidatePipelineParameterVariables_Failure(t *testing.T) {
Message: `parameter names must be unique, the parameter "duplicate-param" is also defined at`,
Paths: []string{"[0].params[1].name, [0].params[2].name"},
},
- },
- }
+ }}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
@@ -3482,8 +3254,7 @@ func TestContextValid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param-mat", Value: ParamValue{ArrayVal: []string{"$(context.pipeline.name)"}},
- }},
- },
+ }}},
}},
}, {
name: "valid string context variable for PipelineRun name",
@@ -3496,8 +3267,7 @@ func TestContextValid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param-mat", Value: ParamValue{ArrayVal: []string{"$(context.pipelineRun.name)"}},
- }},
- },
+ }}},
}},
}, {
name: "valid string context variable for PipelineRun namespace",
@@ -3510,8 +3280,7 @@ func TestContextValid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param-mat", Value: ParamValue{ArrayVal: []string{"$(context.pipelineRun.namespace)"}},
- }},
- },
+ }}},
}},
}, {
name: "valid string context variable for PipelineRun uid",
@@ -3524,8 +3293,7 @@ func TestContextValid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param-mat", Value: ParamValue{ArrayVal: []string{"$(context.pipelineRun.uid)"}},
- }},
- },
+ }}},
}},
}, {
name: "valid array context variables for Pipeline and PipelineRun names",
@@ -3538,8 +3306,7 @@ func TestContextValid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param-mat", Value: ParamValue{ArrayVal: []string{"$(context.pipeline.name)", "and", "$(context.pipelineRun.name)"}},
- }},
- },
+ }}},
}},
}, {
name: "valid string context variable for PipelineTask retries",
@@ -3552,8 +3319,7 @@ func TestContextValid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{StringVal: "$(context.pipelineTask.retries)"},
- }},
- },
+ }}},
}},
}, {
name: "valid array context variable for PipelineTask retries",
@@ -3566,8 +3332,7 @@ func TestContextValid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param-mat", Value: ParamValue{ArrayVal: []string{"$(context.pipelineTask.retries)"}},
- }},
- },
+ }}},
}},
}, {
name: "valid string context variable for Pipeline name in include params",
@@ -3581,10 +3346,8 @@ func TestContextValid(t *testing.T) {
Include: IncludeParamsList{{
Name: "build-1",
Params: Params{{
- Name: "a-param-mat", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipeline.name)"},
- }},
- }},
- },
+ Name: "a-param-mat", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipeline.name)"}}},
+ }}},
}},
}, {
name: "valid string context variable for PipelineTask retries in matrix include",
@@ -3598,10 +3361,8 @@ func TestContextValid(t *testing.T) {
Include: IncludeParamsList{{
Name: "build-1",
Params: Params{{
- Name: "a-param-mat", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipelineTask.retries)"},
- }},
- }},
- },
+ Name: "a-param-mat", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipelineTask.retries)"}}},
+ }}},
}},
}}
for _, tt := range tests {
@@ -3629,8 +3390,7 @@ func TestContextInvalid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param-foo", Value: ParamValue{ArrayVal: []string{"$(context.pipeline.missing-foo)"}},
- }},
- },
+ }}},
}},
expectedError: *apis.ErrGeneric("").Also(&apis.FieldError{
Message: `non-existent variable in "$(context.pipeline.missing)"`,
@@ -3650,8 +3410,7 @@ func TestContextInvalid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param-foo", Value: ParamValue{ArrayVal: []string{"$(context.pipelineRun.missing-foo)"}},
- }},
- },
+ }}},
}},
expectedError: *apis.ErrGeneric("").Also(&apis.FieldError{
Message: `non-existent variable in "$(context.pipelineRun.missing)"`,
@@ -3671,8 +3430,7 @@ func TestContextInvalid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param-foo", Value: ParamValue{ArrayVal: []string{"$(context.pipelineTask.missing-foo)"}},
- }},
- },
+ }}},
}},
expectedError: *apis.ErrGeneric("").Also(&apis.FieldError{
Message: `non-existent variable in "$(context.pipelineTask.missing)"`,
@@ -3692,8 +3450,7 @@ func TestContextInvalid(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{ArrayVal: []string{"$(context.pipeline.missing-foo)", "$(context.pipelineTask.missing-foo)", "$(context.pipelineRun.missing-foo)"}},
- }},
- },
+ }}},
}},
expectedError: *apis.ErrGeneric(`non-existent variable in "$(context.pipeline.missing)"`, "value").
Also(apis.ErrGeneric(`non-existent variable in "$(context.pipelineRun.missing)"`, "value")).
@@ -3710,10 +3467,8 @@ func TestContextInvalid(t *testing.T) {
Include: IncludeParamsList{{
Name: "build-1",
Params: Params{{
- Name: "a-param-foo", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipeline.missing)"},
- }},
- }},
- },
+ Name: "a-param-foo", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipeline.missing)"}}},
+ }}},
}},
expectedError: *apis.ErrGeneric("").Also(&apis.FieldError{
Message: `non-existent variable in "$(context.pipeline.missing)"`,
@@ -3728,10 +3483,8 @@ func TestContextInvalid(t *testing.T) {
Include: IncludeParamsList{{
Name: "build-1",
Params: Params{{
- Name: "a-param-foo", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipelineRun.missing)"},
- }},
- }},
- },
+ Name: "a-param-foo", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipelineRun.missing)"}}},
+ }}},
}},
expectedError: *apis.ErrGeneric("").Also(&apis.FieldError{
Message: `non-existent variable in "$(context.pipelineRun.missing)"`,
@@ -3746,10 +3499,8 @@ func TestContextInvalid(t *testing.T) {
Include: IncludeParamsList{{
Name: "build-1",
Params: Params{{
- Name: "a-param-foo", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipelineTask.missing)"},
- }},
- }},
- },
+ Name: "a-param-foo", Value: ParamValue{Type: ParamTypeString, StringVal: "$(context.pipelineTask.missing)"}}},
+ }}},
}},
expectedError: *apis.ErrGeneric("").Also(&apis.FieldError{
Message: `non-existent variable in "$(context.pipelineTask.missing)"`,
@@ -3785,8 +3536,6 @@ func TestPipelineTasksExecutionStatus(t *testing.T) {
TaskRef: &TaskRef{Name: "bar-task"},
Params: Params{{
Name: "foo-status", Value: ParamValue{Type: ParamTypeString, StringVal: "$(tasks.foo.status)"},
- }, {
- Name: "foo-reason", Value: ParamValue{Type: ParamTypeString, StringVal: "$(tasks.foo.reason)"},
}, {
Name: "tasks-status", Value: ParamValue{Type: ParamTypeString, StringVal: "$(tasks.status)"},
}},
@@ -3794,10 +3543,6 @@ func TestPipelineTasksExecutionStatus(t *testing.T) {
Input: "$(tasks.foo.status)",
Operator: selection.In,
Values: []string{"Failure"},
- }, {
- Input: "$(tasks.foo.reason)",
- Operator: selection.In,
- Values: []string{"Failed"},
}, {
Input: "$(tasks.status)",
Operator: selection.In,
@@ -4035,8 +3780,7 @@ func TestMatrixIncompatibleAPIVersions(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
}
tests := []struct {
name string
@@ -4099,8 +3843,7 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "foobar", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
Params: Params{{
Name: "foobar", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
}},
@@ -4114,8 +3857,7 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "foobar", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
Params: Params{{
Name: "barfoo", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"bar", "foo"}},
}},
@@ -4128,16 +3870,14 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(tasks.foo-task.results.a-result)"}},
- }},
- },
+ }}},
}, {
Name: "b-task",
TaskRef: &TaskRef{Name: "b-task"},
Matrix: &Matrix{
Params: Params{{
Name: "b-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(tasks.bar-task.results.b-result)"}},
- }},
- },
+ }}},
}},
}, {
name: "parameters in matrix contain whole array results references",
@@ -4147,8 +3887,7 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"$(tasks.foo-task.results.a-task-results[*])"}},
- }},
- },
+ }}},
}},
}, {
name: "results from matrixed task consumed in tasks through parameters",
@@ -4158,8 +3897,7 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
}, {
Name: "b-task",
TaskRef: &TaskRef{Name: "b-task"},
@@ -4175,8 +3913,7 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
}},
finally: PipelineTaskList{{
Name: "b-task",
@@ -4193,8 +3930,7 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
}, {
Name: "b-task",
TaskRef: &TaskRef{Name: "b-task"},
@@ -4217,8 +3953,7 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
}, {
Name: "b-task",
TaskRef: &TaskRef{Name: "b-task"},
@@ -4236,8 +3971,7 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
}},
finally: PipelineTaskList{{
Name: "b-task",
@@ -4256,8 +3990,7 @@ func Test_validateMatrix(t *testing.T) {
Matrix: &Matrix{
Params: Params{{
Name: "a-param", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }},
- },
+ }}},
}, {
Name: "b-task",
TaskRef: &TaskRef{Name: "b-task"},
@@ -4286,8 +4019,7 @@ func Test_validateMatrix(t *testing.T) {
Name: "platform", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"linux", "mac"}},
}, {
Name: "browser", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"chrome", "safari"}},
- }},
- },
+ }}},
}, {
Name: "echoarrayurl",
TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
@@ -4309,8 +4041,7 @@ func Test_validateMatrix(t *testing.T) {
Params: ParamSpecs{{
Name: "platform",
}, {
- Name: "browser",
- }},
+ Name: "browser"}},
Results: []TaskResult{{
Name: "report-url",
Type: ResultsTypeString,
@@ -4319,8 +4050,7 @@ func Test_validateMatrix(t *testing.T) {
Name: "produce-report-url",
Image: "alpine",
Script: ` |
- echo -n "https://api.example/get-report/$(params.platform)-$(params.browser)" | tee $(results.report-url.path)`,
- }},
+ echo -n "https://api.example/get-report/$(params.platform)-$(params.browser)" | tee $(results.report-url.path)`}},
}},
}, {
Name: "task-consuming-results",
@@ -4354,8 +4084,7 @@ func Test_validateMatrix(t *testing.T) {
Name: "platform", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"linux", "mac"}},
}, {
Name: "browser", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"chrome", "safari"}},
- }},
- },
+ }}},
}, {
Name: "task-consuming-results",
TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
@@ -4385,16 +4114,14 @@ func Test_validateMatrix(t *testing.T) {
Name: "platform", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"linux", "mac"}},
}, {
Name: "browser", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"chrome", "safari"}},
- }},
- },
+ }}},
}, {
Name: "taskwithresult",
TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
Params: ParamSpecs{{
Name: "platform",
}, {
- Name: "browser",
- }},
+ Name: "browser"}},
Results: []TaskResult{{
Name: "report-url",
Type: ResultsTypeString,
@@ -4403,8 +4130,7 @@ func Test_validateMatrix(t *testing.T) {
Name: "produce-report-url",
Image: "alpine",
Script: ` |
- echo -n "https://api.example/get-report/$(params.platform)-$(params.browser)" | tee $(results.report-url.path)`,
- }},
+ echo -n "https://api.example/get-report/$(params.platform)-$(params.browser)" | tee $(results.report-url.path)`}},
}},
}, {
Name: "task-consuming-results",
@@ -4437,8 +4163,7 @@ func Test_validateMatrix(t *testing.T) {
Params: ParamSpecs{{
Name: "platform",
}, {
- Name: "browser",
- }},
+ Name: "browser"}},
Results: []TaskResult{{
Name: "array-result",
Type: ResultsTypeArray,
@@ -4447,24 +4172,21 @@ func Test_validateMatrix(t *testing.T) {
Name: "produce-array-result",
Image: "alpine",
Script: ` |
- echo -n "[\"${params.platform}\",\"${params.browser}\"]" | tee $(results.array-result.path)`,
- }},
+ echo -n "[\"${params.platform}\",\"${params.browser}\"]" | tee $(results.array-result.path)`}},
}},
Matrix: &Matrix{
Params: Params{{
Name: "platform", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"linux", "mac"}},
}, {
Name: "browser", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"chrome", "safari"}},
- }},
- },
+ }}},
}, {
Name: "taskwithresult",
TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
Params: ParamSpecs{{
Name: "platform",
}, {
- Name: "browser",
- }},
+ Name: "browser"}},
Results: []TaskResult{{
Name: "array-result",
Type: ResultsTypeArray,
@@ -4473,8 +4195,7 @@ func Test_validateMatrix(t *testing.T) {
Name: "produce-array-result",
Image: "alpine",
Script: ` |
- echo -n "https://api.example/get-report/$(params.platform)-$(params.browser)" | tee $(results.array-result.path)`,
- }},
+ echo -n "https://api.example/get-report/$(params.platform)-$(params.browser)" | tee $(results.array-result.path)`}},
}},
}, {
Name: "task-consuming-results",
@@ -4507,8 +4228,7 @@ func Test_validateMatrix(t *testing.T) {
Params: ParamSpecs{{
Name: "platform",
}, {
- Name: "browser",
- }},
+ Name: "browser"}},
Results: []TaskResult{{
Name: "array-result",
Type: ResultsTypeArray,
@@ -4517,24 +4237,21 @@ func Test_validateMatrix(t *testing.T) {
Name: "produce-array-result",
Image: "alpine",
Script: ` |
- echo -n "[\"${params.platform}\",\"${params.browser}\"]" | tee $(results.array-result.path)`,
- }},
+ echo -n "[\"${params.platform}\",\"${params.browser}\"]" | tee $(results.array-result.path)`}},
}},
Matrix: &Matrix{
Params: Params{{
Name: "platform", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"linux", "mac"}},
}, {
Name: "browser", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"chrome", "safari"}},
- }},
- },
+ }}},
}, {
Name: "task-consuming-results",
TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
Params: ParamSpecs{{
Name: "platform",
}, {
- Name: "browser",
- }},
+ Name: "browser"}},
Results: []TaskResult{{
Name: "report-url",
Type: ResultsTypeString,
@@ -4543,8 +4260,7 @@ func Test_validateMatrix(t *testing.T) {
Name: "produce-report-url",
Image: "alpine",
Script: ` |
- echo -n "https://api.example/get-report/$(params.platform)-$(params.browser)" | tee $(results.report-url.path)`,
- }},
+ echo -n "https://api.example/get-report/$(params.platform)-$(params.browser)" | tee $(results.report-url.path)`}},
}},
Params: Params{{
Name: "b-param", Value: ParamValue{Type: ParamTypeString, StringVal: "$(tasks.matrix-emitting-results-embedded.results.report-url[0])"},
@@ -4561,16 +4277,14 @@ func Test_validateMatrix(t *testing.T) {
Name: "platform", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"linux", "mac"}},
}, {
Name: "browser", Value: ParamValue{Type: ParamTypeArray, ArrayVal: []string{"chrome", "safari"}},
- }},
- },
+ }}},
}, {
Name: "taskwithresult",
TaskSpec: &EmbeddedTask{TaskSpec: TaskSpec{
Params: ParamSpecs{{
Name: "platform",
}, {
- Name: "browser",
- }},
+ Name: "browser"}},
Results: []TaskResult{{
Name: "array-result",
Type: ResultsTypeArray,
@@ -4579,8 +4293,7 @@ func Test_validateMatrix(t *testing.T) {
Name: "produce-array-result",
Image: "alpine",
Script: ` |
- echo -n "[\"${params.platform}\",\"${params.browser}\"]" | tee $(results.array-result.path)`,
- }},
+ echo -n "[\"${params.platform}\",\"${params.browser}\"]" | tee $(results.array-result.path)`}},
}},
}, {
Name: "task-consuming-results",
@@ -4620,6 +4333,22 @@ func getTaskSpec() TaskSpec {
}
}
+func enableFeatures(t *testing.T, features []string) func(context.Context) context.Context {
+ t.Helper()
+ return func(ctx context.Context) context.Context {
+ s := config.NewStore(logtesting.TestLogger(t))
+ data := make(map[string]string)
+ for _, f := range features {
+ data[f] = "true"
+ }
+ s.OnConfigChanged(&corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName()},
+ Data: data,
+ })
+ return s.ToContext(ctx)
+ }
+}
+
// TestPipelineWithBetaFields tests the beta API-driven features of
// PipelineSpec are correctly governed `enable-api-fields`, which must
// be set to "alpha" or "beta".
@@ -4689,144 +4418,144 @@ func TestGetIndexingReferencesToArrayParams(t *testing.T) {
name string
spec PipelineSpec
want sets.String
- }{
- {
- name: "references in task params",
- spec: PipelineSpec{
- Params: []ParamSpec{
- {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
- {Name: "second-param", Type: ParamTypeString},
- },
- Tasks: []PipelineTask{{
- Params: Params{
- {Name: "first-task-first-param", Value: *NewStructuredValues("$(params.first-param[1])")},
- {Name: "first-task-second-param", Value: *NewStructuredValues("$(params.second-param[0])")},
- {Name: "first-task-third-param", Value: *NewStructuredValues("static value")},
- },
- }},
+ }{{
+ name: "references in task params",
+ spec: PipelineSpec{
+ Params: []ParamSpec{
+ {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
+ {Name: "second-param", Type: ParamTypeString},
},
- want: sets.NewString("$(params.first-param[1])", "$(params.second-param[0])"),
- }, {
- name: "references in when expression",
- spec: PipelineSpec{
- Params: []ParamSpec{
- {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
- {Name: "second-param", Type: ParamTypeString},
+ Tasks: []PipelineTask{{
+ Params: Params{
+ {Name: "first-task-first-param", Value: *NewStructuredValues("$(params.first-param[1])")},
+ {Name: "first-task-second-param", Value: *NewStructuredValues("$(params.second-param[0])")},
+ {Name: "first-task-third-param", Value: *NewStructuredValues("static value")},
},
- Tasks: []PipelineTask{{
- WhenExpressions: []WhenExpression{{
- Input: "$(params.first-param[1])",
- Operator: selection.In,
- Values: []string{"$(params.second-param[0])"},
- }},
- }},
+ }},
+ },
+ want: sets.NewString("$(params.first-param[1])", "$(params.second-param[0])"),
+ }, {
+ name: "references in when expression",
+ spec: PipelineSpec{
+ Params: []ParamSpec{
+ {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
+ {Name: "second-param", Type: ParamTypeString},
},
- want: sets.NewString("$(params.first-param[1])", "$(params.second-param[0])"),
- }, {
- name: "nested references in task params",
- spec: PipelineSpec{
- Params: []ParamSpec{
- {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
- {Name: "second-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
- },
- Tasks: []PipelineTask{{
- Params: Params{
- {Name: "first-task-first-param", Value: *NewStructuredValues("$(input.workspace.$(params.first-param[0]))")},
- {Name: "first-task-second-param", Value: *NewStructuredValues("$(input.workspace.$(params.second-param[1]))")},
- },
+ Tasks: []PipelineTask{{
+ WhenExpressions: []WhenExpression{{
+ Input: "$(params.first-param[1])",
+ Operator: selection.In,
+ Values: []string{"$(params.second-param[0])"},
}},
+ }},
+ },
+ want: sets.NewString("$(params.first-param[1])", "$(params.second-param[0])"),
+ }, {
+ name: "nested references in task params",
+ spec: PipelineSpec{
+ Params: []ParamSpec{
+ {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
+ {Name: "second-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
},
- want: sets.NewString("$(params.first-param[0])", "$(params.second-param[1])"),
- }, {
- name: "array parameter",
- spec: PipelineSpec{
- Params: []ParamSpec{
- {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default", "array", "value")},
- {Name: "second-param", Type: ParamTypeArray},
+ Tasks: []PipelineTask{{
+ Params: Params{
+ {Name: "first-task-first-param", Value: *NewStructuredValues("$(input.workspace.$(params.first-param[0]))")},
+ {Name: "first-task-second-param", Value: *NewStructuredValues("$(input.workspace.$(params.second-param[1]))")},
},
- Tasks: []PipelineTask{{
- Params: Params{
- {Name: "first-task-first-param", Value: *NewStructuredValues("firstelement", "$(params.first-param)")},
- {Name: "first-task-second-param", Value: *NewStructuredValues("firstelement", "$(params.second-param[0])")},
- },
- }},
+ }},
+ },
+ want: sets.NewString("$(params.first-param[0])", "$(params.second-param[1])"),
+ }, {
+ name: "array parameter",
+ spec: PipelineSpec{
+ Params: []ParamSpec{
+ {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default", "array", "value")},
+ {Name: "second-param", Type: ParamTypeArray},
},
- want: sets.NewString("$(params.second-param[0])"),
- }, {
- name: "references in finally params",
- spec: PipelineSpec{
- Params: []ParamSpec{
- {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
- {Name: "second-param", Type: ParamTypeArray},
+ Tasks: []PipelineTask{{
+ Params: Params{
+ {Name: "first-task-first-param", Value: *NewStructuredValues("firstelement", "$(params.first-param)")},
+ {Name: "first-task-second-param", Value: *NewStructuredValues("firstelement", "$(params.second-param[0])")},
},
- Finally: []PipelineTask{{
- Params: Params{
- {Name: "final-task-first-param", Value: *NewStructuredValues("$(params.first-param[0])")},
- {Name: "final-task-second-param", Value: *NewStructuredValues("$(params.second-param[1])")},
- },
- }},
+ }},
+ },
+ want: sets.NewString("$(params.second-param[0])"),
+ }, {
+ name: "references in finally params",
+ spec: PipelineSpec{
+ Params: []ParamSpec{
+ {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
+ {Name: "second-param", Type: ParamTypeArray},
},
- want: sets.NewString("$(params.first-param[0])", "$(params.second-param[1])"),
- }, {
- name: "references in finally when expressions",
- spec: PipelineSpec{
- Params: []ParamSpec{
- {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
- {Name: "second-param", Type: ParamTypeArray},
+ Finally: []PipelineTask{{
+ Params: Params{
+ {Name: "final-task-first-param", Value: *NewStructuredValues("$(params.first-param[0])")},
+ {Name: "final-task-second-param", Value: *NewStructuredValues("$(params.second-param[1])")},
},
- Finally: []PipelineTask{{
- WhenExpressions: WhenExpressions{{
- Input: "$(params.first-param[0])",
- Operator: selection.In,
- Values: []string{"$(params.second-param[1])"},
- }},
+ }},
+ },
+ want: sets.NewString("$(params.first-param[0])", "$(params.second-param[1])"),
+ }, {
+ name: "references in finally when expressions",
+ spec: PipelineSpec{
+ Params: []ParamSpec{
+ {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
+ {Name: "second-param", Type: ParamTypeArray},
+ },
+ Finally: []PipelineTask{{
+ WhenExpressions: WhenExpressions{{
+ Input: "$(params.first-param[0])",
+ Operator: selection.In,
+ Values: []string{"$(params.second-param[1])"},
}},
+ }},
+ },
+ want: sets.NewString("$(params.first-param[0])", "$(params.second-param[1])"),
+ }, {
+ name: "parameter references with bracket notation and special characters",
+ spec: PipelineSpec{
+ Params: []ParamSpec{
+ {Name: "first.param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
+ {Name: "second/param", Type: ParamTypeArray},
+ {Name: "third.param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
+ {Name: "fourth/param", Type: ParamTypeArray},
},
- want: sets.NewString("$(params.first-param[0])", "$(params.second-param[1])"),
- }, {
- name: "parameter references with bracket notation and special characters",
- spec: PipelineSpec{
- Params: []ParamSpec{
- {Name: "first.param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
- {Name: "second/param", Type: ParamTypeArray},
- {Name: "third.param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
- {Name: "fourth/param", Type: ParamTypeArray},
+ Tasks: []PipelineTask{{
+ Params: Params{
+ {Name: "first-task-first-param", Value: *NewStructuredValues(`$(params["first.param"][0])`)},
+ {Name: "first-task-second-param", Value: *NewStructuredValues(`$(params["first.param"][0])`)},
+ {Name: "first-task-third-param", Value: *NewStructuredValues(`$(params['third.param'][1])`)},
+ {Name: "first-task-fourth-param", Value: *NewStructuredValues(`$(params['fourth/param'][1])`)},
+ {Name: "first-task-fifth-param", Value: *NewStructuredValues("static value")},
},
- Tasks: []PipelineTask{{
- Params: Params{
- {Name: "first-task-first-param", Value: *NewStructuredValues(`$(params["first.param"][0])`)},
- {Name: "first-task-second-param", Value: *NewStructuredValues(`$(params["first.param"][0])`)},
- {Name: "first-task-third-param", Value: *NewStructuredValues(`$(params['third.param'][1])`)},
- {Name: "first-task-fourth-param", Value: *NewStructuredValues(`$(params['fourth/param'][1])`)},
- {Name: "first-task-fifth-param", Value: *NewStructuredValues("static value")},
- },
- }},
+ }},
+ },
+ want: sets.NewString(`$(params["first.param"][0])`, `$(params["first.param"][0])`, `$(params['third.param'][1])`, `$(params['fourth/param'][1])`),
+ }, {
+ name: "single parameter in workspace subpath",
+ spec: PipelineSpec{
+ Params: []ParamSpec{
+ {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
+ {Name: "second-param", Type: ParamTypeArray},
},
- want: sets.NewString(`$(params["first.param"][0])`, `$(params["first.param"][0])`, `$(params['third.param'][1])`, `$(params['fourth/param'][1])`),
- }, {
- name: "single parameter in workspace subpath",
- spec: PipelineSpec{
- Params: []ParamSpec{
- {Name: "first-param", Type: ParamTypeArray, Default: NewStructuredValues("default-value", "default-value-again")},
- {Name: "second-param", Type: ParamTypeArray},
+ Tasks: []PipelineTask{{
+ Params: Params{
+ {Name: "first-task-first-param", Value: *NewStructuredValues("$(params.first-param[0])")},
+ {Name: "first-task-second-param", Value: *NewStructuredValues("static value")},
},
- Tasks: []PipelineTask{{
- Params: Params{
- {Name: "first-task-first-param", Value: *NewStructuredValues("$(params.first-param[0])")},
- {Name: "first-task-second-param", Value: *NewStructuredValues("static value")},
- },
- Workspaces: []WorkspacePipelineTaskBinding{
- {
- Name: "first-workspace",
- Workspace: "first-workspace",
- SubPath: "$(params.second-param[1])",
- },
+ Workspaces: []WorkspacePipelineTaskBinding{
+ {
+ Name: "first-workspace",
+ Workspace: "first-workspace",
+ SubPath: "$(params.second-param[1])",
},
- }},
- },
- want: sets.NewString("$(params.first-param[0])", "$(params.second-param[1])"),
+ },
+ }},
},
+ want: sets.NewString("$(params.first-param[0])", "$(params.second-param[1])"),
+ },
} {
+ tt := tt // capture range variable
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
got := tt.spec.GetIndexingReferencesToArrayParams()
diff --git a/upstream/pkg/apis/pipeline/v1beta1/pipelineref_conversion.go b/upstream/pkg/apis/pipeline/v1beta1/pipelineref_conversion.go
index efa399b6b85..ae794c8b738 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/pipelineref_conversion.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/pipelineref_conversion.go
@@ -23,11 +23,14 @@ import (
)
func (pr PipelineRef) convertTo(ctx context.Context, sink *v1.PipelineRef) {
- sink.Name = pr.Name
+ if pr.Bundle == "" {
+ sink.Name = pr.Name
+ }
sink.APIVersion = pr.APIVersion
new := v1.ResolverRef{}
pr.ResolverRef.convertTo(ctx, &new)
sink.ResolverRef = new
+ pr.convertBundleToResolver(sink)
}
func (pr *PipelineRef) convertFrom(ctx context.Context, source v1.PipelineRef) {
@@ -37,3 +40,24 @@ func (pr *PipelineRef) convertFrom(ctx context.Context, source v1.PipelineRef) {
new.convertFrom(ctx, source.ResolverRef)
pr.ResolverRef = new
}
+
+// convertBundleToResolver converts v1beta1 bundle string to a remote reference with the bundle resolver in v1.
+// The conversion from Resolver to Bundle is not being supported since remote resolution would be turned on by
+// default and it will be in beta before the stored version of CRD getting swapped to v1.
+func (pr PipelineRef) convertBundleToResolver(sink *v1.PipelineRef) {
+ if pr.Bundle != "" {
+ sink.ResolverRef = v1.ResolverRef{
+ Resolver: "bundles",
+ Params: v1.Params{{
+ Name: "bundle",
+ Value: v1.ParamValue{StringVal: pr.Bundle, Type: v1.ParamTypeString},
+ }, {
+ Name: "name",
+ Value: v1.ParamValue{StringVal: pr.Name, Type: v1.ParamTypeString},
+ }, {
+ Name: "kind",
+ Value: v1.ParamValue{StringVal: "Pipeline", Type: v1.ParamTypeString},
+ }},
+ }
+ }
+}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/pipelineref_types.go b/upstream/pkg/apis/pipeline/v1beta1/pipelineref_types.go
index cf83442be63..ab943a3242f 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/pipelineref_types.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/pipelineref_types.go
@@ -23,11 +23,9 @@ type PipelineRef struct {
// API version of the referent
// +optional
APIVersion string `json:"apiVersion,omitempty"`
-
// Bundle url reference to a Tekton Bundle.
//
// Deprecated: Please use ResolverRef with the bundles resolver instead.
- // The field is staying there for go client backward compatibility, but is not used/allowed anymore.
// +optional
Bundle string `json:"bundle,omitempty"`
diff --git a/upstream/pkg/apis/pipeline/v1beta1/pipelineref_validation.go b/upstream/pkg/apis/pipeline/v1beta1/pipelineref_validation.go
index efe352d6371..a0b7e02f18b 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/pipelineref_validation.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/pipelineref_validation.go
@@ -19,10 +19,9 @@ package v1beta1
import (
"context"
"fmt"
- "strings"
+ "github.com/google/go-containerregistry/pkg/name"
"github.com/tektoncd/pipeline/pkg/apis/config"
- "k8s.io/apimachinery/pkg/util/validation"
"knative.dev/pkg/apis"
)
@@ -32,56 +31,50 @@ func (ref *PipelineRef) Validate(ctx context.Context) (errs *apis.FieldError) {
if ref == nil {
return errs
}
- if apis.IsInCreate(ctx) && ref.Bundle != "" {
- errs = errs.Also(apis.ErrDisallowedFields("bundle"))
- }
- switch {
- case ref.Resolver != "" || ref.Params != nil:
+
+ if ref.Resolver != "" || ref.Params != nil {
+ if ref.Resolver != "" {
+ errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver", config.BetaAPIFields).ViaField("resolver"))
+ if ref.Name != "" {
+ errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver"))
+ }
+ if ref.Bundle != "" {
+ errs = errs.Also(apis.ErrMultipleOneOf("bundle", "resolver"))
+ }
+ }
if ref.Params != nil {
errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver params", config.BetaAPIFields).ViaField("params"))
if ref.Name != "" {
errs = errs.Also(apis.ErrMultipleOneOf("name", "params"))
}
+ if ref.Bundle != "" {
+ errs = errs.Also(apis.ErrMultipleOneOf("bundle", "params"))
+ }
if ref.Resolver == "" {
errs = errs.Also(apis.ErrMissingField("resolver"))
}
errs = errs.Also(ValidateParameters(ctx, ref.Params))
}
- if ref.Resolver != "" {
- errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver", config.BetaAPIFields).ViaField("resolver"))
- if ref.Name != "" {
- // make sure that the name is url-like.
- err := RefNameLikeUrl(ref.Name)
- if err == nil && !config.FromContextOrDefaults(ctx).FeatureFlags.EnableConciseResolverSyntax {
- // If name is url-like then concise resolver syntax must be enabled
- errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use concise resolver syntax", config.EnableConciseResolverSyntax), ""))
- }
- if err != nil {
- errs = errs.Also(apis.ErrInvalidValue(err, "name"))
- }
- }
+ } else {
+ if ref.Name == "" {
+ errs = errs.Also(apis.ErrMissingField("name"))
}
- case ref.Name != "":
- // ref name can be a Url-like format.
- if err := RefNameLikeUrl(ref.Name); err == nil {
- // If name is url-like then concise resolver syntax must be enabled
- if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableConciseResolverSyntax {
- errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use concise resolver syntax", config.EnableConciseResolverSyntax), ""))
- }
- // In stage1 of concise remote resolvers syntax, this is a required field.
- // TODO: remove this check when implementing stage 2 where this is optional.
- if ref.Resolver == "" {
- errs = errs.Also(apis.ErrMissingField("resolver"))
- }
- // Or, it must be a valid k8s name
- } else {
- // ref name must be a valid k8s name
- if errSlice := validation.IsQualifiedName(ref.Name); len(errSlice) != 0 {
- errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name"))
+ if ref.Bundle != "" {
+ errs = errs.Also(validateBundleFeatureFlag(ctx, "bundle", true).ViaField("bundle"))
+ if _, err := name.ParseReference(ref.Bundle); err != nil {
+ errs = errs.Also(apis.ErrInvalidValue("invalid bundle reference", "bundle", err.Error()))
}
}
- default:
- errs = errs.Also(apis.ErrMissingField("name"))
}
return //nolint:nakedret
}
+
+func validateBundleFeatureFlag(ctx context.Context, featureName string, wantValue bool) *apis.FieldError {
+ flagValue := config.FromContextOrDefaults(ctx).FeatureFlags.EnableTektonOCIBundles
+ if flagValue != wantValue {
+ var errs *apis.FieldError
+ message := fmt.Sprintf(`%s requires "enable-tekton-oci-bundles" feature gate to be %t but it is %t`, featureName, wantValue, flagValue)
+ return errs.Also(apis.ErrGeneric(message))
+ }
+ return nil
+}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/pipelineref_validation_test.go b/upstream/pkg/apis/pipeline/v1beta1/pipelineref_validation_test.go
index 27842341476..82549d20ca9 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/pipelineref_validation_test.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/pipelineref_validation_test.go
@@ -21,10 +21,14 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
+ "github.com/tektoncd/pipeline/pkg/apis/config"
cfgtesting "github.com/tektoncd/pipeline/pkg/apis/config/testing"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/test/diff"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
+ logtesting "knative.dev/pkg/logging/testing"
)
func TestPipelineRef_Invalid(t *testing.T) {
@@ -34,16 +38,31 @@ func TestPipelineRef_Invalid(t *testing.T) {
wantErr *apis.FieldError
withContext func(context.Context) context.Context
}{{
+ name: "use of bundle without the feature flag set",
+ ref: &v1beta1.PipelineRef{
+ Name: "my-pipeline",
+ Bundle: "docker.io/foo",
+ },
+ wantErr: apis.ErrGeneric("bundle requires \"enable-tekton-oci-bundles\" feature gate to be true but it is false"),
+ }, {
+ name: "bundle missing name",
+ ref: &v1beta1.PipelineRef{
+ Bundle: "docker.io/foo",
+ },
+ wantErr: apis.ErrMissingField("name"),
+ withContext: enableTektonOCIBundles(t),
+ }, {
+ name: "invalid bundle reference",
+ ref: &v1beta1.PipelineRef{
+ Name: "my-pipeline",
+ Bundle: "not a valid reference",
+ },
+ wantErr: apis.ErrInvalidValue("invalid bundle reference", "bundle", "could not parse reference: not a valid reference"),
+ withContext: enableTektonOCIBundles(t),
+ }, {
name: "pipelineRef without Pipeline Name",
ref: &v1beta1.PipelineRef{},
wantErr: apis.ErrMissingField("name"),
- }, {
- name: "invalid pipelineref name",
- ref: &v1beta1.PipelineRef{Name: "_foo"},
- wantErr: &apis.FieldError{
- Message: `invalid value: name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')`,
- Paths: []string{"name"},
- },
}, {
name: "pipelineref resolver disallowed without beta feature gate",
ref: &v1beta1.PipelineRef{
@@ -72,45 +91,57 @@ func TestPipelineRef_Invalid(t *testing.T) {
wantErr: apis.ErrMissingField("resolver"),
withContext: cfgtesting.EnableBetaAPIFields,
}, {
- name: "pipelineRef with resolver and k8s style name",
+ name: "pipelineref resolver disallowed in conjunction with pipelineref name",
ref: &v1beta1.PipelineRef{
Name: "foo",
ResolverRef: v1beta1.ResolverRef{
- Resolver: "git",
+ Resolver: "bar",
},
},
- wantErr: apis.ErrInvalidValue(`invalid URI for request`, "name"),
- withContext: enableConciseResolverSyntax,
+ wantErr: apis.ErrMultipleOneOf("name", "resolver"),
+ withContext: cfgtesting.EnableBetaAPIFields,
}, {
- name: "pipelineRef with url-like name without resolver",
+ name: "pipelineref resolver disallowed in conjunction with pipelineref bundle",
ref: &v1beta1.PipelineRef{
- Name: "https://foo.com/bar",
+ Bundle: "foo",
+ ResolverRef: v1beta1.ResolverRef{
+ Resolver: "baz",
+ },
},
- wantErr: apis.ErrMissingField("resolver"),
- withContext: enableConciseResolverSyntax,
+ wantErr: apis.ErrMultipleOneOf("bundle", "resolver"),
+ withContext: enableTektonOCIBundles(t),
}, {
- name: "pipelineRef params disallowed in conjunction with pipelineref name",
+ name: "pipelineref params disallowed in conjunction with pipelineref name",
ref: &v1beta1.PipelineRef{
- Name: "https://foo/bar",
+ Name: "bar",
ResolverRef: v1beta1.ResolverRef{
- Resolver: "git",
- Params: []v1beta1.Param{{Name: "foo", Value: v1beta1.ParamValue{StringVal: "bar"}}},
+ Params: v1beta1.Params{{
+ Name: "foo",
+ Value: v1beta1.ParamValue{
+ Type: v1beta1.ParamTypeString,
+ StringVal: "bar",
+ },
+ }},
},
},
- wantErr: apis.ErrMultipleOneOf("name", "params"),
- withContext: enableConciseResolverSyntax,
- }, {
- name: "pipelineRef with url-like name without enable-concise-resolver-syntax",
- ref: &v1beta1.PipelineRef{Name: "https://foo.com/bar"},
- wantErr: apis.ErrMissingField("resolver").Also(&apis.FieldError{
- Message: `feature flag enable-concise-resolver-syntax should be set to true to use concise resolver syntax`,
- }),
+ wantErr: apis.ErrMultipleOneOf("name", "params").Also(apis.ErrMissingField("resolver")),
+ withContext: cfgtesting.EnableBetaAPIFields,
}, {
- name: "pipelineRef without enable-concise-resolver-syntax",
- ref: &v1beta1.PipelineRef{Name: "https://foo.com/bar", ResolverRef: v1beta1.ResolverRef{Resolver: "git"}},
- wantErr: &apis.FieldError{
- Message: `feature flag enable-concise-resolver-syntax should be set to true to use concise resolver syntax`,
+ name: "pipelineref params disallowed in conjunction with pipelineref bundle",
+ ref: &v1beta1.PipelineRef{
+ Bundle: "bar",
+ ResolverRef: v1beta1.ResolverRef{
+ Params: v1beta1.Params{{
+ Name: "foo",
+ Value: v1beta1.ParamValue{
+ Type: v1beta1.ParamTypeString,
+ StringVal: "bar",
+ },
+ }},
+ },
},
+ wantErr: apis.ErrMultipleOneOf("bundle", "params").Also(apis.ErrMissingField("resolver")),
+ withContext: enableTektonOCIBundles(t),
}}
for _, tc := range tests {
@@ -173,3 +204,17 @@ func TestPipelineRef_Valid(t *testing.T) {
})
}
}
+
+func enableTektonOCIBundles(t *testing.T) func(context.Context) context.Context {
+ t.Helper()
+ return func(ctx context.Context) context.Context {
+ s := config.NewStore(logtesting.TestLogger(t))
+ s.OnConfigChanged(&corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName()},
+ Data: map[string]string{
+ "enable-tekton-oci-bundles": "true",
+ },
+ })
+ return s.ToContext(ctx)
+ }
+}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/pipelinerun_conversion_test.go b/upstream/pkg/apis/pipeline/v1beta1/pipelinerun_conversion_test.go
index d0812253a8e..04397c0708b 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/pipelinerun_conversion_test.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/pipelinerun_conversion_test.go
@@ -88,14 +88,13 @@ var (
},
},
},
- Sidecars: []v1beta1.SidecarState{{
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- ExitCode: 1,
- Reason: "Error",
- Message: "Error",
- },
+ Sidecars: []v1beta1.SidecarState{{ContainerState: corev1.ContainerState{
+ Terminated: &corev1.ContainerStateTerminated{
+ ExitCode: 1,
+ Reason: "Error",
+ Message: "Error",
},
+ },
Name: "error",
ImageID: "image-id",
ContainerName: "sidecar-error",
@@ -269,21 +268,17 @@ func TestPipelineRunConversion(t *testing.T) {
},
HostNetwork: false,
},
- StepOverrides: []v1beta1.TaskRunStepOverride{
- {
- Name: "test-so",
- Resources: corev1.ResourceRequirements{
- Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
- },
- },
+ StepOverrides: []v1beta1.TaskRunStepOverride{{
+ Name: "test-so",
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
+ }},
},
- SidecarOverrides: []v1beta1.TaskRunSidecarOverride{
- {
- Name: "test-so",
- Resources: corev1.ResourceRequirements{
- Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
- },
- },
+ SidecarOverrides: []v1beta1.TaskRunSidecarOverride{{
+ Name: "test-so",
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
+ }},
},
Metadata: &v1beta1.PipelineTaskMetadata{
Labels: map[string]string{
@@ -320,8 +315,7 @@ func TestPipelineRunConversion(t *testing.T) {
Value: *v1beta1.NewObject(map[string]string{
"pkey1": "val1",
"pkey2": "rae",
- }),
- }, {
+ })}, {
Name: "pipeline-result-2",
Value: *v1beta1.NewObject(map[string]string{
"pkey1": "val2",
@@ -466,6 +460,38 @@ func TestPipelineRunConversionFromDeprecated(t *testing.T) {
},
},
},
+ }, {
+ name: "bundle",
+ in: &v1beta1.PipelineRun{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "bar",
+ },
+ Spec: v1beta1.PipelineRunSpec{
+ PipelineRef: &v1beta1.PipelineRef{
+ Name: "test-bundle-name",
+ Bundle: "test-bundle",
+ },
+ },
+ },
+ want: &v1beta1.PipelineRun{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "bar",
+ },
+ Spec: v1beta1.PipelineRunSpec{
+ PipelineRef: &v1beta1.PipelineRef{
+ ResolverRef: v1beta1.ResolverRef{
+ Resolver: "bundles",
+ Params: v1beta1.Params{
+ {Name: "bundle", Value: v1beta1.ParamValue{StringVal: "test-bundle", Type: "string"}},
+ {Name: "name", Value: v1beta1.ParamValue{StringVal: "test-bundle-name", Type: "string"}},
+ {Name: "kind", Value: v1beta1.ParamValue{StringVal: "Pipeline", Type: "string"}},
+ },
+ },
+ },
+ },
+ },
}}
for _, test := range tests {
versions := []apis.Convertible{&v1.PipelineRun{}}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go b/upstream/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go
index 834c7493df4..7113b653620 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go
@@ -26,7 +26,6 @@ import (
"github.com/tektoncd/pipeline/pkg/apis/validate"
"github.com/tektoncd/pipeline/pkg/internal/resultref"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
- "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/strings/slices"
@@ -61,9 +60,6 @@ func (pr *PipelineRun) Validate(ctx context.Context) *apis.FieldError {
// Validate pipelinerun spec
func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
- // Validate the spec changes
- errs = errs.Also(ps.ValidateUpdate(ctx))
-
// Must have exactly one of pipelineRef and pipelineSpec.
if ps.PipelineRef == nil && ps.PipelineSpec == nil {
errs = errs.Also(apis.ErrMissingOneOf("pipelineRef", "pipelineSpec"))
@@ -149,31 +145,6 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError)
return errs
}
-// ValidateUpdate validates the update of a PipelineRunSpec
-func (ps *PipelineRunSpec) ValidateUpdate(ctx context.Context) (errs *apis.FieldError) {
- if !apis.IsInUpdate(ctx) {
- return
- }
- oldObj, ok := apis.GetBaseline(ctx).(*PipelineRun)
- if !ok || oldObj == nil {
- return
- }
- old := &oldObj.Spec
-
- // If already in the done state, the spec cannot be modified. Otherwise, only the status field can be modified.
- tips := "Once the PipelineRun is complete, no updates are allowed"
- if !oldObj.IsDone() {
- old = old.DeepCopy()
- old.Status = ps.Status
- tips = "Once the PipelineRun has started, only status updates are allowed"
- }
- if !equality.Semantic.DeepEqual(old, ps) {
- errs = errs.Also(apis.ErrInvalidValue(tips, ""))
- }
-
- return
-}
-
func (ps *PipelineRunSpec) validatePipelineRunParameters(ctx context.Context) (errs *apis.FieldError) {
if len(ps.Params) == 0 {
return errs
@@ -364,11 +335,11 @@ func (ps *PipelineRunSpec) validatePipelineTimeout(timeout time.Duration, errorM
func validateTaskRunSpec(ctx context.Context, trs PipelineTaskRunSpec) (errs *apis.FieldError) {
if trs.StepOverrides != nil {
- errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "stepOverrides", config.BetaAPIFields).ViaField("stepOverrides"))
+ errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "stepOverrides", config.AlphaAPIFields).ViaField("stepOverrides"))
errs = errs.Also(validateStepOverrides(trs.StepOverrides).ViaField("stepOverrides"))
}
if trs.SidecarOverrides != nil {
- errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "sidecarOverrides", config.BetaAPIFields).ViaField("sidecarOverrides"))
+ errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "sidecarOverrides", config.AlphaAPIFields).ViaField("sidecarOverrides"))
errs = errs.Also(validateSidecarOverrides(trs.SidecarOverrides).ViaField("sidecarOverrides"))
}
if trs.ComputeResources != nil {
diff --git a/upstream/pkg/apis/pipeline/v1beta1/pipelinerun_validation_test.go b/upstream/pkg/apis/pipeline/v1beta1/pipelinerun_validation_test.go
index 1d7199fddee..15e044591a4 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/pipelinerun_validation_test.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/pipelinerun_validation_test.go
@@ -22,7 +22,6 @@ import (
"time"
"github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
"github.com/tektoncd/pipeline/pkg/apis/config"
cfgtesting "github.com/tektoncd/pipeline/pkg/apis/config/testing"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
@@ -32,7 +31,6 @@ import (
corev1resources "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
- duckv1 "knative.dev/pkg/apis/duck/v1"
)
func TestPipelineRun_Invalid(t *testing.T) {
@@ -507,21 +505,6 @@ func TestPipelineRun_Invalid(t *testing.T) {
},
},
want: &apis.FieldError{Message: "must not set the field(s)", Paths: []string{"spec.resources"}},
- }, {
- name: "uses bundle (deprecated) on creation is disallowed",
- pr: v1beta1.PipelineRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pipelinerunname",
- },
- Spec: v1beta1.PipelineRunSpec{
- PipelineRef: &v1beta1.PipelineRef{
- Name: "foo",
- Bundle: "example.com/foo/bar",
- },
- },
- },
- want: &apis.FieldError{Message: "must not set the field(s)", Paths: []string{"spec.pipelineRef.bundle"}},
- wc: apis.WithinCreate,
}}
for _, tc := range tests {
@@ -951,7 +934,7 @@ func TestPipelineRun_Validate(t *testing.T) {
},
},
}, {
- name: "beta feature: sidecar and step overrides",
+ name: "alpha feature: sidecar and step overrides",
pr: v1beta1.PipelineRun{
ObjectMeta: metav1.ObjectMeta{
Name: "pr",
@@ -961,27 +944,23 @@ func TestPipelineRun_Validate(t *testing.T) {
TaskRunSpecs: []v1beta1.PipelineTaskRunSpec{
{
PipelineTaskName: "bar",
- StepOverrides: []v1beta1.TaskRunStepOverride{
- {
- Name: "task-1",
- Resources: corev1.ResourceRequirements{
- Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
- },
- },
+ StepOverrides: []v1beta1.TaskRunStepOverride{{
+ Name: "task-1",
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
+ }},
},
- SidecarOverrides: []v1beta1.TaskRunSidecarOverride{
- {
- Name: "task-1",
- Resources: corev1.ResourceRequirements{
- Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
- },
- },
+ SidecarOverrides: []v1beta1.TaskRunSidecarOverride{{
+ Name: "task-1",
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
+ }},
},
},
},
},
},
- wc: cfgtesting.EnableBetaAPIFields,
+ wc: cfgtesting.EnableAlphaAPIFields,
}}
for _, ts := range tests {
@@ -1015,8 +994,7 @@ func TestPipelineRunSpec_Invalidate(t *testing.T) {
TaskRef: &v1beta1.TaskRef{
Name: "mytask",
},
- }},
- },
+ }}},
},
wantErr: apis.ErrMultipleOneOf("pipelineRef", "pipelineSpec"),
}, {
@@ -1111,45 +1089,41 @@ func TestPipelineRunSpec_Invalidate(t *testing.T) {
wantErr: apis.ErrMultipleOneOf("taskRunSpecs[0].stepOverrides[1].name"),
withContext: cfgtesting.EnableAlphaAPIFields,
}, {
- name: "stepOverride disallowed without alpha or beta feature gate",
+ name: "stepOverride disallowed without alpha feature gate",
spec: v1beta1.PipelineRunSpec{
PipelineRef: &v1beta1.PipelineRef{Name: "foo"},
TaskRunSpecs: []v1beta1.PipelineTaskRunSpec{
{
PipelineTaskName: "bar",
- StepOverrides: []v1beta1.TaskRunStepOverride{
- {
- Name: "task-1",
- Resources: corev1.ResourceRequirements{
- Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
- },
- },
+ StepOverrides: []v1beta1.TaskRunStepOverride{{
+ Name: "task-1",
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
+ }},
},
},
},
},
withContext: cfgtesting.EnableStableAPIFields,
- wantErr: apis.ErrGeneric("stepOverrides requires \"enable-api-fields\" feature gate to be \"alpha\" or \"beta\" but it is \"stable\"").ViaIndex(0).ViaField("taskRunSpecs"),
+ wantErr: apis.ErrGeneric("stepOverrides requires \"enable-api-fields\" feature gate to be \"alpha\" but it is \"stable\"").ViaIndex(0).ViaField("taskRunSpecs"),
}, {
- name: "sidecarOverride disallowed without alpha or beta feature gate",
+ name: "sidecarOverride disallowed without alpha feature gate",
spec: v1beta1.PipelineRunSpec{
PipelineRef: &v1beta1.PipelineRef{Name: "foo"},
TaskRunSpecs: []v1beta1.PipelineTaskRunSpec{
{
PipelineTaskName: "bar",
- SidecarOverrides: []v1beta1.TaskRunSidecarOverride{
- {
- Name: "task-1",
- Resources: corev1.ResourceRequirements{
- Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
- },
- },
+ SidecarOverrides: []v1beta1.TaskRunSidecarOverride{{
+ Name: "task-1",
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
+ }},
},
},
},
},
withContext: cfgtesting.EnableStableAPIFields,
- wantErr: apis.ErrGeneric("sidecarOverrides requires \"enable-api-fields\" feature gate to be \"alpha\" or \"beta\" but it is \"stable\"").ViaIndex(0).ViaField("taskRunSpecs"),
+ wantErr: apis.ErrGeneric("sidecarOverrides requires \"enable-api-fields\" feature gate to be \"alpha\" but it is \"stable\"").ViaIndex(0).ViaField("taskRunSpecs"),
}, {
name: "missing stepOverride name",
spec: v1beta1.PipelineRunSpec{
@@ -1157,12 +1131,10 @@ func TestPipelineRunSpec_Invalidate(t *testing.T) {
TaskRunSpecs: []v1beta1.PipelineTaskRunSpec{
{
PipelineTaskName: "bar",
- StepOverrides: []v1beta1.TaskRunStepOverride{
- {
- Resources: corev1.ResourceRequirements{
- Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
- },
- },
+ StepOverrides: []v1beta1.TaskRunStepOverride{{
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
+ }},
},
},
},
@@ -1191,12 +1163,10 @@ func TestPipelineRunSpec_Invalidate(t *testing.T) {
TaskRunSpecs: []v1beta1.PipelineTaskRunSpec{
{
PipelineTaskName: "bar",
- SidecarOverrides: []v1beta1.TaskRunSidecarOverride{
- {
- Resources: corev1.ResourceRequirements{
- Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
- },
- },
+ SidecarOverrides: []v1beta1.TaskRunSidecarOverride{{
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
+ }},
},
},
},
@@ -1210,13 +1180,11 @@ func TestPipelineRunSpec_Invalidate(t *testing.T) {
TaskRunSpecs: []v1beta1.PipelineTaskRunSpec{
{
PipelineTaskName: "pipelineTask",
- StepOverrides: []v1beta1.TaskRunStepOverride{
- {
- Name: "stepOverride",
- Resources: corev1.ResourceRequirements{
- Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
- },
- },
+ StepOverrides: []v1beta1.TaskRunStepOverride{{
+ Name: "stepOverride",
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
+ }},
},
ComputeResources: &corev1.ResourceRequirements{
Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("2Gi")},
@@ -1694,179 +1662,3 @@ func TestPipelineRunSpecBetaFeatures(t *testing.T) {
})
}
}
-func TestPipelineRunSpec_ValidateUpdate(t *testing.T) {
- tests := []struct {
- name string
- isCreate bool
- isUpdate bool
- baselinePipelineRun *v1beta1.PipelineRun
- pipelineRun *v1beta1.PipelineRun
- expectedError apis.FieldError
- }{
- {
- name: "is create ctx",
- pipelineRun: &v1beta1.PipelineRun{
- Spec: v1beta1.PipelineRunSpec{},
- },
- isCreate: true,
- isUpdate: false,
- expectedError: apis.FieldError{},
- }, {
- name: "is update ctx, no changes",
- baselinePipelineRun: &v1beta1.PipelineRun{
- Spec: v1beta1.PipelineRunSpec{
- Status: "",
- },
- },
- pipelineRun: &v1beta1.PipelineRun{
- Spec: v1beta1.PipelineRunSpec{
- Status: "",
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{},
- }, {
- name: "is update ctx, baseline is nil, skip validation",
- baselinePipelineRun: nil,
- pipelineRun: &v1beta1.PipelineRun{
- Spec: v1beta1.PipelineRunSpec{
- Timeouts: &v1beta1.TimeoutFields{
- Pipeline: &metav1.Duration{Duration: 1},
- },
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{},
- }, {
- name: "is update ctx, baseline is unknown, status changes from Empty to Cancelled",
- baselinePipelineRun: &v1beta1.PipelineRun{
- Spec: v1beta1.PipelineRunSpec{
- Status: "",
- },
- Status: v1beta1.PipelineRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{
- {Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown},
- },
- },
- },
- },
- pipelineRun: &v1beta1.PipelineRun{
- Spec: v1beta1.PipelineRunSpec{
- Status: "Cancelled",
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{},
- }, {
- name: "is update ctx, baseline is unknown, timeouts changes",
- baselinePipelineRun: &v1beta1.PipelineRun{
- Spec: v1beta1.PipelineRunSpec{
- Status: "",
- Timeouts: &v1beta1.TimeoutFields{
- Pipeline: &metav1.Duration{Duration: 0},
- },
- },
- Status: v1beta1.PipelineRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{
- {Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown},
- },
- },
- },
- },
- pipelineRun: &v1beta1.PipelineRun{
- Spec: v1beta1.PipelineRunSpec{
- Timeouts: &v1beta1.TimeoutFields{
- Pipeline: &metav1.Duration{Duration: 1},
- },
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{
- Message: `invalid value: Once the PipelineRun has started, only status updates are allowed`,
- Paths: []string{""},
- },
- }, {
- name: "is update ctx, baseline is unknown, status changes from PipelineRunPending to Empty, and timeouts changes",
- baselinePipelineRun: &v1beta1.PipelineRun{
- Spec: v1beta1.PipelineRunSpec{
- Status: "PipelineRunPending",
- Timeouts: &v1beta1.TimeoutFields{
- Pipeline: &metav1.Duration{Duration: 0},
- },
- },
- Status: v1beta1.PipelineRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{
- {Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown},
- },
- },
- },
- },
- pipelineRun: &v1beta1.PipelineRun{
- Spec: v1beta1.PipelineRunSpec{
- Status: "",
- Timeouts: &v1beta1.TimeoutFields{
- Pipeline: &metav1.Duration{Duration: 1},
- },
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{
- Message: `invalid value: Once the PipelineRun has started, only status updates are allowed`,
- Paths: []string{""},
- },
- }, {
- name: "is update ctx, baseline is done, status changes",
- baselinePipelineRun: &v1beta1.PipelineRun{
- Spec: v1beta1.PipelineRunSpec{
- Status: "PipelineRunPending",
- },
- Status: v1beta1.PipelineRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{
- {Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue},
- },
- },
- },
- },
- pipelineRun: &v1beta1.PipelineRun{
- Spec: v1beta1.PipelineRunSpec{
- Status: "TaskRunCancelled",
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{
- Message: `invalid value: Once the PipelineRun is complete, no updates are allowed`,
- Paths: []string{""},
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- ctx := config.ToContext(context.Background(), &config.Config{
- FeatureFlags: &config.FeatureFlags{},
- Defaults: &config.Defaults{},
- })
- if tt.isCreate {
- ctx = apis.WithinCreate(ctx)
- }
- if tt.isUpdate {
- ctx = apis.WithinUpdate(ctx, tt.baselinePipelineRun)
- }
- pr := tt.pipelineRun
- err := pr.Spec.ValidateUpdate(ctx)
- if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" {
- t.Errorf("PipelineRunSpec.ValidateUpdate() errors diff %s", diff.PrintWantGot(d))
- }
- })
- }
-}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/register.go b/upstream/pkg/apis/pipeline/v1beta1/register.go
index c33fa8a848b..6154ae5dad5 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/register.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/register.go
@@ -58,8 +58,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&PipelineRunList{},
&CustomRun{},
&CustomRunList{},
- &StepAction{},
- &StepActionList{},
)
// &Condition{},
// &ConditionList{},
diff --git a/upstream/pkg/apis/pipeline/v1beta1/resultref_test.go b/upstream/pkg/apis/pipeline/v1beta1/resultref_test.go
index 628219a393b..aa46dd09a7f 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/resultref_test.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/resultref_test.go
@@ -542,7 +542,7 @@ func TestHasResultReferenceWhenExpression(t *testing.T) {
}
got := v1beta1.NewResultRefs(expressions)
if d := cmp.Diff(tt.wantRef, got); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
@@ -651,8 +651,7 @@ func TestPipelineTaskResultRefs(t *testing.T) {
Value: *v1beta1.NewStructuredValues("$(tasks.pt5.results.r5)", "$(tasks.pt6.results.r6)"),
}, {
Value: *v1beta1.NewStructuredValues("$(tasks.pt7.results.r7)", "$(tasks.pt8.results.r8)"),
- }},
- },
+ }}},
}
refs := v1beta1.PipelineTaskResultRefs(&pt)
expectedRefs := []*v1beta1.ResultRef{{
@@ -697,12 +696,11 @@ func TestParseResultName(t *testing.T) {
name string
input string
want []string
- }{
- {
- name: "array indexing",
- input: "anArrayResult[1]",
- want: []string{"anArrayResult", "1"},
- },
+ }{{
+ name: "array indexing",
+ input: "anArrayResult[1]",
+ want: []string{"anArrayResult", "1"},
+ },
{
name: "array star reference",
input: "anArrayResult[*]",
@@ -724,38 +722,37 @@ func TestGetVarSubstitutionExpressionsForPipelineResult(t *testing.T) {
name string
result v1beta1.PipelineResult
want []string
- }{
- {
- name: "get string result expressions",
- result: v1beta1.PipelineResult{
- Name: "string result",
- Type: v1beta1.ResultsTypeString,
- Value: *v1beta1.NewStructuredValues("$(tasks.task1.results.result1) and $(tasks.task2.results.result2)"),
- },
- want: []string{"tasks.task1.results.result1", "tasks.task2.results.result2"},
- }, {
- name: "get array result expressions",
- result: v1beta1.PipelineResult{
- Name: "array result",
- Type: v1beta1.ResultsTypeString,
- Value: *v1beta1.NewStructuredValues("$(tasks.task1.results.result1)", "$(tasks.task2.results.result2)"),
- },
- want: []string{"tasks.task1.results.result1", "tasks.task2.results.result2"},
- }, {
- name: "get object result expressions",
- result: v1beta1.PipelineResult{
- Name: "object result",
- Type: v1beta1.ResultsTypeString,
- Value: *v1beta1.NewObject(map[string]string{
- "key1": "$(tasks.task1.results.result1)",
- "key2": "$(tasks.task2.results.result2) and another one $(tasks.task3.results.result3)",
- "key3": "no ref here",
- }),
- },
- want: []string{"tasks.task1.results.result1", "tasks.task2.results.result2", "tasks.task3.results.result3"},
+ }{{
+ name: "get string result expressions",
+ result: v1beta1.PipelineResult{
+ Name: "string result",
+ Type: v1beta1.ResultsTypeString,
+ Value: *v1beta1.NewStructuredValues("$(tasks.task1.results.result1) and $(tasks.task2.results.result2)"),
+ },
+ want: []string{"tasks.task1.results.result1", "tasks.task2.results.result2"},
+ }, {
+ name: "get array result expressions",
+ result: v1beta1.PipelineResult{
+ Name: "array result",
+ Type: v1beta1.ResultsTypeString,
+ Value: *v1beta1.NewStructuredValues("$(tasks.task1.results.result1)", "$(tasks.task2.results.result2)"),
+ },
+ want: []string{"tasks.task1.results.result1", "tasks.task2.results.result2"},
+ }, {
+ name: "get object result expressions",
+ result: v1beta1.PipelineResult{
+ Name: "object result",
+ Type: v1beta1.ResultsTypeString,
+ Value: *v1beta1.NewObject(map[string]string{
+ "key1": "$(tasks.task1.results.result1)",
+ "key2": "$(tasks.task2.results.result2) and another one $(tasks.task3.results.result3)",
+ "key3": "no ref here",
+ }),
},
+ want: []string{"tasks.task1.results.result1", "tasks.task2.results.result2", "tasks.task3.results.result3"},
+ },
}
- sortStrings := func(x, y string) bool {
+ var sortStrings = func(x, y string) bool {
return x < y
}
for _, tt := range tests {
diff --git a/upstream/pkg/apis/pipeline/v1beta1/stepaction_conversion.go b/upstream/pkg/apis/pipeline/v1beta1/stepaction_conversion.go
deleted file mode 100644
index 6d8afd26f33..00000000000
--- a/upstream/pkg/apis/pipeline/v1beta1/stepaction_conversion.go
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
-Copyright 2023 The Tekton Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
- "context"
-
- "knative.dev/pkg/apis"
-)
-
-var _ apis.Convertible = (*StepAction)(nil)
-
-// ConvertTo implements apis.Convertible
-func (s *StepAction) ConvertTo(ctx context.Context, to apis.Convertible) error {
- return nil
-}
-
-// ConvertTo implements apis.Convertible
-func (ss *StepActionSpec) ConvertTo(ctx context.Context, sink *StepActionSpec) error {
- return nil
-}
-
-// ConvertFrom implements apis.Convertible
-func (s *StepAction) ConvertFrom(ctx context.Context, from apis.Convertible) error {
- return nil
-}
-
-// ConvertFrom implements apis.Convertible
-func (ss *StepActionSpec) ConvertFrom(ctx context.Context, source *StepActionSpec) error {
- return nil
-}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/stepaction_defaults.go b/upstream/pkg/apis/pipeline/v1beta1/stepaction_defaults.go
deleted file mode 100644
index 0274a66bc2e..00000000000
--- a/upstream/pkg/apis/pipeline/v1beta1/stepaction_defaults.go
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
-Copyright 2023 The Tekton Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
- "context"
-
- "knative.dev/pkg/apis"
-)
-
-var _ apis.Defaultable = (*StepAction)(nil)
-
-// SetDefaults implements apis.Defaultable
-func (s *StepAction) SetDefaults(ctx context.Context) {
- s.Spec.SetDefaults(ctx)
-}
-
-// SetDefaults set any defaults for the StepAction spec
-func (ss *StepActionSpec) SetDefaults(ctx context.Context) {
- for i := range ss.Params {
- ss.Params[i].SetDefaults(ctx)
- }
- for i := range ss.Results {
- ss.Results[i].SetDefaults(ctx)
- }
-}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/stepaction_types.go b/upstream/pkg/apis/pipeline/v1beta1/stepaction_types.go
deleted file mode 100644
index ab000a6db01..00000000000
--- a/upstream/pkg/apis/pipeline/v1beta1/stepaction_types.go
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
-Copyright 2023 The Tekton Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/internal/checksum"
- v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "knative.dev/pkg/apis"
- "knative.dev/pkg/kmeta"
-)
-
-// +genclient
-// +genclient:noStatus
-// +genreconciler:krshapedlogic=false
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// StepAction represents the actionable components of Step.
-// The Step can only reference it from the cluster or using remote resolution.
-//
-// +k8s:openapi-gen=true
-type StepAction struct {
- metav1.TypeMeta `json:",inline"`
- // +optional
- metav1.ObjectMeta `json:"metadata"`
-
- // Spec holds the desired state of the Step from the client
- // +optional
- Spec StepActionSpec `json:"spec"`
-}
-
-var _ kmeta.OwnerRefable = (*StepAction)(nil)
-
-// StepAction returns the step action's spec
-func (s *StepAction) StepActionSpec() StepActionSpec {
- return s.Spec
-}
-
-// StepActionMetadata returns the step action's ObjectMeta
-func (s *StepAction) StepActionMetadata() metav1.ObjectMeta {
- return s.ObjectMeta
-}
-
-// Copy returns a deep copy of the stepaction
-func (s *StepAction) Copy() StepActionObject {
- return s.DeepCopy()
-}
-
-// GetGroupVersionKind implements kmeta.OwnerRefable.
-func (*StepAction) GetGroupVersionKind() schema.GroupVersionKind {
- return SchemeGroupVersion.WithKind("StepAction")
-}
-
-// Checksum computes the sha256 checksum of the stepaction object.
-// Prior to computing the checksum, it performs some preprocessing on the
-// metadata of the object where it removes system provided annotations.
-// Only the name, namespace, generateName, user-provided labels and annotations
-// and the taskSpec are included for the checksum computation.
-func (s *StepAction) Checksum() ([]byte, error) {
- objectMeta := checksum.PrepareObjectMeta(s)
- preprocessedStepaction := StepAction{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "tekton.dev/v1beta1",
- Kind: "StepAction",
- },
- ObjectMeta: objectMeta,
- Spec: s.Spec,
- }
- sha256Checksum, err := checksum.ComputeSha256Checksum(preprocessedStepaction)
- if err != nil {
- return nil, err
- }
- return sha256Checksum, nil
-}
-
-// StepActionList contains a list of StepActions
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-type StepActionList struct {
- metav1.TypeMeta `json:",inline"`
- // +optional
- metav1.ListMeta `json:"metadata,omitempty"`
- Items []StepAction `json:"items"`
-}
-
-// StepActionSpec contains the actionable components of a step.
-type StepActionSpec struct {
- // Description is a user-facing description of the stepaction that may be
- // used to populate a UI.
- // +optional
- Description string `json:"description,omitempty"`
- // Image reference name to run for this StepAction.
- // More info: https://kubernetes.io/docs/concepts/containers/images
- // +optional
- Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
- // Entrypoint array. Not executed within a shell.
- // The image's ENTRYPOINT is used if this is not provided.
- // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
- // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
- // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
- // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
- // of whether the variable exists or not. Cannot be updated.
- // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
- // +optional
- // +listType=atomic
- Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
- // Arguments to the entrypoint.
- // The image's CMD is used if this is not provided.
- // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
- // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
- // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
- // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
- // of whether the variable exists or not. Cannot be updated.
- // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
- // +optional
- // +listType=atomic
- Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
- // List of environment variables to set in the container.
- // Cannot be updated.
- // +optional
- // +patchMergeKey=name
- // +patchStrategy=merge
- // +listType=atomic
- Env []corev1.EnvVar `json:"env,omitempty" patchMergeKey:"name" patchStrategy:"merge" protobuf:"bytes,7,rep,name=env"`
- // Script is the contents of an executable file to execute.
- //
- // If Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.
- // +optional
- Script string `json:"script,omitempty"`
- // Step's working directory.
- // If not specified, the container runtime's default will be used, which
- // might be configured in the container image.
- // Cannot be updated.
- // +optional
- WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
- // Params is a list of input parameters required to run the stepAction.
- // Params must be supplied as inputs in Steps unless they declare a defaultvalue.
- // +optional
- // +listType=atomic
- Params v1.ParamSpecs `json:"params,omitempty"`
- // Results are values that this StepAction can output
- // +optional
- // +listType=atomic
- Results []v1.StepResult `json:"results,omitempty"`
- // SecurityContext defines the security options the Step should be run with.
- // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
- // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
- // The value set in StepAction will take precedence over the value from Task.
- // +optional
- SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
- // Volumes to mount into the Step's filesystem.
- // Cannot be updated.
- // +optional
- // +patchMergeKey=mountPath
- // +patchStrategy=merge
- // +listType=atomic
- VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchMergeKey:"mountPath" patchStrategy:"merge" protobuf:"bytes,9,rep,name=volumeMounts"`
-}
-
-// ToStep converts the StepActionSpec to a Step struct
-func (ss *StepActionSpec) ToStep() *v1.Step {
- return &v1.Step{
- Image: ss.Image,
- Command: ss.Command,
- Args: ss.Args,
- WorkingDir: ss.WorkingDir,
- Script: ss.Script,
- Env: ss.Env,
- VolumeMounts: ss.VolumeMounts,
- SecurityContext: ss.SecurityContext,
- Results: ss.Results,
- }
-}
-
-// StepActionObject is implemented by StepAction
-type StepActionObject interface {
- apis.Defaultable
- StepActionMetadata() metav1.ObjectMeta
- StepActionSpec() StepActionSpec
- Copy() StepActionObject
-}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/stepaction_validation.go b/upstream/pkg/apis/pipeline/v1beta1/stepaction_validation.go
deleted file mode 100644
index 0955c7e4f70..00000000000
--- a/upstream/pkg/apis/pipeline/v1beta1/stepaction_validation.go
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
-Copyright 2023 The Tekton Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
- "context"
- "strings"
-
- "github.com/tektoncd/pipeline/pkg/apis/config"
- v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- "github.com/tektoncd/pipeline/pkg/apis/validate"
- "github.com/tektoncd/pipeline/pkg/substitution"
- admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/util/sets"
- "knative.dev/pkg/apis"
- "knative.dev/pkg/webhook/resourcesemantics"
-)
-
-var (
- _ apis.Validatable = (*StepAction)(nil)
- _ resourcesemantics.VerbLimited = (*StepAction)(nil)
-)
-
-// SupportedVerbs returns the operations that validation should be called for
-func (s *StepAction) SupportedVerbs() []admissionregistrationv1.OperationType {
- return []admissionregistrationv1.OperationType{admissionregistrationv1.Create, admissionregistrationv1.Update}
-}
-
-// Validate implements apis.Validatable
-func (s *StepAction) Validate(ctx context.Context) (errs *apis.FieldError) {
- errs = validate.ObjectMetadata(s.GetObjectMeta()).ViaField("metadata")
- errs = errs.Also(s.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec"))
- return errs
-}
-
-// Validate implements apis.Validatable
-func (ss *StepActionSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
- if ss.Image == "" {
- errs = errs.Also(apis.ErrMissingField("Image"))
- }
-
- if ss.Script != "" {
- if len(ss.Command) > 0 {
- errs = errs.Also(&apis.FieldError{
- Message: "script cannot be used with command",
- Paths: []string{"script"},
- })
- }
-
- cleaned := strings.TrimSpace(ss.Script)
- if strings.HasPrefix(cleaned, "#!win") {
- errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "windows script support", config.AlphaAPIFields).ViaField("script"))
- }
- errs = errs.Also(validateNoParamSubstitutionsInScript(ss.Script))
- }
- errs = errs.Also(validateUsageOfDeclaredParameters(ctx, *ss))
- errs = errs.Also(v1.ValidateParameterTypes(ctx, ss.Params).ViaField("params"))
- errs = errs.Also(validateParameterVariables(ctx, *ss, ss.Params))
- errs = errs.Also(v1.ValidateStepResultsVariables(ctx, ss.Results, ss.Script))
- errs = errs.Also(v1.ValidateStepResults(ctx, ss.Results).ViaField("results"))
- errs = errs.Also(validateVolumeMounts(ss.VolumeMounts, ss.Params).ViaField("volumeMounts"))
- return errs
-}
-
-// validateNoParamSubstitutionsInScript validates that param substitutions are not invoked in the script
-func validateNoParamSubstitutionsInScript(script string) *apis.FieldError {
- _, present, errString := substitution.ExtractVariablesFromString(script, "params")
- if errString != "" || present {
- return &apis.FieldError{
- Message: "param substitution in scripts is not allowed.",
- Paths: []string{"script"},
- }
- }
- return nil
-}
-
-// validateUsageOfDeclaredParameters validates that all parameters referenced in the Task are declared by the Task.
-func validateUsageOfDeclaredParameters(ctx context.Context, sas StepActionSpec) *apis.FieldError {
- params := sas.Params
- var errs *apis.FieldError
- _, _, objectParams := params.SortByType()
- allParameterNames := sets.NewString(params.GetNames()...)
- errs = errs.Also(validateStepActionVariables(ctx, sas, "params", allParameterNames))
- errs = errs.Also(ValidateObjectUsage(ctx, sas, objectParams))
- errs = errs.Also(v1.ValidateObjectParamsHaveProperties(ctx, params))
- return errs
-}
-
-func validateVolumeMounts(volumeMounts []corev1.VolumeMount, params v1.ParamSpecs) (errs *apis.FieldError) {
- if len(volumeMounts) == 0 {
- return
- }
- paramNames := sets.String{}
- for _, p := range params {
- paramNames.Insert(p.Name)
- }
- for idx, v := range volumeMounts {
- matches, _ := substitution.ExtractVariableExpressions(v.Name, "params")
- if len(matches) != 1 {
- errs = errs.Also(apis.ErrInvalidValue(v.Name, "name", "expect the Name to be a single param reference").ViaIndex(idx))
- return errs
- } else if matches[0] != v.Name {
- errs = errs.Also(apis.ErrInvalidValue(v.Name, "name", "expect the Name to be a single param reference").ViaIndex(idx))
- return errs
- }
- errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(v.Name, "params", paramNames).ViaIndex(idx))
- }
- return errs
-}
-
-// validateParameterVariables validates all variables within a slice of ParamSpecs against a StepAction
-func validateParameterVariables(ctx context.Context, sas StepActionSpec, params v1.ParamSpecs) *apis.FieldError {
- var errs *apis.FieldError
- errs = errs.Also(params.ValidateNoDuplicateNames())
- stringParams, arrayParams, objectParams := params.SortByType()
- stringParameterNames := sets.NewString(stringParams.GetNames()...)
- arrayParameterNames := sets.NewString(arrayParams.GetNames()...)
- errs = errs.Also(v1.ValidateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParams))
- return errs.Also(validateStepActionArrayUsage(sas, "params", arrayParameterNames))
-}
-
-// ValidateObjectUsage validates the usage of individual attributes of an object param and the usage of the entire object
-func ValidateObjectUsage(ctx context.Context, sas StepActionSpec, params v1.ParamSpecs) (errs *apis.FieldError) {
- objectParameterNames := sets.NewString()
- for _, p := range params {
- // collect all names of object type params
- objectParameterNames.Insert(p.Name)
-
- // collect all keys for this object param
- objectKeys := sets.NewString()
- for key := range p.Properties {
- objectKeys.Insert(key)
- }
-
- // check if the object's key names are referenced correctly i.e. param.objectParam.key1
- errs = errs.Also(validateStepActionVariables(ctx, sas, "params\\."+p.Name, objectKeys))
- }
-
- return errs.Also(validateStepActionObjectUsageAsWhole(sas, "params", objectParameterNames))
-}
-
-// validateStepActionObjectUsageAsWhole returns an error if the StepAction contains references to the entire input object params in fields where these references are prohibited
-func validateStepActionObjectUsageAsWhole(sas StepActionSpec, prefix string, vars sets.String) *apis.FieldError {
- errs := substitution.ValidateNoReferencesToEntireProhibitedVariables(sas.Image, prefix, vars).ViaField("image")
- errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(sas.Script, prefix, vars).ViaField("script"))
- for i, cmd := range sas.Command {
- errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(cmd, prefix, vars).ViaFieldIndex("command", i))
- }
- for i, arg := range sas.Args {
- errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(arg, prefix, vars).ViaFieldIndex("args", i))
- }
- for _, env := range sas.Env {
- errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(env.Value, prefix, vars).ViaFieldKey("env", env.Name))
- }
- for i, vm := range sas.VolumeMounts {
- errs = errs.Also(substitution.ValidateNoReferencesToEntireProhibitedVariables(vm.Name, prefix, vars).ViaFieldIndex("volumeMounts", i))
- }
- return errs
-}
-
-// validateStepActionArrayUsage returns an error if the Step contains references to the input array params in fields where these references are prohibited
-func validateStepActionArrayUsage(sas StepActionSpec, prefix string, arrayParamNames sets.String) *apis.FieldError {
- errs := substitution.ValidateNoReferencesToProhibitedVariables(sas.Image, prefix, arrayParamNames).ViaField("image")
- errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(sas.Script, prefix, arrayParamNames).ViaField("script"))
- for i, cmd := range sas.Command {
- errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(cmd, prefix, arrayParamNames).ViaFieldIndex("command", i))
- }
- for i, arg := range sas.Args {
- errs = errs.Also(substitution.ValidateVariableReferenceIsIsolated(arg, prefix, arrayParamNames).ViaFieldIndex("args", i))
- }
- for _, env := range sas.Env {
- errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(env.Value, prefix, arrayParamNames).ViaFieldKey("env", env.Name))
- }
- for i, vm := range sas.VolumeMounts {
- errs = errs.Also(substitution.ValidateNoReferencesToProhibitedVariables(vm.Name, prefix, arrayParamNames).ViaFieldIndex("volumeMounts", i))
- }
- return errs
-}
-
-// validateStepActionVariables returns an error if the StepAction contains references to any unknown variables
-func validateStepActionVariables(ctx context.Context, sas StepActionSpec, prefix string, vars sets.String) *apis.FieldError {
- errs := substitution.ValidateNoReferencesToUnknownVariables(sas.Image, prefix, vars).ViaField("image")
- errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(sas.Script, prefix, vars).ViaField("script"))
- for i, cmd := range sas.Command {
- errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(cmd, prefix, vars).ViaFieldIndex("command", i))
- }
- for i, arg := range sas.Args {
- errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(arg, prefix, vars).ViaFieldIndex("args", i))
- }
- for _, env := range sas.Env {
- errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(env.Value, prefix, vars).ViaFieldKey("env", env.Name))
- }
- for i, vm := range sas.VolumeMounts {
- errs = errs.Also(substitution.ValidateNoReferencesToUnknownVariables(vm.Name, prefix, vars).ViaFieldIndex("volumeMounts", i))
- }
- return errs
-}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/stepaction_validation_test.go b/upstream/pkg/apis/pipeline/v1beta1/stepaction_validation_test.go
deleted file mode 100644
index 49efacdf91a..00000000000
--- a/upstream/pkg/apis/pipeline/v1beta1/stepaction_validation_test.go
+++ /dev/null
@@ -1,975 +0,0 @@
-/*
-Copyright 2023 The Tekton Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1_test
-
-import (
- "context"
- "fmt"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
- "github.com/tektoncd/pipeline/test/diff"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "knative.dev/pkg/apis"
-)
-
-func TestStepActionValidate(t *testing.T) {
- tests := []struct {
- name string
- sa *v1beta1.StepAction
- wc func(context.Context) context.Context
- }{{
- name: "valid step action",
- sa: &v1beta1.StepAction{
- ObjectMeta: metav1.ObjectMeta{Name: "stepaction"},
- Spec: v1beta1.StepActionSpec{
- Image: "my-image",
- Script: `
- #!/usr/bin/env bash
- echo hello`,
- },
- },
- }}
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- ctx := context.Background()
- if tt.wc != nil {
- ctx = tt.wc(ctx)
- }
- err := tt.sa.Validate(ctx)
- if err != nil {
- t.Errorf("StepAction.Validate() returned error for valid StepAction: %v", err)
- }
- })
- }
-}
-
-func TestStepActionSpecValidate(t *testing.T) {
- type fields struct {
- Image string
- Command []string
- Args []string
- Script string
- Env []corev1.EnvVar
- Params []v1.ParamSpec
- Results []v1.StepResult
- VolumeMounts []corev1.VolumeMount
- }
- tests := []struct {
- name string
- fields fields
- }{{
- name: "step action with command",
- fields: fields{
- Image: "myimage",
- Command: []string{"ls"},
- Args: []string{"-lh"},
- },
- }, {
- name: "step action with script",
- fields: fields{
- Image: "myimage",
- Script: "echo hi",
- },
- }, {
- name: "step action with env",
- fields: fields{
- Image: "myimage",
- Script: "echo hi",
- Env: []corev1.EnvVar{{
- Name: "HOME",
- Value: "/tekton/home",
- }},
- },
- }, {
- name: "valid params type explicit",
- fields: fields{
- Image: "myimage",
- Params: []v1.ParamSpec{{
- Name: "stringParam",
- Type: v1.ParamTypeString,
- Description: "param",
- Default: v1.NewStructuredValues("default"),
- }, {
- Name: "objectParam",
- Type: v1.ParamTypeObject,
- Description: "param",
- Properties: map[string]v1.PropertySpec{
- "key1": {},
- "key2": {},
- },
- Default: v1.NewObject(map[string]string{
- "key1": "var1",
- "key2": "var2",
- }),
- }, {
- Name: "objectParamWithoutDefault",
- Type: v1.ParamTypeObject,
- Description: "param",
- Properties: map[string]v1.PropertySpec{
- "key1": {},
- "key2": {},
- },
- }, {
- Name: "objectParamWithDefaultPartialKeys",
- Type: v1.ParamTypeObject,
- Description: "param",
- Properties: map[string]v1.PropertySpec{
- "key1": {},
- "key2": {},
- },
- Default: v1.NewObject(map[string]string{
- "key1": "default",
- }),
- }},
- },
- }, {
- name: "valid string param usage",
- fields: fields{
- Image: "url",
- Params: []v1.ParamSpec{{
- Name: "baz",
- }, {
- Name: "foo-is-baz",
- }},
- Args: []string{"--flag=$(params.baz) && $(params.foo-is-baz)"},
- },
- }, {
- name: "valid array param usage",
- fields: fields{
- Image: "url",
- Params: []v1.ParamSpec{{
- Name: "baz",
- Type: v1.ParamTypeArray,
- }, {
- Name: "foo-is-baz",
- Type: v1.ParamTypeArray,
- }},
- Command: []string{"$(params.foo-is-baz)"},
- Args: []string{"$(params.baz)", "middle string", "$(params.foo-is-baz)"},
- },
- }, {
- name: "valid object param usage",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "gitrepo",
- Type: v1.ParamTypeObject,
- Properties: map[string]v1.PropertySpec{
- "url": {},
- "commit": {},
- },
- }},
- Image: "some-git-image",
- Args: []string{"-url=$(params.gitrepo.url)", "-commit=$(params.gitrepo.commit)"},
- },
- }, {
- name: "valid star array usage",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "baz",
- Type: v1.ParamTypeArray,
- }, {
- Name: "foo-is-baz",
- Type: v1.ParamTypeArray,
- }},
- Image: "myimage",
- Command: []string{"$(params.foo-is-baz)"},
- Args: []string{"$(params.baz[*])", "middle string", "$(params.foo-is-baz[*])"},
- },
- }, {
- name: "valid result",
- fields: fields{
- Image: "my-image",
- Args: []string{"arg"},
- Results: []v1.StepResult{{
- Name: "MY-RESULT",
- Description: "my great result",
- }},
- },
- }, {
- name: "valid result type string",
- fields: fields{
- Image: "my-image",
- Args: []string{"arg"},
- Results: []v1.StepResult{{
- Name: "MY-RESULT",
- Type: "string",
- Description: "my great result",
- }},
- },
- }, {
- name: "valid result type array",
- fields: fields{
- Image: "my-image",
- Args: []string{"arg"},
- Results: []v1.StepResult{{
- Name: "MY-RESULT",
- Type: v1.ResultsTypeArray,
- Description: "my great result",
- }},
- },
- }, {
- name: "valid result type object",
- fields: fields{
- Image: "my-image",
- Args: []string{"arg"},
- Results: []v1.StepResult{{
- Name: "MY-RESULT",
- Type: v1.ResultsTypeObject,
- Description: "my great result",
- Properties: map[string]v1.PropertySpec{
- "url": {Type: "string"},
- "commit": {Type: "string"},
- },
- }},
- },
- }, {
- name: "valid volumeMounts",
- fields: fields{
- Image: "my-image",
- Args: []string{"arg"},
- Params: []v1.ParamSpec{{
- Name: "foo",
- }, {
- Name: "array-params",
- Type: v1.ParamTypeArray,
- }, {
- Name: "object-params",
- Type: v1.ParamTypeObject,
- Properties: map[string]v1.PropertySpec{
- "key": {Type: "string"},
- },
- },
- },
- VolumeMounts: []corev1.VolumeMount{{
- Name: "$(params.foo)",
- MountPath: "/config",
- }, {
- Name: "$(params.array-params[0])",
- MountPath: "/config",
- }, {
- Name: "$(params.object-params.key)",
- MountPath: "/config",
- }},
- },
- }}
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- sa := &v1beta1.StepActionSpec{
- Image: tt.fields.Image,
- Command: tt.fields.Command,
- Args: tt.fields.Args,
- Script: tt.fields.Script,
- Env: tt.fields.Env,
- Params: tt.fields.Params,
- Results: tt.fields.Results,
- VolumeMounts: tt.fields.VolumeMounts,
- }
- ctx := context.Background()
- sa.SetDefaults(ctx)
- if err := sa.Validate(ctx); err != nil {
- t.Errorf("StepActionSpec.Validate() = %v", err)
- }
- })
- }
-}
-
-func TestStepActionValidateError(t *testing.T) {
- type fields struct {
- Image string
- Command []string
- Args []string
- Script string
- Env []corev1.EnvVar
- Params []v1.ParamSpec
- Results []v1.StepResult
- VolumeMounts []corev1.VolumeMount
- }
- tests := []struct {
- name string
- fields fields
- expectedError apis.FieldError
- }{{
- name: "inexistent image field",
- fields: fields{
- Args: []string{"flag"},
- },
- expectedError: apis.FieldError{
- Message: `missing field(s)`,
- Paths: []string{"spec.Image"},
- },
- }, {
- name: "object used in a string field",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "gitrepo",
- Type: v1.ParamTypeObject,
- Properties: map[string]v1.PropertySpec{
- "url": {},
- "commit": {},
- },
- }},
- Image: "$(params.gitrepo)",
- Args: []string{"echo"},
- },
- expectedError: apis.FieldError{
- Message: `variable type invalid in "$(params.gitrepo)"`,
- Paths: []string{"spec.image"},
- },
- }, {
- name: "object star used in a string field",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "gitrepo",
- Type: v1.ParamTypeObject,
- Properties: map[string]v1.PropertySpec{
- "url": {},
- "commit": {},
- },
- }},
- Image: "$(params.gitrepo[*])",
- Args: []string{"echo"},
- },
- expectedError: apis.FieldError{
- Message: `variable type invalid in "$(params.gitrepo[*])"`,
- Paths: []string{"spec.image"},
- },
- }, {
- name: "object used in a field that can accept array type",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "gitrepo",
- Type: v1.ParamTypeObject,
- Properties: map[string]v1.PropertySpec{
- "url": {},
- "commit": {},
- },
- }},
- Image: "myimage",
- Args: []string{"$(params.gitrepo)"},
- },
- expectedError: apis.FieldError{
- Message: `variable type invalid in "$(params.gitrepo)"`,
- Paths: []string{"spec.args[0]"},
- },
- }, {
- name: "object star used in a field that can accept array type",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "gitrepo",
- Type: v1.ParamTypeObject,
- Properties: map[string]v1.PropertySpec{
- "url": {},
- "commit": {},
- },
- }},
- Image: "some-git-image",
- Args: []string{"$(params.gitrepo[*])"},
- },
- expectedError: apis.FieldError{
- Message: `variable type invalid in "$(params.gitrepo[*])"`,
- Paths: []string{"spec.args[0]"},
- },
- }, {
- name: "non-existent individual key of an object param is used in task step",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "gitrepo",
- Type: v1.ParamTypeObject,
- Properties: map[string]v1.PropertySpec{
- "url": {},
- "commit": {},
- },
- }},
- Image: "some-git-image",
- Args: []string{"$(params.gitrepo.non-exist-key)"},
- },
- expectedError: apis.FieldError{
- Message: `non-existent variable in "$(params.gitrepo.non-exist-key)"`,
- Paths: []string{"spec.args[0]"},
- },
- }, {
- name: "Inexistent param variable with existing",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "foo",
- Description: "param",
- Default: v1.NewStructuredValues("default"),
- }},
- Image: "myimage",
- Args: []string{"$(params.foo) && $(params.inexistent)"},
- },
- expectedError: apis.FieldError{
- Message: `non-existent variable in "$(params.foo) && $(params.inexistent)"`,
- Paths: []string{"spec.args[0]"},
- },
- }, {
- name: "invalid param reference in volumeMount.Name - not a param reference",
- fields: fields{
- Image: "myimage",
- Params: []v1.ParamSpec{{
- Name: "foo",
- }},
- VolumeMounts: []corev1.VolumeMount{{
- Name: "params.foo",
- MountPath: "/path",
- }},
- },
- expectedError: apis.FieldError{
- Message: `invalid value: params.foo`,
- Paths: []string{"spec.volumeMounts[0].name"},
- Details: `expect the Name to be a single param reference`,
- },
- }, {
- name: "invalid param reference in volumeMount.Name - nested reference",
- fields: fields{
- Image: "myimage",
- Params: []v1.ParamSpec{{
- Name: "foo",
- }},
- VolumeMounts: []corev1.VolumeMount{{
- Name: "$(params.foo)-foo",
- MountPath: "/path",
- }},
- },
- expectedError: apis.FieldError{
- Message: `invalid value: $(params.foo)-foo`,
- Paths: []string{"spec.volumeMounts[0].name"},
- Details: `expect the Name to be a single param reference`,
- },
- }, {
- name: "invalid param reference in volumeMount.Name - multiple params references",
- fields: fields{
- Image: "myimage",
- Params: []v1.ParamSpec{{
- Name: "foo",
- }, {
- Name: "bar",
- }},
- VolumeMounts: []corev1.VolumeMount{{
- Name: "$(params.foo)$(params.bar)",
- MountPath: "/path",
- }},
- },
- expectedError: apis.FieldError{
- Message: `invalid value: $(params.foo)$(params.bar)`,
- Paths: []string{"spec.volumeMounts[0].name"},
- Details: `expect the Name to be a single param reference`,
- },
- }, {
- name: "invalid param reference in volumeMount.Name - not defined in params",
- fields: fields{
- Image: "myimage",
- VolumeMounts: []corev1.VolumeMount{{
- Name: "$(params.foo)",
- MountPath: "/path",
- }},
- },
- expectedError: apis.FieldError{
- Message: `non-existent variable in "$(params.foo)"`,
- Paths: []string{"spec.volumeMounts[0]"},
- },
- }, {
- name: "invalid param reference in volumeMount.Name - array used in a volumeMounts name field",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "gitrepo",
- Type: v1.ParamTypeArray,
- }},
- Image: "image",
- VolumeMounts: []corev1.VolumeMount{{
- Name: "$(params.gitrepo)",
- }},
- },
- expectedError: apis.FieldError{
- Message: `variable type invalid in "$(params.gitrepo)"`,
- Paths: []string{"spec.volumeMounts[0]"},
- },
- }, {
- name: "invalid param reference in volumeMount.Name - object used in a volumeMounts name field",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "gitrepo",
- Type: v1.ParamTypeObject,
- Properties: map[string]v1.PropertySpec{
- "url": {},
- "commit": {},
- },
- }},
- Image: "image",
- VolumeMounts: []corev1.VolumeMount{{
- Name: "$(params.gitrepo)",
- }},
- },
- expectedError: apis.FieldError{
- Message: `variable type invalid in "$(params.gitrepo)"`,
- Paths: []string{"spec.volumeMounts[0]"},
- },
- }, {
- name: "invalid param reference in volumeMount.Name - object key not existent in params",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "gitrepo",
- Type: v1.ParamTypeObject,
- Properties: map[string]v1.PropertySpec{
- "url": {},
- "commit": {},
- },
- }},
- Image: "image",
- VolumeMounts: []corev1.VolumeMount{{
- Name: "$(params.gitrepo.foo)",
- }},
- },
- expectedError: apis.FieldError{
- Message: `non-existent variable in "$(params.gitrepo.foo)"`,
- Paths: []string{"spec.volumeMounts[0]"},
- },
- }}
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- sa := &v1beta1.StepAction{
- ObjectMeta: metav1.ObjectMeta{Name: "foo"},
- Spec: v1beta1.StepActionSpec{
- Image: tt.fields.Image,
- Command: tt.fields.Command,
- Args: tt.fields.Args,
- Script: tt.fields.Script,
- Env: tt.fields.Env,
- Params: tt.fields.Params,
- Results: tt.fields.Results,
- VolumeMounts: tt.fields.VolumeMounts,
- },
- }
- ctx := context.Background()
- sa.SetDefaults(ctx)
- err := sa.Validate(ctx)
- if err == nil {
- t.Fatalf("Expected an error, got nothing for %v", sa)
- }
- if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" {
- t.Errorf("StepActionSpec.Validate() errors diff %s", diff.PrintWantGot(d))
- }
- })
- }
-}
-
-func TestStepActionSpecValidateError(t *testing.T) {
- type fields struct {
- Image string
- Command []string
- Args []string
- Script string
- Env []corev1.EnvVar
- Params []v1.ParamSpec
- Results []v1.StepResult
- }
- tests := []struct {
- name string
- fields fields
- expectedError apis.FieldError
- }{{
- name: "inexistent image field",
- fields: fields{
- Args: []string{"flag"},
- },
- expectedError: apis.FieldError{
- Message: `missing field(s)`,
- Paths: []string{"Image"},
- },
- }, {
- name: "command and script both used.",
- fields: fields{
- Image: "my-image",
- Command: []string{"ls"},
- Script: "echo hi",
- },
- expectedError: apis.FieldError{
- Message: `script cannot be used with command`,
- Paths: []string{"script"},
- },
- }, {
- name: "windows script without alpha.",
- fields: fields{
- Image: "my-image",
- Script: "#!win",
- },
- expectedError: apis.FieldError{
- Message: `windows script support requires "enable-api-fields" feature gate to be "alpha" but it is "beta"`,
- Paths: []string{},
- },
- }, {
- name: "step script refers to nonexistent result",
- fields: fields{
- Image: "my-image",
- Script: `
- #!/usr/bin/env bash
- date | tee $(results.non-exist.path)`,
- Results: []v1.StepResult{{Name: "a-result"}},
- },
- expectedError: apis.FieldError{
- Message: `non-existent variable in "\n\t\t\t#!/usr/bin/env bash\n\t\t\tdate | tee $(results.non-exist.path)"`,
- Paths: []string{"script"},
- },
- }, {
- name: "step script refers to nonexistent stepresult",
- fields: fields{
- Image: "my-image",
- Script: `
- #!/usr/bin/env bash
- date | tee $(step.results.non-exist.path)`,
- Results: []v1.StepResult{{Name: "a-result"}},
- },
- expectedError: apis.FieldError{
- Message: `non-existent variable in "\n\t\t\t#!/usr/bin/env bash\n\t\t\tdate | tee $(step.results.non-exist.path)"`,
- Paths: []string{"script"},
- },
- }, {
- name: "invalid param name format",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "_validparam1",
- Description: "valid param name format",
- }, {
- Name: "valid_param2",
- Description: "valid param name format",
- }, {
- Name: "",
- Description: "invalid param name format",
- }, {
- Name: "a^b",
- Description: "invalid param name format",
- }, {
- Name: "0ab",
- Description: "invalid param name format",
- }, {
- Name: "f oo",
- Description: "invalid param name format",
- }},
- Image: "myImage",
- },
- expectedError: apis.FieldError{
- Message: fmt.Sprintf("The format of following array and string variable names is invalid: %s", []string{"", "0ab", "a^b", "f oo"}),
- Paths: []string{"params"},
- Details: "String/Array Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_), and dots (.)\nMust begin with a letter or an underscore (_)",
- },
- }, {
- name: "invalid object param format - object param name and key name shouldn't contain dots.",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "invalid.name1",
- Description: "object param name contains dots",
- Properties: map[string]v1.PropertySpec{
- "invalid.key1": {},
- "mykey2": {},
- },
- }},
- Image: "myImage",
- },
- expectedError: apis.FieldError{
- Message: fmt.Sprintf("Object param name and key name format is invalid: %v", map[string][]string{
- "invalid.name1": {"invalid.key1"},
- }),
- Paths: []string{"params"},
- Details: "Object Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_) \nMust begin with a letter or an underscore (_)",
- },
- }, {
- name: "duplicated param names",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "foo",
- Type: v1.ParamTypeString,
- Description: "parameter",
- Default: v1.NewStructuredValues("value1"),
- }, {
- Name: "foo",
- Type: v1.ParamTypeString,
- Description: "parameter",
- Default: v1.NewStructuredValues("value2"),
- }},
- Image: "myImage",
- },
- expectedError: apis.FieldError{
- Message: `parameter appears more than once`,
- Paths: []string{"params[foo]"},
- },
- }, {
- name: "invalid param type",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "validparam",
- Type: v1.ParamTypeString,
- Description: "parameter",
- Default: v1.NewStructuredValues("default"),
- }, {
- Name: "param-with-invalid-type",
- Type: "invalidtype",
- Description: "invalidtypedesc",
- Default: v1.NewStructuredValues("default"),
- }},
- Image: "myImage",
- },
- expectedError: apis.FieldError{
- Message: `invalid value: invalidtype`,
- Paths: []string{"params.param-with-invalid-type.type"},
- },
- }, {
- name: "param mismatching default/type 1",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "task",
- Type: v1.ParamTypeArray,
- Description: "param",
- Default: v1.NewStructuredValues("default"),
- }},
- Image: "myImage",
- },
- expectedError: apis.FieldError{
- Message: `"array" type does not match default value's type: "string"`,
- Paths: []string{"params.task.type", "params.task.default.type"},
- },
- }, {
- name: "param mismatching default/type 2",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "task",
- Type: v1.ParamTypeString,
- Description: "param",
- Default: v1.NewStructuredValues("default", "array"),
- }},
- Image: "myImage",
- },
- expectedError: apis.FieldError{
- Message: `"string" type does not match default value's type: "array"`,
- Paths: []string{"params.task.type", "params.task.default.type"},
- },
- }, {
- name: "param mismatching default/type 3",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "task",
- Type: v1.ParamTypeArray,
- Description: "param",
- Default: v1.NewObject(map[string]string{
- "key1": "var1",
- "key2": "var2",
- }),
- }},
- Image: "myImage",
- },
- expectedError: apis.FieldError{
- Message: `"array" type does not match default value's type: "object"`,
- Paths: []string{"params.task.type", "params.task.default.type"},
- },
- }, {
- name: "param mismatching default/type 4",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "task",
- Type: v1.ParamTypeObject,
- Description: "param",
- Properties: map[string]v1.PropertySpec{"key1": {}},
- Default: v1.NewStructuredValues("var"),
- }},
- Image: "myImage",
- },
- expectedError: apis.FieldError{
- Message: `"object" type does not match default value's type: "string"`,
- Paths: []string{"params.task.type", "params.task.default.type"},
- },
- }, {
- name: "PropertySpec type is set with unsupported type",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "task",
- Type: v1.ParamTypeObject,
- Description: "param",
- Properties: map[string]v1.PropertySpec{
- "key1": {Type: "number"},
- "key2": {Type: "string"},
- },
- }},
- Image: "myImage",
- },
- expectedError: apis.FieldError{
- Message: fmt.Sprintf("The value type specified for these keys %v is invalid", []string{"key1"}),
- Paths: []string{"params.task.properties"},
- },
- }, {
- name: "Properties is missing",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "task",
- Type: v1.ParamTypeObject,
- Description: "param",
- }},
- Image: "myImage",
- },
- expectedError: apis.FieldError{
- Message: "missing field(s)",
- Paths: []string{"task.properties"},
- },
- }, {
- name: "array used in unaccepted field",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "baz",
- Type: v1.ParamTypeArray,
- }, {
- Name: "foo-is-baz",
- Type: v1.ParamTypeArray,
- }},
- Image: "$(params.baz)",
- Command: []string{"$(params.foo-is-baz)"},
- Args: []string{"$(params.baz)", "middle string", "url"},
- },
- expectedError: apis.FieldError{
- Message: `variable type invalid in "$(params.baz)"`,
- Paths: []string{"image"},
- },
- }, {
- name: "array star used in unaccepted field",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "baz",
- Type: v1.ParamTypeArray,
- }, {
- Name: "foo-is-baz",
- Type: v1.ParamTypeArray,
- }},
- Image: "$(params.baz[*])",
- Command: []string{"$(params.foo-is-baz)"},
- Args: []string{"$(params.baz)", "middle string", "url"},
- },
- expectedError: apis.FieldError{
- Message: `variable type invalid in "$(params.baz[*])"`,
- Paths: []string{"image"},
- },
- }, {
- name: "array not properly isolated",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "baz",
- Type: v1.ParamTypeArray,
- }, {
- Name: "foo-is-baz",
- Type: v1.ParamTypeArray,
- }},
- Image: "someimage",
- Command: []string{"$(params.foo-is-baz)"},
- Args: []string{"not isolated: $(params.baz)", "middle string", "url"},
- },
- expectedError: apis.FieldError{
- Message: `variable is not properly isolated in "not isolated: $(params.baz)"`,
- Paths: []string{"args[0]"},
- },
- }, {
- name: "array star not properly isolated",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "baz",
- Type: v1.ParamTypeArray,
- }, {
- Name: "foo-is-baz",
- Type: v1.ParamTypeArray,
- }},
- Image: "someimage",
- Command: []string{"$(params.foo-is-baz)"},
- Args: []string{"not isolated: $(params.baz[*])", "middle string", "url"},
- },
- expectedError: apis.FieldError{
- Message: `variable is not properly isolated in "not isolated: $(params.baz[*])"`,
- Paths: []string{"args[0]"},
- },
- }, {
- name: "inferred array not properly isolated",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "baz",
- Default: v1.NewStructuredValues("implied", "array", "type"),
- }, {
- Name: "foo-is-baz",
- Default: v1.NewStructuredValues("implied", "array", "type"),
- }},
- Image: "someimage",
- Command: []string{"$(params.foo-is-baz)"},
- Args: []string{"not isolated: $(params.baz)", "middle string", "url"},
- },
- expectedError: apis.FieldError{
- Message: `variable is not properly isolated in "not isolated: $(params.baz)"`,
- Paths: []string{"args[0]"},
- },
- }, {
- name: "inferred array star not properly isolated",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "baz",
- Default: v1.NewStructuredValues("implied", "array", "type"),
- }, {
- Name: "foo-is-baz",
- Default: v1.NewStructuredValues("implied", "array", "type"),
- }},
- Image: "someimage",
- Command: []string{"$(params.foo-is-baz)"},
- Args: []string{"not isolated: $(params.baz[*])", "middle string", "url"},
- },
- expectedError: apis.FieldError{
- Message: `variable is not properly isolated in "not isolated: $(params.baz[*])"`,
- Paths: []string{"args[0]"},
- },
- }, {
- name: "params used in script field",
- fields: fields{
- Params: []v1.ParamSpec{{
- Name: "baz",
- Type: v1.ParamTypeArray,
- }, {
- Name: "foo-is-baz",
- Type: v1.ParamTypeString,
- }},
- Script: "$(params.baz[0]), $(params.foo-is-baz)",
- Image: "my-image",
- },
- expectedError: apis.FieldError{
- Message: `param substitution in scripts is not allowed.`,
- Paths: []string{"script"},
- },
- }}
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- sa := v1beta1.StepActionSpec{
- Image: tt.fields.Image,
- Command: tt.fields.Command,
- Args: tt.fields.Args,
- Script: tt.fields.Script,
- Env: tt.fields.Env,
- Params: tt.fields.Params,
- Results: tt.fields.Results,
- }
- ctx := context.Background()
- sa.SetDefaults(ctx)
- err := sa.Validate(ctx)
- if err == nil {
- t.Fatalf("Expected an error, got nothing for %v", sa)
- }
- if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" {
- t.Errorf("StepActionSpec.Validate() errors diff %s", diff.PrintWantGot(d))
- }
- })
- }
-}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/swagger.json b/upstream/pkg/apis/pipeline/v1beta1/swagger.json
index 7e2d8d9d95c..622b1d680c3 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/swagger.json
+++ b/upstream/pkg/apis/pipeline/v1beta1/swagger.json
@@ -28,14 +28,6 @@
"default": ""
}
},
- "priorityClassName": {
- "description": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
- "type": "string"
- },
- "securityContext": {
- "description": "SecurityContext sets the security context for the pod",
- "$ref": "#/definitions/v1.PodSecurityContext"
- },
"tolerations": {
"description": "If specified, the pod's tolerations.",
"type": "array",
@@ -163,16 +155,11 @@
"description": "TaskRunStepArtifact represents an artifact produced or used by a step within a task run. It directly uses the Artifact type for its structure.",
"type": "object",
"properties": {
- "buildOutput": {
- "description": "Indicate if the artifact is a build output or a by-product",
- "type": "boolean"
- },
"name": {
- "description": "The artifact's identifying category name",
"type": "string"
},
"values": {
- "description": "A collection of values related to the artifact",
+ "description": "The artifact's identifying category name",
"type": "array",
"items": {
"default": {},
@@ -488,6 +475,7 @@
},
"spec": {
"description": "Spec is a specification of a custom task",
+ "default": {},
"$ref": "#/definitions/k8s.io.apimachinery.pkg.runtime.RawExtension"
}
}
@@ -547,6 +535,7 @@
},
"spec": {
"description": "Spec is a specification of a custom task",
+ "default": {},
"$ref": "#/definitions/k8s.io.apimachinery.pkg.runtime.RawExtension"
},
"stepTemplate": {
@@ -673,6 +662,7 @@
"default": ""
},
"value": {
+ "default": {},
"$ref": "#/definitions/v1beta1.ParamValue"
}
}
@@ -839,7 +829,7 @@
"type": "string"
},
"bundle": {
- "description": "Bundle url reference to a Tekton Bundle.\n\nDeprecated: Please use ResolverRef with the bundles resolver instead. The field is staying there for go client backward compatibility, but is not used/allowed anymore.",
+ "description": "Bundle url reference to a Tekton Bundle.\n\nDeprecated: Please use ResolverRef with the bundles resolver instead.",
"type": "string"
},
"name": {
@@ -904,6 +894,7 @@
},
"value": {
"description": "Value the expression used to retrieve the value",
+ "default": {},
"$ref": "#/definitions/v1beta1.ParamValue"
}
}
@@ -974,6 +965,7 @@
},
"value": {
"description": "Value is the result returned from the execution of this PipelineRun",
+ "default": {},
"$ref": "#/definitions/v1beta1.ParamValue"
}
}
@@ -1730,10 +1722,6 @@
"$ref": "#/definitions/v1.Param"
},
"x-kubernetes-list-type": "atomic"
- },
- "url": {
- "description": "URL is the runtime url passed to the resolver to help it figure out how to resolver the resource being requested. This is currently at an ALPHA stability level and subject to alpha API compatibility policies.",
- "type": "string"
}
}
},
@@ -1945,10 +1933,6 @@
"default": {},
"$ref": "#/definitions/v1.ResourceRequirements"
},
- "restartPolicy": {
- "description": "RestartPolicy refers to kubernetes RestartPolicy. It can only be set for an initContainer and must have it's policy set to \"Always\". It is currently left optional to help support Kubernetes versions prior to 1.29 when this feature was introduced.",
- "type": "string"
- },
"script": {
"description": "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command or Args.",
"type": "string"
@@ -2256,13 +2240,6 @@
"x-kubernetes-patch-merge-key": "mountPath",
"x-kubernetes-patch-strategy": "merge"
},
- "when": {
- "type": "array",
- "items": {
- "default": {},
- "$ref": "#/definitions/v1beta1.WhenExpression"
- }
- },
"workingDir": {
"description": "Step's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
"type": "string"
@@ -2278,141 +2255,6 @@
}
}
},
- "v1beta1.StepAction": {
- "description": "StepAction represents the actionable components of Step. The Step can only reference it from the cluster or using remote resolution.",
- "type": "object",
- "properties": {
- "apiVersion": {
- "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
- "type": "string"
- },
- "kind": {
- "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
- "type": "string"
- },
- "metadata": {
- "default": {},
- "$ref": "#/definitions/v1.ObjectMeta"
- },
- "spec": {
- "description": "Spec holds the desired state of the Step from the client",
- "default": {},
- "$ref": "#/definitions/v1beta1.StepActionSpec"
- }
- }
- },
- "v1beta1.StepActionList": {
- "description": "StepActionList contains a list of StepActions",
- "type": "object",
- "required": [
- "items"
- ],
- "properties": {
- "apiVersion": {
- "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
- "type": "string"
- },
- "items": {
- "type": "array",
- "items": {
- "default": {},
- "$ref": "#/definitions/v1beta1.StepAction"
- }
- },
- "kind": {
- "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
- "type": "string"
- },
- "metadata": {
- "default": {},
- "$ref": "#/definitions/v1.ListMeta"
- }
- }
- },
- "v1beta1.StepActionSpec": {
- "description": "StepActionSpec contains the actionable components of a step.",
- "type": "object",
- "properties": {
- "args": {
- "description": "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
- "type": "array",
- "items": {
- "type": "string",
- "default": ""
- },
- "x-kubernetes-list-type": "atomic"
- },
- "command": {
- "description": "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
- "type": "array",
- "items": {
- "type": "string",
- "default": ""
- },
- "x-kubernetes-list-type": "atomic"
- },
- "description": {
- "description": "Description is a user-facing description of the stepaction that may be used to populate a UI.",
- "type": "string"
- },
- "env": {
- "description": "List of environment variables to set in the container. Cannot be updated.",
- "type": "array",
- "items": {
- "default": {},
- "$ref": "#/definitions/v1.EnvVar"
- },
- "x-kubernetes-list-type": "atomic",
- "x-kubernetes-patch-merge-key": "name",
- "x-kubernetes-patch-strategy": "merge"
- },
- "image": {
- "description": "Image reference name to run for this StepAction. More info: https://kubernetes.io/docs/concepts/containers/images",
- "type": "string"
- },
- "params": {
- "description": "Params is a list of input parameters required to run the stepAction. Params must be supplied as inputs in Steps unless they declare a defaultvalue.",
- "type": "array",
- "items": {
- "default": {},
- "$ref": "#/definitions/v1.ParamSpec"
- },
- "x-kubernetes-list-type": "atomic"
- },
- "results": {
- "description": "Results are values that this StepAction can output",
- "type": "array",
- "items": {
- "default": {},
- "$ref": "#/definitions/v1.StepResult"
- },
- "x-kubernetes-list-type": "atomic"
- },
- "script": {
- "description": "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.",
- "type": "string"
- },
- "securityContext": {
- "description": "SecurityContext defines the security options the Step should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ The value set in StepAction will take precedence over the value from Task.",
- "$ref": "#/definitions/v1.SecurityContext"
- },
- "volumeMounts": {
- "description": "Volumes to mount into the Step's filesystem. Cannot be updated.",
- "type": "array",
- "items": {
- "default": {},
- "$ref": "#/definitions/v1.VolumeMount"
- },
- "x-kubernetes-list-type": "atomic",
- "x-kubernetes-patch-merge-key": "mountPath",
- "x-kubernetes-patch-strategy": "merge"
- },
- "workingDir": {
- "description": "Step's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
- "type": "string"
- }
- }
- },
"v1beta1.StepOutputConfig": {
"description": "StepOutputConfig stores configuration for a step output stream.",
"type": "object",
@@ -2450,9 +2292,6 @@
"$ref": "#/definitions/v1beta1.Artifact"
}
},
- "provenance": {
- "$ref": "#/definitions/v1beta1.Provenance"
- },
"results": {
"type": "array",
"items": {
@@ -2647,14 +2486,6 @@
"description": "TaskBreakpoints defines the breakpoint config for a particular Task",
"type": "object",
"properties": {
- "beforeSteps": {
- "type": "array",
- "items": {
- "type": "string",
- "default": ""
- },
- "x-kubernetes-list-type": "atomic"
- },
"onFailure": {
"description": "if enabled, pause TaskRun on failure of a step failed step will not exit",
"type": "string"
@@ -2698,7 +2529,7 @@
"type": "string"
},
"bundle": {
- "description": "Bundle url reference to a Tekton Bundle.\n\nDeprecated: Please use ResolverRef with the bundles resolver instead. The field is staying there for go client backward compatibility, but is not used/allowed anymore.",
+ "description": "Bundle url reference to a Tekton Bundle.\n\nDeprecated: Please use ResolverRef with the bundles resolver instead.",
"type": "string"
},
"kind": {
@@ -2970,6 +2801,7 @@
},
"value": {
"description": "Value the given value of the result",
+ "default": {},
"$ref": "#/definitions/v1beta1.ParamValue"
}
}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/task_conversion.go b/upstream/pkg/apis/pipeline/v1beta1/task_conversion.go
index 9a0d4fe503d..4c026e10b60 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/task_conversion.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/task_conversion.go
@@ -212,7 +212,7 @@ func serializeTaskDeprecations(meta *metav1.ObjectMeta, spec *TaskSpec, taskName
existingDeprecations := taskDeprecations{}
if str, ok := meta.Annotations[TaskDeprecationsAnnotationKey]; ok {
if err := json.Unmarshal([]byte(str), &existingDeprecations); err != nil {
- return fmt.Errorf("error serializing key %s from metadata: %w", TaskDeprecationsAnnotationKey, err)
+ return fmt.Errorf("error deserializing key %s from metadata: %w", TaskDeprecationsAnnotationKey, err)
}
}
if taskDeprecation != nil {
@@ -238,7 +238,7 @@ func deserializeTaskDeprecations(meta *metav1.ObjectMeta, spec *TaskSpec, taskNa
if len(spec.Steps) != len(td.DeprecatedSteps) {
return errors.New("length of deserialized steps mismatch the length of target steps")
}
- for i := range len(spec.Steps) {
+ for i := 0; i < len(spec.Steps); i++ {
spec.Steps[i].DeprecatedPorts = td.DeprecatedSteps[i].DeprecatedPorts
spec.Steps[i].DeprecatedLivenessProbe = td.DeprecatedSteps[i].DeprecatedLivenessProbe
spec.Steps[i].DeprecatedReadinessProbe = td.DeprecatedSteps[i].DeprecatedReadinessProbe
diff --git a/upstream/pkg/apis/pipeline/v1beta1/task_conversion_test.go b/upstream/pkg/apis/pipeline/v1beta1/task_conversion_test.go
index 64aaca8ab10..ddf97617f8c 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/task_conversion_test.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/task_conversion_test.go
@@ -95,22 +95,6 @@ spec:
properties:
key:
type: string
-`
- stepWhenTaskYAML := `
-metadata:
- name: foo
- namespace: bar
-spec:
- displayName: "task-step-when"
- description: test
- steps:
- - image: foo
- name: should-execute
- image: bash:latest
- when:
- - input: "$(workspaces.custom.bound)"
- operator: in
- values: ["true"]
`
stepActionTaskYAML := `
metadata:
@@ -346,9 +330,6 @@ spec:
stepResultTaskV1beta1 := parse.MustParseV1beta1Task(t, stepResultTaskYAML)
stepResultTaskV1 := parse.MustParseV1Task(t, stepResultTaskYAML)
- stepWhenTaskV1beta1 := parse.MustParseV1beta1Task(t, stepWhenTaskYAML)
- stepWhenTaskV1 := parse.MustParseV1Task(t, stepWhenTaskYAML)
-
stepActionTaskV1beta1 := parse.MustParseV1beta1Task(t, stepActionTaskYAML)
stepActionTaskV1 := parse.MustParseV1Task(t, stepActionTaskYAML)
@@ -394,10 +375,6 @@ spec:
name: "step results in task",
v1beta1Task: stepResultTaskV1beta1,
v1Task: stepResultTaskV1,
- }, {
- name: "step when in task",
- v1beta1Task: stepWhenTaskV1beta1,
- v1Task: stepWhenTaskV1,
}, {
name: "step action in task",
v1beta1Task: stepActionTaskV1beta1,
diff --git a/upstream/pkg/apis/pipeline/v1beta1/task_validation.go b/upstream/pkg/apis/pipeline/v1beta1/task_validation.go
index 4d03a950125..9ef0db6909a 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/task_validation.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/task_validation.go
@@ -249,9 +249,6 @@ func validateSteps(ctx context.Context, steps []Step) (errs *apis.FieldError) {
errs = errs.Also(v1.ValidateStepResultsVariables(ctx, s.Results, s.Script).ViaIndex(idx))
errs = errs.Also(v1.ValidateStepResults(ctx, s.Results).ViaIndex(idx).ViaField("results"))
}
- if len(s.When) > 0 {
- errs = errs.Also(s.When.validate(ctx).ViaIndex(idx))
- }
}
return errs
}
@@ -304,11 +301,7 @@ func errorIfStepResultReferenceinField(value, fieldName string) (errs *apis.Fiel
}
func stepArtifactReferenceExists(src string) bool {
- return len(artifactref.StepArtifactRegex.FindAllStringSubmatch(src, -1)) > 0 || strings.Contains(src, "$("+artifactref.StepArtifactPathPattern+")")
-}
-
-func taskArtifactReferenceExists(src string) bool {
- return len(artifactref.TaskArtifactRegex.FindAllStringSubmatch(src, -1)) > 0 || strings.Contains(src, "$("+artifactref.TaskArtifactPathPattern+")")
+ return len(artifactref.StepArtifactRegex.FindAllStringSubmatch(src, -1)) > 0 || strings.Contains(src, "$(step.artifacts.path)")
}
func errorIfStepArtifactReferencedInField(value, fieldName string) (errs *apis.FieldError) {
@@ -375,8 +368,17 @@ func validateStepResultReference(s Step) (errs *apis.FieldError) {
}
func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.FieldError) {
- if err := validateArtifactsReferencesInStep(ctx, s); err != nil {
- return err
+ if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableArtifacts {
+ var t []string
+ t = append(t, s.Script)
+ t = append(t, s.Command...)
+ t = append(t, s.Args...)
+ for _, e := range s.Env {
+ t = append(t, e.Value)
+ }
+ if slices.ContainsFunc(t, stepArtifactReferenceExists) {
+ return errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use artifacts feature.", config.EnableArtifacts), ""))
+ }
}
if s.Ref != nil {
@@ -444,11 +446,6 @@ func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.Fi
return apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true in order to use Results in Steps.", config.EnableStepActions), "")
}
}
- if len(s.When) > 0 {
- if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableStepActions && isCreateOrUpdateAndDiverged(ctx, s) {
- return apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true in order to use When in Steps.", config.EnableStepActions), "")
- }
- }
if s.Image == "" {
errs = errs.Also(apis.ErrMissingField("Image"))
}
@@ -532,22 +529,6 @@ func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.Fi
return errs
}
-func validateArtifactsReferencesInStep(ctx context.Context, s Step) *apis.FieldError {
- if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableArtifacts {
- var t []string
- t = append(t, s.Script)
- t = append(t, s.Command...)
- t = append(t, s.Args...)
- for _, e := range s.Env {
- t = append(t, e.Value)
- }
- if slices.ContainsFunc(t, stepArtifactReferenceExists) || slices.ContainsFunc(t, taskArtifactReferenceExists) {
- return apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use artifacts feature.", config.EnableArtifacts), "")
- }
- }
- return nil
-}
-
// ValidateParameterTypes validates all the types within a slice of ParamSpecs
func ValidateParameterTypes(ctx context.Context, params []ParamSpec) (errs *apis.FieldError) {
for _, p := range params {
diff --git a/upstream/pkg/apis/pipeline/v1beta1/task_validation_test.go b/upstream/pkg/apis/pipeline/v1beta1/task_validation_test.go
index fec9351b99e..4f36224b78e 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/task_validation_test.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/task_validation_test.go
@@ -33,9 +33,7 @@ import (
"github.com/tektoncd/pipeline/test/diff"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/sets"
- "k8s.io/utils/pointer"
"knative.dev/pkg/apis"
)
@@ -339,41 +337,6 @@ func TestTaskSpecValidate(t *testing.T) {
hello world`,
}},
},
- }, {
- name: "step template included in validation with stepaction",
- fields: fields{
- Steps: []v1beta1.Step{{
- Name: "astep",
- Ref: &v1beta1.Ref{
- Name: "stepAction",
- },
- }},
- StepTemplate: &v1beta1.StepTemplate{
- Image: "some-image",
- SecurityContext: &corev1.SecurityContext{
- RunAsNonRoot: pointer.Bool(true),
- },
- VolumeMounts: []corev1.VolumeMount{{
- Name: "data",
- MountPath: "/workspace/data",
- }},
- Env: []corev1.EnvVar{{
- Name: "KEEP_THIS",
- Value: "A_VALUE",
- }, {
- Name: "SOME_KEY_1",
- ValueFrom: &corev1.EnvVarSource{
- SecretKeyRef: &corev1.SecretKeySelector{
- Key: "A_KEY",
- LocalObjectReference: corev1.LocalObjectReference{Name: "A_NAME"},
- },
- },
- }, {
- Name: "SOME_KEY_2",
- Value: "VALUE_2",
- }},
- },
- },
}, {
name: "valid step with parameterized script",
fields: fields{
@@ -797,7 +760,8 @@ func TestTaskValidateError(t *testing.T) {
Spec: v1beta1.TaskSpec{
Params: tt.fields.Params,
Steps: tt.fields.Steps,
- }}
+ },
+ }
ctx := cfgtesting.EnableAlphaAPIFields(context.Background())
task.SetDefaults(ctx)
err := task.Validate(ctx)
@@ -1117,7 +1081,8 @@ func TestTaskSpecValidateError(t *testing.T) {
Name: "mystep",
Image: "my-image",
WorkingDir: "/foo/bar/src/",
- }},
+ },
+ },
},
expectedError: apis.FieldError{
Message: `variable type invalid in "$(params.baz[*])"`,
@@ -1470,43 +1435,44 @@ func TestTaskSpecValidateErrorWithStepActionRef_CreateUpdateEvent(t *testing.T)
isCreate bool
isUpdate bool
expectedError apis.FieldError
- }{{
- name: "is create ctx",
- Steps: []v1beta1.Step{{
- Ref: &v1beta1.Ref{
- Name: "stepAction",
+ }{
+ {
+ name: "is create ctx",
+ Steps: []v1beta1.Step{{
+ Ref: &v1beta1.Ref{
+ Name: "stepAction",
+ },
+ }},
+ isCreate: true,
+ isUpdate: false,
+ expectedError: apis.FieldError{
+ Message: "feature flag enable-step-actions should be set to true to reference StepActions in Steps.",
+ Paths: []string{"steps[0]"},
},
- }},
- isCreate: true,
- isUpdate: false,
- expectedError: apis.FieldError{
- Message: "feature flag enable-step-actions should be set to true to reference StepActions in Steps.",
- Paths: []string{"steps[0]"},
- },
- }, {
- name: "is update ctx",
- Steps: []v1beta1.Step{{
- Ref: &v1beta1.Ref{
- Name: "stepAction",
+ }, {
+ name: "is update ctx",
+ Steps: []v1beta1.Step{{
+ Ref: &v1beta1.Ref{
+ Name: "stepAction",
+ },
+ }},
+ isCreate: false,
+ isUpdate: true,
+ expectedError: apis.FieldError{
+ Message: "feature flag enable-step-actions should be set to true to reference StepActions in Steps.",
+ Paths: []string{"steps[0]"},
},
- }},
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{
- Message: "feature flag enable-step-actions should be set to true to reference StepActions in Steps.",
- Paths: []string{"steps[0]"},
+ }, {
+ name: "ctx is not create or update",
+ Steps: []v1beta1.Step{{
+ Ref: &v1beta1.Ref{
+ Name: "stepAction",
+ },
+ }},
+ isCreate: false,
+ isUpdate: false,
+ expectedError: apis.FieldError{},
},
- }, {
- name: "ctx is not create or update",
- Steps: []v1beta1.Step{{
- Ref: &v1beta1.Ref{
- Name: "stepAction",
- },
- }},
- isCreate: false,
- isUpdate: false,
- expectedError: apis.FieldError{},
- },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -1682,85 +1648,86 @@ func TestTaskSpecValidateErrorWithStepResultRef(t *testing.T) {
name string
Steps []v1beta1.Step
expectedError apis.FieldError
- }{{
- name: "Cannot reference step results in image",
- Steps: []v1beta1.Step{{
- Image: "$(steps.prevStep.results.resultName)",
- }},
- expectedError: apis.FieldError{
- Message: "stepResult substitutions are only allowed in env, command and args. Found usage in",
- Paths: []string{"steps[0].image"},
- },
- }, {
- name: "Cannot reference step results in script",
- Steps: []v1beta1.Step{{
- Image: "my-img",
- Script: "echo $(steps.prevStep.results.resultName)",
- }},
- expectedError: apis.FieldError{
- Message: "stepResult substitutions are only allowed in env, command and args. Found usage in",
- Paths: []string{"steps[0].script"},
- },
- }, {
- name: "Cannot reference step results in workingDir",
- Steps: []v1beta1.Step{{
- Image: "my-img",
- WorkingDir: "$(steps.prevStep.results.resultName)",
- }},
- expectedError: apis.FieldError{
- Message: "stepResult substitutions are only allowed in env, command and args. Found usage in",
- Paths: []string{"steps[0].workingDir"},
- },
- }, {
- name: "Cannot reference step results in envFrom",
- Steps: []v1beta1.Step{{
- Image: "my-img",
- EnvFrom: []corev1.EnvFromSource{{
- Prefix: "$(steps.prevStep.results.resultName)",
- ConfigMapRef: &corev1.ConfigMapEnvSource{
- LocalObjectReference: corev1.LocalObjectReference{
- Name: "$(steps.prevStep.results.resultName)",
+ }{
+ {
+ name: "Cannot reference step results in image",
+ Steps: []v1beta1.Step{{
+ Image: "$(steps.prevStep.results.resultName)",
+ }},
+ expectedError: apis.FieldError{
+ Message: "stepResult substitutions are only allowed in env, command and args. Found usage in",
+ Paths: []string{"steps[0].image"},
+ },
+ }, {
+ name: "Cannot reference step results in script",
+ Steps: []v1beta1.Step{{
+ Image: "my-img",
+ Script: "echo $(steps.prevStep.results.resultName)",
+ }},
+ expectedError: apis.FieldError{
+ Message: "stepResult substitutions are only allowed in env, command and args. Found usage in",
+ Paths: []string{"steps[0].script"},
+ },
+ }, {
+ name: "Cannot reference step results in workingDir",
+ Steps: []v1beta1.Step{{
+ Image: "my-img",
+ WorkingDir: "$(steps.prevStep.results.resultName)",
+ }},
+ expectedError: apis.FieldError{
+ Message: "stepResult substitutions are only allowed in env, command and args. Found usage in",
+ Paths: []string{"steps[0].workingDir"},
+ },
+ }, {
+ name: "Cannot reference step results in envFrom",
+ Steps: []v1beta1.Step{{
+ Image: "my-img",
+ EnvFrom: []corev1.EnvFromSource{{
+ Prefix: "$(steps.prevStep.results.resultName)",
+ ConfigMapRef: &corev1.ConfigMapEnvSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "$(steps.prevStep.results.resultName)",
+ },
},
- },
- SecretRef: &corev1.SecretEnvSource{
- LocalObjectReference: corev1.LocalObjectReference{
- Name: "$(steps.prevStep.results.resultName)",
+ SecretRef: &corev1.SecretEnvSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "$(steps.prevStep.results.resultName)",
+ },
},
- },
+ }},
}},
- }},
- expectedError: apis.FieldError{
- Message: "stepResult substitutions are only allowed in env, command and args. Found usage in",
- Paths: []string{"steps[0].envFrom.configMapRef", "steps[0].envFrom.prefix", "steps[0].envFrom.secretRef"},
- },
- }, {
- name: "Cannot reference step results in VolumeMounts",
- Steps: []v1beta1.Step{{
- Image: "my-img",
- VolumeMounts: []corev1.VolumeMount{{
- Name: "$(steps.prevStep.results.resultName)",
- MountPath: "$(steps.prevStep.results.resultName)",
- SubPath: "$(steps.prevStep.results.resultName)",
+ expectedError: apis.FieldError{
+ Message: "stepResult substitutions are only allowed in env, command and args. Found usage in",
+ Paths: []string{"steps[0].envFrom.configMapRef", "steps[0].envFrom.prefix", "steps[0].envFrom.secretRef"},
+ },
+ }, {
+ name: "Cannot reference step results in VolumeMounts",
+ Steps: []v1beta1.Step{{
+ Image: "my-img",
+ VolumeMounts: []corev1.VolumeMount{{
+ Name: "$(steps.prevStep.results.resultName)",
+ MountPath: "$(steps.prevStep.results.resultName)",
+ SubPath: "$(steps.prevStep.results.resultName)",
+ }},
}},
- }},
- expectedError: apis.FieldError{
- Message: "stepResult substitutions are only allowed in env, command and args. Found usage in",
- Paths: []string{"steps[0].volumeMounts.name", "steps[0].volumeMounts.mountPath", "steps[0].volumeMounts.subPath"},
- },
- }, {
- name: "Cannot reference step results in VolumeDevices",
- Steps: []v1beta1.Step{{
- Image: "my-img",
- VolumeDevices: []corev1.VolumeDevice{{
- Name: "$(steps.prevStep.results.resultName)",
- DevicePath: "$(steps.prevStep.results.resultName)",
+ expectedError: apis.FieldError{
+ Message: "stepResult substitutions are only allowed in env, command and args. Found usage in",
+ Paths: []string{"steps[0].volumeMounts.name", "steps[0].volumeMounts.mountPath", "steps[0].volumeMounts.subPath"},
+ },
+ }, {
+ name: "Cannot reference step results in VolumeDevices",
+ Steps: []v1beta1.Step{{
+ Image: "my-img",
+ VolumeDevices: []corev1.VolumeDevice{{
+ Name: "$(steps.prevStep.results.resultName)",
+ DevicePath: "$(steps.prevStep.results.resultName)",
+ }},
}},
- }},
- expectedError: apis.FieldError{
- Message: "stepResult substitutions are only allowed in env, command and args. Found usage in",
- Paths: []string{"steps[0].volumeDevices.name", "steps[0].volumeDevices.devicePath"},
+ expectedError: apis.FieldError{
+ Message: "stepResult substitutions are only allowed in env, command and args. Found usage in",
+ Paths: []string{"steps[0].volumeDevices.name", "steps[0].volumeDevices.devicePath"},
+ },
},
- },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -2014,70 +1981,72 @@ func TestIncompatibleAPIVersions(t *testing.T) {
name string
requiredVersion string
spec v1beta1.TaskSpec
- }{{
- name: "step workspace requires beta",
- requiredVersion: "beta",
- spec: v1beta1.TaskSpec{
- Workspaces: []v1beta1.WorkspaceDeclaration{{
- Name: "foo",
- }},
- Steps: []v1beta1.Step{{
- Image: "foo",
- Workspaces: []v1beta1.WorkspaceUsage{{
+ }{
+ {
+ name: "step workspace requires beta",
+ requiredVersion: "beta",
+ spec: v1beta1.TaskSpec{
+ Workspaces: []v1beta1.WorkspaceDeclaration{{
Name: "foo",
}},
- }},
- },
- }, {
- name: "sidecar workspace requires beta",
- requiredVersion: "beta",
- spec: v1beta1.TaskSpec{
- Workspaces: []v1beta1.WorkspaceDeclaration{{
- Name: "foo",
- }},
- Steps: []v1beta1.Step{{
- Image: "foo",
- }},
- Sidecars: []v1beta1.Sidecar{{
- Image: "foo",
- Workspaces: []v1beta1.WorkspaceUsage{{
+ Steps: []v1beta1.Step{{
+ Image: "foo",
+ Workspaces: []v1beta1.WorkspaceUsage{{
+ Name: "foo",
+ }},
+ }},
+ },
+ }, {
+ name: "sidecar workspace requires beta",
+ requiredVersion: "beta",
+ spec: v1beta1.TaskSpec{
+ Workspaces: []v1beta1.WorkspaceDeclaration{{
Name: "foo",
}},
- }},
- },
- }, {
- name: "windows script support requires alpha",
- requiredVersion: "alpha",
- spec: v1beta1.TaskSpec{
- Steps: []v1beta1.Step{{
- Image: "my-image",
- Script: `
+ Steps: []v1beta1.Step{{
+ Image: "foo",
+ }},
+ Sidecars: []v1beta1.Sidecar{{
+ Image: "foo",
+ Workspaces: []v1beta1.WorkspaceUsage{{
+ Name: "foo",
+ }},
+ }},
+ },
+ }, {
+ name: "windows script support requires alpha",
+ requiredVersion: "alpha",
+ spec: v1beta1.TaskSpec{
+ Steps: []v1beta1.Step{{
+ Image: "my-image",
+ Script: `
#!win powershell -File
script-1`,
- }},
- },
- }, {
- name: "stdout stream support requires alpha",
- requiredVersion: "alpha",
- spec: v1beta1.TaskSpec{
- Steps: []v1beta1.Step{{
- Image: "foo",
- StdoutConfig: &v1beta1.StepOutputConfig{
- Path: "/tmp/stdout.txt",
- },
- }},
+ }},
+ },
+ }, {
+ name: "stdout stream support requires alpha",
+ requiredVersion: "alpha",
+ spec: v1beta1.TaskSpec{
+ Steps: []v1beta1.Step{{
+ Image: "foo",
+ StdoutConfig: &v1beta1.StepOutputConfig{
+ Path: "/tmp/stdout.txt",
+ },
+ }},
+ },
+ }, {
+ name: "stderr stream support requires alpha",
+ requiredVersion: "alpha",
+ spec: v1beta1.TaskSpec{
+ Steps: []v1beta1.Step{{
+ Image: "foo",
+ StderrConfig: &v1beta1.StepOutputConfig{
+ Path: "/tmp/stderr.txt",
+ },
+ }},
+ },
},
- }, {
- name: "stderr stream support requires alpha",
- requiredVersion: "alpha",
- spec: v1beta1.TaskSpec{
- Steps: []v1beta1.Step{{
- Image: "foo",
- StderrConfig: &v1beta1.StepOutputConfig{
- Path: "/tmp/stderr.txt",
- },
- }},
- }},
} {
for _, version := range versions {
testName := fmt.Sprintf("(using %s) %s", version, tt.name)
@@ -2115,161 +2084,165 @@ func TestGetArrayIndexParamRefs(t *testing.T) {
name string
taskspec *v1beta1.TaskSpec
want sets.String
- }{{
- name: "steps reference",
- taskspec: &v1beta1.TaskSpec{
- Params: []v1beta1.ParamSpec{{
- Name: "array-params",
- Default: v1beta1.NewStructuredValues("bar", "foo"),
- }},
- Steps: []v1beta1.Step{{
- Name: "$(params.array-params[10])",
- Image: "$(params.array-params[11])",
- Command: []string{"$(params.array-params[12])"},
- Args: []string{"$(params.array-params[13])"},
- Script: "echo $(params.array-params[14])",
- Env: []corev1.EnvVar{{
- Value: "$(params.array-params[15])",
- ValueFrom: &corev1.EnvVarSource{
- SecretKeyRef: &corev1.SecretKeySelector{
- Key: "$(params.array-params[16])",
- LocalObjectReference: corev1.LocalObjectReference{
- Name: "$(params.array-params[17])",
+ }{
+ {
+ name: "steps reference",
+ taskspec: &v1beta1.TaskSpec{
+ Params: []v1beta1.ParamSpec{{
+ Name: "array-params",
+ Default: v1beta1.NewStructuredValues("bar", "foo"),
+ }},
+ Steps: []v1beta1.Step{{
+ Name: "$(params.array-params[10])",
+ Image: "$(params.array-params[11])",
+ Command: []string{"$(params.array-params[12])"},
+ Args: []string{"$(params.array-params[13])"},
+ Script: "echo $(params.array-params[14])",
+ Env: []corev1.EnvVar{{
+ Value: "$(params.array-params[15])",
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ Key: "$(params.array-params[16])",
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "$(params.array-params[17])",
+ },
+ },
+ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
+ Key: "$(params.array-params[18])",
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "$(params.array-params[19])",
+ },
},
},
- ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
- Key: "$(params.array-params[18])",
+ }},
+ EnvFrom: []corev1.EnvFromSource{{
+ Prefix: "$(params.array-params[20])",
+ ConfigMapRef: &corev1.ConfigMapEnvSource{
LocalObjectReference: corev1.LocalObjectReference{
- Name: "$(params.array-params[19])",
+ Name: "$(params.array-params[21])",
},
},
- },
- }},
- EnvFrom: []corev1.EnvFromSource{{
- Prefix: "$(params.array-params[20])",
- ConfigMapRef: &corev1.ConfigMapEnvSource{
- LocalObjectReference: corev1.LocalObjectReference{
- Name: "$(params.array-params[21])",
- },
- },
- SecretRef: &corev1.SecretEnvSource{
- LocalObjectReference: corev1.LocalObjectReference{
- Name: "$(params.array-params[22])",
+ SecretRef: &corev1.SecretEnvSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "$(params.array-params[22])",
+ },
},
- },
- }},
- WorkingDir: "$(params.array-params[23])",
- VolumeMounts: []corev1.VolumeMount{{
- Name: "$(params.array-params[24])",
- MountPath: "$(params.array-params[25])",
- SubPath: "$(params.array-params[26])",
+ }},
+ WorkingDir: "$(params.array-params[23])",
+ VolumeMounts: []corev1.VolumeMount{{
+ Name: "$(params.array-params[24])",
+ MountPath: "$(params.array-params[25])",
+ SubPath: "$(params.array-params[26])",
+ }},
}},
- }},
- StepTemplate: &v1beta1.StepTemplate{
- Image: "$(params.array-params[27])",
+ StepTemplate: &v1beta1.StepTemplate{
+ Image: "$(params.array-params[27])",
+ },
},
- },
- want: sets.NewString("$(params.array-params[10])", "$(params.array-params[11])", "$(params.array-params[12])", "$(params.array-params[13])", "$(params.array-params[14])",
- "$(params.array-params[15])", "$(params.array-params[16])", "$(params.array-params[17])", "$(params.array-params[18])", "$(params.array-params[19])", "$(params.array-params[20])",
- "$(params.array-params[21])", "$(params.array-params[22])", "$(params.array-params[23])", "$(params.array-params[24])", "$(params.array-params[25])", "$(params.array-params[26])", "$(params.array-params[27])"),
- }, {
- name: "stepTemplate reference",
- taskspec: &v1beta1.TaskSpec{
- Params: []v1beta1.ParamSpec{{
- Name: "array-params",
- Default: v1beta1.NewStructuredValues("bar", "foo"),
- }},
- StepTemplate: &v1beta1.StepTemplate{
- Image: "$(params.array-params[3])",
+ want: sets.NewString("$(params.array-params[10])", "$(params.array-params[11])", "$(params.array-params[12])", "$(params.array-params[13])", "$(params.array-params[14])",
+ "$(params.array-params[15])", "$(params.array-params[16])", "$(params.array-params[17])", "$(params.array-params[18])", "$(params.array-params[19])", "$(params.array-params[20])",
+ "$(params.array-params[21])", "$(params.array-params[22])", "$(params.array-params[23])", "$(params.array-params[24])", "$(params.array-params[25])", "$(params.array-params[26])", "$(params.array-params[27])"),
+ }, {
+ name: "stepTemplate reference",
+ taskspec: &v1beta1.TaskSpec{
+ Params: []v1beta1.ParamSpec{{
+ Name: "array-params",
+ Default: v1beta1.NewStructuredValues("bar", "foo"),
+ }},
+ StepTemplate: &v1beta1.StepTemplate{
+ Image: "$(params.array-params[3])",
+ },
},
- },
- want: sets.NewString("$(params.array-params[3])"),
- }, {
- name: "volumes references",
- taskspec: &v1beta1.TaskSpec{
- Params: []v1beta1.ParamSpec{{
- Name: "array-params",
- Default: v1beta1.NewStructuredValues("bar", "foo"),
- }},
- Volumes: []corev1.Volume{{
- Name: "$(params.array-params[10])",
- VolumeSource: corev1.VolumeSource{
- ConfigMap: &corev1.ConfigMapVolumeSource{
- LocalObjectReference: corev1.LocalObjectReference{
- Name: "$(params.array-params[11])",
- },
- Items: []corev1.KeyToPath{{
- Key: "$(params.array-params[12])",
- Path: "$(params.array-params[13])",
- },
- },
- },
- Secret: &corev1.SecretVolumeSource{
- SecretName: "$(params.array-params[14])",
- Items: []corev1.KeyToPath{{
- Key: "$(params.array-params[15])",
- Path: "$(params.array-params[16])",
- }},
- },
- PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
- ClaimName: "$(params.array-params[17])",
- },
- Projected: &corev1.ProjectedVolumeSource{
- Sources: []corev1.VolumeProjection{{
- ConfigMap: &corev1.ConfigMapProjection{
+ want: sets.NewString("$(params.array-params[3])"),
+ }, {
+ name: "volumes references",
+ taskspec: &v1beta1.TaskSpec{
+ Params: []v1beta1.ParamSpec{{
+ Name: "array-params",
+ Default: v1beta1.NewStructuredValues("bar", "foo"),
+ }},
+ Volumes: []corev1.Volume{
+ {
+ Name: "$(params.array-params[10])",
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
- Name: "$(params.array-params[18])",
+ Name: "$(params.array-params[11])",
},
- },
- Secret: &corev1.SecretProjection{
- LocalObjectReference: corev1.LocalObjectReference{
- Name: "$(params.array-params[19])",
+ Items: []corev1.KeyToPath{
+ {
+ Key: "$(params.array-params[12])",
+ Path: "$(params.array-params[13])",
+ },
},
},
- ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
- Audience: "$(params.array-params[20])",
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "$(params.array-params[14])",
+ Items: []corev1.KeyToPath{{
+ Key: "$(params.array-params[15])",
+ Path: "$(params.array-params[16])",
+ }},
+ },
+ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: "$(params.array-params[17])",
+ },
+ Projected: &corev1.ProjectedVolumeSource{
+ Sources: []corev1.VolumeProjection{{
+ ConfigMap: &corev1.ConfigMapProjection{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "$(params.array-params[18])",
+ },
+ },
+ Secret: &corev1.SecretProjection{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "$(params.array-params[19])",
+ },
+ },
+ ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
+ Audience: "$(params.array-params[20])",
+ },
+ }},
+ },
+ CSI: &corev1.CSIVolumeSource{
+ NodePublishSecretRef: &corev1.LocalObjectReference{
+ Name: "$(params.array-params[21])",
+ },
+ VolumeAttributes: map[string]string{"key": "$(params.array-params[22])"},
},
- }},
- },
- CSI: &corev1.CSIVolumeSource{
- NodePublishSecretRef: &corev1.LocalObjectReference{
- Name: "$(params.array-params[21])",
},
- VolumeAttributes: map[string]string{"key": "$(params.array-params[22])"},
},
},
},
+ want: sets.NewString("$(params.array-params[10])", "$(params.array-params[11])", "$(params.array-params[12])", "$(params.array-params[13])", "$(params.array-params[14])",
+ "$(params.array-params[15])", "$(params.array-params[16])", "$(params.array-params[17])", "$(params.array-params[18])", "$(params.array-params[19])", "$(params.array-params[20])",
+ "$(params.array-params[21])", "$(params.array-params[22])"),
+ }, {
+ name: "workspaces references",
+ taskspec: &v1beta1.TaskSpec{
+ Params: []v1beta1.ParamSpec{{
+ Name: "array-params",
+ Default: v1beta1.NewStructuredValues("bar", "foo"),
+ }},
+ Workspaces: []v1beta1.WorkspaceDeclaration{{
+ MountPath: "$(params.array-params[3])",
+ }},
},
- },
- want: sets.NewString("$(params.array-params[10])", "$(params.array-params[11])", "$(params.array-params[12])", "$(params.array-params[13])", "$(params.array-params[14])",
- "$(params.array-params[15])", "$(params.array-params[16])", "$(params.array-params[17])", "$(params.array-params[18])", "$(params.array-params[19])", "$(params.array-params[20])",
- "$(params.array-params[21])", "$(params.array-params[22])"),
- }, {
- name: "workspaces references",
- taskspec: &v1beta1.TaskSpec{
- Params: []v1beta1.ParamSpec{{
- Name: "array-params",
- Default: v1beta1.NewStructuredValues("bar", "foo"),
- }},
- Workspaces: []v1beta1.WorkspaceDeclaration{{
- MountPath: "$(params.array-params[3])",
- }},
- },
- want: sets.NewString("$(params.array-params[3])"),
- }, {
- name: "sidecar references",
- taskspec: &v1beta1.TaskSpec{
- Params: []v1beta1.ParamSpec{{
- Name: "array-params",
- Default: v1beta1.NewStructuredValues("bar", "foo"),
- }},
- Sidecars: []v1beta1.Sidecar{{
- Script: "$(params.array-params[3])",
- },
+ want: sets.NewString("$(params.array-params[3])"),
+ }, {
+ name: "sidecar references",
+ taskspec: &v1beta1.TaskSpec{
+ Params: []v1beta1.ParamSpec{{
+ Name: "array-params",
+ Default: v1beta1.NewStructuredValues("bar", "foo"),
+ }},
+ Sidecars: []v1beta1.Sidecar{
+ {
+ Script: "$(params.array-params[3])",
+ },
+ },
},
+ want: sets.NewString("$(params.array-params[3])"),
},
- want: sets.NewString("$(params.array-params[3])"),
- },
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
@@ -2623,7 +2596,7 @@ func TestTaskSpecValidateSuccessWithArtifactsRefFlagEnabled(t *testing.T) {
name: "reference step artifacts in Env",
Steps: []v1beta1.Step{{
Image: "busybox",
- Env: []corev1.EnvVar{{Name: "AAA", Value: "$(steps.aaa.outputs.image)"}},
+ Env: []corev1.EnvVar{{Name: "AAA", Value: "$(steps.aaa.outputs)"}},
}},
},
{
@@ -2705,7 +2678,7 @@ func TestTaskSpecValidateErrorWithArtifactsRefFlagNotEnabled(t *testing.T) {
{
name: "Cannot reference step artifacts in Env without setting enable-artifacts to true",
Steps: []v1beta1.Step{{
- Env: []corev1.EnvVar{{Name: "AAA", Value: "$(steps.aaa.outputs.image)"}},
+ Env: []corev1.EnvVar{{Name: "AAA", Value: "$(steps.aaa.outputs)"}},
}},
expectedError: apis.FieldError{
Message: fmt.Sprintf("feature flag %s should be set to true to use artifacts feature.", config.EnableArtifacts),
@@ -2905,85 +2878,3 @@ func TestTaskSpecValidateErrorWithArtifactsRef(t *testing.T) {
})
}
}
-
-func TestTaskSpecValidate_StepWhen_Error(t *testing.T) {
- tests := []struct {
- name string
- ts *v1beta1.TaskSpec
- isCreate bool
- Results []v1.StepResult
- isUpdate bool
- baselineTaskRun *v1beta1.TaskRun
- expectedError apis.FieldError
- EnableStepAction bool
- EnableCEL bool
- }{
- {
- name: "step when not allowed without enable step actions - create event",
- ts: &v1beta1.TaskSpec{Steps: []v1beta1.Step{{
- Image: "my-image",
- When: v1beta1.StepWhenExpressions{{Input: "foo", Operator: selection.In, Values: []string{"foo"}}},
- }}},
- isCreate: true,
- expectedError: apis.FieldError{
- Message: "feature flag enable-step-actions should be set to true in order to use When in Steps.",
- Paths: []string{"steps[0]"},
- },
- },
- {
- name: "step when not allowed without enable step actions - update and diverged event",
- ts: &v1beta1.TaskSpec{Steps: []v1beta1.Step{{
- Image: "my-image",
- When: v1beta1.StepWhenExpressions{{Input: "foo", Operator: selection.In, Values: []string{"foo"}}},
- }}},
- isUpdate: true,
- baselineTaskRun: &v1beta1.TaskRun{
- Spec: v1beta1.TaskRunSpec{
- TaskSpec: &v1beta1.TaskSpec{
- Steps: []v1beta1.Step{{
- Image: "my-image",
- Results: []v1.StepResult{{Name: "a-result"}},
- }},
- },
- },
- },
- expectedError: apis.FieldError{
- Message: "feature flag enable-step-actions should be set to true in order to use When in Steps.",
- Paths: []string{"steps[0]"},
- },
- },
- {
- name: "cel not allowed if EnableCELInWhenExpression is false",
- ts: &v1beta1.TaskSpec{Steps: []v1beta1.Step{{
- Image: "my-image",
- When: v1beta1.StepWhenExpressions{{CEL: "'d'=='d'"}},
- }}},
- EnableStepAction: true,
- expectedError: apis.FieldError{
- Message: `feature flag enable-cel-in-whenexpression should be set to true to use CEL: 'd'=='d' in WhenExpression`,
- Paths: []string{"steps[0].when[0]"},
- },
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- ctx := config.ToContext(context.Background(), &config.Config{
- FeatureFlags: &config.FeatureFlags{
- EnableStepActions: tt.EnableStepAction,
- EnableCELInWhenExpression: tt.EnableCEL,
- },
- })
- if tt.isCreate {
- ctx = apis.WithinCreate(ctx)
- }
- if tt.isUpdate {
- ctx = apis.WithinUpdate(ctx, tt.baselineTaskRun)
- }
- tt.ts.SetDefaults(ctx)
- err := tt.ts.Validate(ctx)
- if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" {
- t.Errorf("StepActionSpec.Validate() errors diff %s", diff.PrintWantGot(d))
- }
- })
- }
-}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/taskref_conversion.go b/upstream/pkg/apis/pipeline/v1beta1/taskref_conversion.go
index 1b163970b31..c089e9e0dbf 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/taskref_conversion.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/taskref_conversion.go
@@ -23,12 +23,15 @@ import (
)
func (tr TaskRef) convertTo(ctx context.Context, sink *v1.TaskRef) {
- sink.Name = tr.Name
+ if tr.Bundle == "" {
+ sink.Name = tr.Name
+ }
sink.Kind = v1.TaskKind(tr.Kind)
sink.APIVersion = tr.APIVersion
new := v1.ResolverRef{}
tr.ResolverRef.convertTo(ctx, &new)
sink.ResolverRef = new
+ tr.convertBundleToResolver(sink)
}
// ConvertFrom converts v1beta1 TaskRef from v1 TaskRef
@@ -40,3 +43,25 @@ func (tr *TaskRef) ConvertFrom(ctx context.Context, source v1.TaskRef) {
new.convertFrom(ctx, source.ResolverRef)
tr.ResolverRef = new
}
+
+// convertBundleToResolver converts v1beta1 bundle string to a remote reference with the bundle resolver in v1.
+// The conversion from Resolver to Bundle is not being supported since remote resolution would be turned on by
+// default and it will be in beta before the stored version of CRD getting swapped to v1.
+func (tr TaskRef) convertBundleToResolver(sink *v1.TaskRef) {
+ if tr.Bundle != "" {
+ sink.Kind = ""
+ sink.ResolverRef = v1.ResolverRef{
+ Resolver: "bundles",
+ Params: v1.Params{{
+ Name: "bundle",
+ Value: v1.ParamValue{StringVal: tr.Bundle, Type: v1.ParamTypeString},
+ }, {
+ Name: "name",
+ Value: v1.ParamValue{StringVal: tr.Name, Type: v1.ParamTypeString},
+ }, {
+ Name: "kind",
+ Value: v1.ParamValue{StringVal: "Task", Type: v1.ParamTypeString},
+ }},
+ }
+ }
+}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/taskref_types.go b/upstream/pkg/apis/pipeline/v1beta1/taskref_types.go
index 9781a4a2133..f8f231cd961 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/taskref_types.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/taskref_types.go
@@ -32,7 +32,6 @@ type TaskRef struct {
// Bundle url reference to a Tekton Bundle.
//
// Deprecated: Please use ResolverRef with the bundles resolver instead.
- // The field is staying there for go client backward compatibility, but is not used/allowed anymore.
// +optional
Bundle string `json:"bundle,omitempty"`
diff --git a/upstream/pkg/apis/pipeline/v1beta1/taskref_validation.go b/upstream/pkg/apis/pipeline/v1beta1/taskref_validation.go
index 866dfadd27d..a3e2bb036c3 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/taskref_validation.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/taskref_validation.go
@@ -18,9 +18,9 @@ package v1beta1
import (
"context"
- "fmt"
"strings"
+ "github.com/google/go-containerregistry/pkg/name"
"github.com/tektoncd/pipeline/pkg/apis/config"
"k8s.io/apimachinery/pkg/util/validation"
"knative.dev/pkg/apis"
@@ -32,56 +32,46 @@ func (ref *TaskRef) Validate(ctx context.Context) (errs *apis.FieldError) {
if ref == nil {
return errs
}
- if apis.IsInCreate(ctx) && ref.Bundle != "" {
- errs = errs.Also(apis.ErrDisallowedFields("bundle"))
- }
+
switch {
case ref.Resolver != "" || ref.Params != nil:
+ if ref.Resolver != "" {
+ errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver", config.BetaAPIFields).ViaField("resolver"))
+ if ref.Name != "" {
+ errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver"))
+ }
+ if ref.Bundle != "" {
+ errs = errs.Also(apis.ErrMultipleOneOf("bundle", "resolver"))
+ }
+ }
if ref.Params != nil {
errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver params", config.BetaAPIFields).ViaField("params"))
if ref.Name != "" {
errs = errs.Also(apis.ErrMultipleOneOf("name", "params"))
}
+ if ref.Bundle != "" {
+ errs = errs.Also(apis.ErrMultipleOneOf("bundle", "params"))
+ }
if ref.Resolver == "" {
errs = errs.Also(apis.ErrMissingField("resolver"))
}
errs = errs.Also(ValidateParameters(ctx, ref.Params))
}
- if ref.Resolver != "" {
- errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "resolver", config.BetaAPIFields).ViaField("resolver"))
- if ref.Name != "" {
- // make sure that the name is url-like.
- err := RefNameLikeUrl(ref.Name)
- if err == nil && !config.FromContextOrDefaults(ctx).FeatureFlags.EnableConciseResolverSyntax {
- // If name is url-like then concise resolver syntax must be enabled
- errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use concise resolver syntax", config.EnableConciseResolverSyntax), ""))
- }
- if err != nil {
- errs = errs.Also(apis.ErrInvalidValue(err, "name"))
- }
- }
+ case ref.Bundle != "":
+ if ref.Name == "" {
+ errs = errs.Also(apis.ErrMissingField("name"))
}
- case ref.Name != "":
- // ref name can be a Url-like format.
- if err := RefNameLikeUrl(ref.Name); err == nil {
- // If name is url-like then concise resolver syntax must be enabled
- if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableConciseResolverSyntax {
- errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use concise resolver syntax", config.EnableConciseResolverSyntax), ""))
- }
- // In stage1 of concise remote resolvers syntax, this is a required field.
- // TODO: remove this check when implementing stage 2 where this is optional.
- if ref.Resolver == "" {
- errs = errs.Also(apis.ErrMissingField("resolver"))
- }
- // Or, it must be a valid k8s name
- } else {
- // ref name must be a valid k8s name
- if errSlice := validation.IsQualifiedName(ref.Name); len(errSlice) != 0 {
- errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name"))
- }
+ errs = errs.Also(validateBundleFeatureFlag(ctx, "bundle", true).ViaField("bundle"))
+ if _, err := name.ParseReference(ref.Bundle); err != nil {
+ errs = errs.Also(apis.ErrInvalidValue("invalid bundle reference", "bundle", err.Error()))
}
default:
- errs = errs.Also(apis.ErrMissingField("name"))
+ if ref.Name == "" {
+ errs = errs.Also(apis.ErrMissingField("name"))
+ } else if errSlice := validation.IsQualifiedName(ref.Name); len(errSlice) != 0 {
+ // TaskRef name must be a valid k8s name
+ errs = errs.Also(apis.ErrInvalidValue(strings.Join(errSlice, ","), "name"))
+ }
}
- return //nolint:nakedret
+ return errs
}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/taskref_validation_test.go b/upstream/pkg/apis/pipeline/v1beta1/taskref_validation_test.go
index 5c44529ae22..7d1f4f488ff 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/taskref_validation_test.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/taskref_validation_test.go
@@ -60,6 +60,12 @@ func TestTaskRef_Valid(t *testing.T) {
StringVal: "baz",
},
}}}},
+ }, {
+ name: "valid bundle",
+ taskRef: &v1beta1.TaskRef{
+ Name: "bundled-task",
+ Bundle: "gcr.io/my-bundle"},
+ wc: enableTektonOCIBundles(t),
}}
for _, ts := range tests {
t.Run(ts.name, func(t *testing.T) {
@@ -85,45 +91,85 @@ func TestTaskRef_Invalid(t *testing.T) {
taskRef: &v1beta1.TaskRef{},
wantErr: apis.ErrMissingField("name"),
}, {
- name: "taskRef with resolver and k8s style name",
+ name: "use of bundle without the feature flag set",
+ taskRef: &v1beta1.TaskRef{
+ Name: "my-task",
+ Bundle: "docker.io/foo",
+ },
+ wantErr: apis.ErrGeneric("bundle requires \"enable-tekton-oci-bundles\" feature gate to be true but it is false"),
+ }, {
+ name: "bundle missing name",
+ taskRef: &v1beta1.TaskRef{
+ Bundle: "docker.io/foo",
+ },
+ wantErr: apis.ErrMissingField("name"),
+ wc: enableTektonOCIBundles(t),
+ }, {
+ name: "invalid bundle reference",
+ taskRef: &v1beta1.TaskRef{
+ Name: "my-task",
+ Bundle: "invalid reference",
+ },
+ wantErr: apis.ErrInvalidValue("invalid bundle reference", "bundle", "could not parse reference: invalid reference"),
+ wc: enableTektonOCIBundles(t),
+ }, {
+ name: "taskref params disallowed without resolver",
taskRef: &v1beta1.TaskRef{
- Name: "foo",
ResolverRef: v1beta1.ResolverRef{
- Resolver: "git",
+ Params: v1beta1.Params{},
},
},
- wantErr: apis.ErrInvalidValue(`invalid URI for request`, "name"),
- wc: enableConciseResolverSyntax,
+ wantErr: apis.ErrMissingField("resolver"),
}, {
- name: "taskRef with url-like name without resolver",
+ name: "taskref resolver disallowed in conjunction with taskref name",
taskRef: &v1beta1.TaskRef{
- Name: "https://foo.com/bar",
+ Name: "foo",
+ ResolverRef: v1beta1.ResolverRef{
+ Resolver: "git",
+ },
},
- wantErr: apis.ErrMissingField("resolver"),
- wc: enableConciseResolverSyntax,
+ wantErr: apis.ErrMultipleOneOf("name", "resolver"),
}, {
- name: "taskRef params disallowed in conjunction with pipelineref name",
+ name: "taskref resolver disallowed in conjunction with taskref bundle",
taskRef: &v1beta1.TaskRef{
- Name: "https://foo/bar",
+ Bundle: "bar",
ResolverRef: v1beta1.ResolverRef{
Resolver: "git",
- Params: v1beta1.Params{{Name: "foo", Value: v1beta1.ParamValue{StringVal: "bar"}}},
},
},
- wantErr: apis.ErrMultipleOneOf("name", "params"),
- wc: enableConciseResolverSyntax,
+ wantErr: apis.ErrMultipleOneOf("bundle", "resolver"),
+ wc: enableTektonOCIBundles(t),
}, {
- name: "taskRef with url-like name without enable-concise-resolver-syntax",
- taskRef: &v1beta1.TaskRef{Name: "https://foo.com/bar"},
- wantErr: apis.ErrMissingField("resolver").Also(&apis.FieldError{
- Message: `feature flag enable-concise-resolver-syntax should be set to true to use concise resolver syntax`,
- }),
+ name: "taskref params disallowed in conjunction with taskref name",
+ taskRef: &v1beta1.TaskRef{
+ Name: "bar",
+ ResolverRef: v1beta1.ResolverRef{
+ Params: v1beta1.Params{{
+ Name: "foo",
+ Value: v1beta1.ParamValue{
+ Type: v1beta1.ParamTypeString,
+ StringVal: "bar",
+ },
+ }},
+ },
+ },
+ wantErr: apis.ErrMultipleOneOf("name", "params").Also(apis.ErrMissingField("resolver")),
}, {
- name: "taskRef without enable-concise-resolver-syntax",
- taskRef: &v1beta1.TaskRef{Name: "https://foo.com/bar", ResolverRef: v1beta1.ResolverRef{Resolver: "git"}},
- wantErr: &apis.FieldError{
- Message: `feature flag enable-concise-resolver-syntax should be set to true to use concise resolver syntax`,
+ name: "taskref params disallowed in conjunction with taskref bundle",
+ taskRef: &v1beta1.TaskRef{
+ Bundle: "bar",
+ ResolverRef: v1beta1.ResolverRef{
+ Params: v1beta1.Params{{
+ Name: "foo",
+ Value: v1beta1.ParamValue{
+ Type: v1beta1.ParamTypeString,
+ StringVal: "bar",
+ },
+ }},
+ },
},
+ wantErr: apis.ErrMultipleOneOf("bundle", "params").Also(apis.ErrMissingField("resolver")),
+ wc: enableTektonOCIBundles(t),
}, {
name: "invalid taskref name",
taskRef: &v1beta1.TaskRef{Name: "_foo"},
diff --git a/upstream/pkg/apis/pipeline/v1beta1/taskrun_conversion.go b/upstream/pkg/apis/pipeline/v1beta1/taskrun_conversion.go
index fb01170254f..e9b1bed4f7a 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/taskrun_conversion.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/taskrun_conversion.go
@@ -214,18 +214,10 @@ func (trd *TaskRunDebug) convertFrom(ctx context.Context, source v1.TaskRunDebug
func (tbp TaskBreakpoints) convertTo(ctx context.Context, sink *v1.TaskBreakpoints) {
sink.OnFailure = tbp.OnFailure
- if len(tbp.BeforeSteps) > 0 {
- sink.BeforeSteps = make([]string, 0)
- sink.BeforeSteps = append(sink.BeforeSteps, tbp.BeforeSteps...)
- }
}
func (tbp *TaskBreakpoints) convertFrom(ctx context.Context, source v1.TaskBreakpoints) {
tbp.OnFailure = source.OnFailure
- if len(source.BeforeSteps) > 0 {
- tbp.BeforeSteps = make([]string, 0)
- tbp.BeforeSteps = append(tbp.BeforeSteps, source.BeforeSteps...)
- }
}
func (trso TaskRunStepOverride) convertTo(ctx context.Context, sink *v1.TaskRunStepSpec) {
@@ -353,12 +345,6 @@ func (ss StepState) convertTo(ctx context.Context, sink *v1.StepState) {
sink.ImageID = ss.ImageID
sink.Results = nil
- if ss.Provenance != nil {
- new := v1.Provenance{}
- ss.Provenance.convertTo(ctx, &new)
- sink.Provenance = &new
- }
-
if ss.ContainerState.Terminated != nil {
sink.TerminationReason = ss.ContainerState.Terminated.Reason
}
@@ -393,11 +379,6 @@ func (ss *StepState) convertFrom(ctx context.Context, source v1.StepState) {
new.convertFrom(ctx, r)
ss.Results = append(ss.Results, new)
}
- if source.Provenance != nil {
- new := Provenance{}
- new.convertFrom(ctx, *source.Provenance)
- ss.Provenance = &new
- }
for _, o := range source.Outputs {
new := TaskRunStepArtifact{}
new.convertFrom(ctx, o)
diff --git a/upstream/pkg/apis/pipeline/v1beta1/taskrun_conversion_test.go b/upstream/pkg/apis/pipeline/v1beta1/taskrun_conversion_test.go
index ff387e7d03b..b9e1565dda9 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/taskrun_conversion_test.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/taskrun_conversion_test.go
@@ -50,322 +50,296 @@ func TestTaskRunConversion(t *testing.T) {
tests := []struct {
name string
in *v1beta1.TaskRun
- }{
- {
- name: "simple taskrun",
- in: &v1beta1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "foo",
- Namespace: "bar",
- },
- Spec: v1beta1.TaskRunSpec{
- TaskRef: &v1beta1.TaskRef{Name: "test-task"},
- },
- },
- }, {
- name: "taskrun conversion deprecated step fields",
- in: &v1beta1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "foo",
- Namespace: "bar",
- },
- Spec: v1beta1.TaskRunSpec{
- TaskSpec: &v1beta1.TaskSpec{
- Steps: []v1beta1.Step{{
- DeprecatedLivenessProbe: &corev1.Probe{InitialDelaySeconds: 1},
- DeprecatedReadinessProbe: &corev1.Probe{InitialDelaySeconds: 2},
- DeprecatedPorts: []corev1.ContainerPort{{Name: "port"}},
- DeprecatedStartupProbe: &corev1.Probe{InitialDelaySeconds: 3},
- DeprecatedLifecycle: &corev1.Lifecycle{PostStart: &corev1.LifecycleHandler{Exec: &corev1.ExecAction{
- Command: []string{"lifecycle command"},
- }}},
- DeprecatedTerminationMessagePath: "path",
- DeprecatedTerminationMessagePolicy: corev1.TerminationMessagePolicy("policy"),
- DeprecatedStdin: true,
- DeprecatedStdinOnce: true,
- DeprecatedTTY: true,
- }},
- StepTemplate: &v1beta1.StepTemplate{
- DeprecatedName: "name",
- DeprecatedLivenessProbe: &corev1.Probe{InitialDelaySeconds: 1},
- DeprecatedReadinessProbe: &corev1.Probe{InitialDelaySeconds: 2},
- DeprecatedPorts: []corev1.ContainerPort{{Name: "port"}},
- DeprecatedStartupProbe: &corev1.Probe{InitialDelaySeconds: 3},
- DeprecatedLifecycle: &corev1.Lifecycle{PostStart: &corev1.LifecycleHandler{Exec: &corev1.ExecAction{
- Command: []string{"lifecycle command"},
- }}},
- DeprecatedTerminationMessagePath: "path",
- DeprecatedTerminationMessagePolicy: corev1.TerminationMessagePolicy("policy"),
- DeprecatedStdin: true,
- DeprecatedStdinOnce: true,
- DeprecatedTTY: true,
- },
- },
- },
+ }{{
+ name: "simple taskrun",
+ in: &v1beta1.TaskRun{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "bar",
},
- }, {
- name: "taskrun with step Results in step state",
- in: &v1beta1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "foo",
- Namespace: "bar",
- },
- Spec: v1beta1.TaskRunSpec{},
- Status: v1beta1.TaskRunStatus{
- TaskRunStatusFields: v1beta1.TaskRunStatusFields{
- Steps: []v1beta1.StepState{{
- Results: []v1beta1.TaskRunStepResult{{
- Name: "foo",
- Type: v1beta1.ResultsTypeString,
- Value: v1beta1.ResultValue{
- Type: v1beta1.ParamTypeString,
- StringVal: "bar",
- },
- }},
- }},
+ Spec: v1beta1.TaskRunSpec{
+ TaskRef: &v1beta1.TaskRef{Name: "test-task"},
+ },
+ },
+ }, {
+ name: "taskrun conversion deprecated step fields",
+ in: &v1beta1.TaskRun{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "bar",
+ },
+ Spec: v1beta1.TaskRunSpec{
+ TaskSpec: &v1beta1.TaskSpec{
+ Steps: []v1beta1.Step{{
+ DeprecatedLivenessProbe: &corev1.Probe{InitialDelaySeconds: 1},
+ DeprecatedReadinessProbe: &corev1.Probe{InitialDelaySeconds: 2},
+ DeprecatedPorts: []corev1.ContainerPort{{Name: "port"}},
+ DeprecatedStartupProbe: &corev1.Probe{InitialDelaySeconds: 3},
+ DeprecatedLifecycle: &corev1.Lifecycle{PostStart: &corev1.LifecycleHandler{Exec: &corev1.ExecAction{
+ Command: []string{"lifecycle command"},
+ }}},
+ DeprecatedTerminationMessagePath: "path",
+ DeprecatedTerminationMessagePolicy: corev1.TerminationMessagePolicy("policy"),
+ DeprecatedStdin: true,
+ DeprecatedStdinOnce: true,
+ DeprecatedTTY: true,
+ }},
+ StepTemplate: &v1beta1.StepTemplate{
+ DeprecatedName: "name",
+ DeprecatedLivenessProbe: &corev1.Probe{InitialDelaySeconds: 1},
+ DeprecatedReadinessProbe: &corev1.Probe{InitialDelaySeconds: 2},
+ DeprecatedPorts: []corev1.ContainerPort{{Name: "port"}},
+ DeprecatedStartupProbe: &corev1.Probe{InitialDelaySeconds: 3},
+ DeprecatedLifecycle: &corev1.Lifecycle{PostStart: &corev1.LifecycleHandler{Exec: &corev1.ExecAction{
+ Command: []string{"lifecycle command"},
+ }}},
+ DeprecatedTerminationMessagePath: "path",
+ DeprecatedTerminationMessagePolicy: corev1.TerminationMessagePolicy("policy"),
+ DeprecatedStdin: true,
+ DeprecatedStdinOnce: true,
+ DeprecatedTTY: true,
},
},
},
- }, {
- name: "taskrun with provenance in step state",
- in: &v1beta1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "foo",
- Namespace: "bar",
- },
- Spec: v1beta1.TaskRunSpec{},
- Status: v1beta1.TaskRunStatus{
- TaskRunStatusFields: v1beta1.TaskRunStatusFields{
- Steps: []v1beta1.StepState{{
- Provenance: &v1beta1.Provenance{
- RefSource: &v1beta1.RefSource{
- URI: "test-uri",
- Digest: map[string]string{"sha256": "digest"},
- },
+ },
+ }, {
+ name: "taskrun with step Results in step state",
+ in: &v1beta1.TaskRun{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "bar",
+ },
+ Spec: v1beta1.TaskRunSpec{},
+ Status: v1beta1.TaskRunStatus{
+ TaskRunStatusFields: v1beta1.TaskRunStatusFields{
+ Steps: []v1beta1.StepState{{
+ Results: []v1beta1.TaskRunStepResult{{
+ Name: "foo",
+ Type: v1beta1.ResultsTypeString,
+ Value: v1beta1.ResultValue{
+ Type: v1beta1.ParamTypeString,
+ StringVal: "bar",
},
}},
- },
+ }},
},
},
- }, {
- name: "taskrun conversion all non deprecated fields",
- in: &v1beta1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "foo",
- Namespace: "bar",
+ },
+ }, {
+ name: "taskrun conversion all non deprecated fields",
+ in: &v1beta1.TaskRun{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "bar",
+ },
+ Spec: v1beta1.TaskRunSpec{
+ Debug: &v1beta1.TaskRunDebug{
+ Breakpoints: &v1beta1.TaskBreakpoints{
+ OnFailure: "enabled",
+ },
},
- Spec: v1beta1.TaskRunSpec{
- Debug: &v1beta1.TaskRunDebug{
- Breakpoints: &v1beta1.TaskBreakpoints{
- OnFailure: "enabled",
- BeforeSteps: []string{"step-1", "step-2"},
- },
+ Params: v1beta1.Params{{
+ Name: "param-task-1",
+ Value: v1beta1.ParamValue{
+ ArrayVal: []string{"value-task-1"},
+ Type: "string",
},
- Params: v1beta1.Params{{
- Name: "param-task-1",
- Value: v1beta1.ParamValue{
- ArrayVal: []string{"value-task-1"},
- Type: "string",
- },
+ }},
+ ServiceAccountName: "test-sa",
+ TaskRef: &v1beta1.TaskRef{Name: "test-task"},
+ TaskSpec: &v1beta1.TaskSpec{
+ Params: []v1beta1.ParamSpec{{
+ Name: "param-name",
+ Type: "string",
}},
- ServiceAccountName: "test-sa",
- TaskRef: &v1beta1.TaskRef{Name: "test-task"},
- TaskSpec: &v1beta1.TaskSpec{
- Params: []v1beta1.ParamSpec{{
- Name: "param-name",
- Type: "string",
- }},
+ },
+ Status: "test-task-run-spec-status",
+ StatusMessage: v1beta1.TaskRunSpecStatusMessage("test-status-message"),
+ Timeout: &metav1.Duration{Duration: 5 * time.Second},
+ PodTemplate: &pod.Template{
+ NodeSelector: map[string]string{
+ "label": "value",
},
- Status: "test-task-run-spec-status",
- StatusMessage: v1beta1.TaskRunSpecStatusMessage("test-status-message"),
- Timeout: &metav1.Duration{Duration: 5 * time.Second},
- PodTemplate: &pod.Template{
- NodeSelector: map[string]string{
- "label": "value",
+ },
+ Workspaces: []v1beta1.WorkspaceBinding{{
+ Name: "workspace-volumeclaimtemplate",
+ SubPath: "/foo/bar/baz",
+ VolumeClaimTemplate: &corev1.PersistentVolumeClaim{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "pvc",
},
+ Spec: corev1.PersistentVolumeClaimSpec{},
},
- Workspaces: []v1beta1.WorkspaceBinding{
- {
- Name: "workspace-volumeclaimtemplate",
- SubPath: "/foo/bar/baz",
- VolumeClaimTemplate: &corev1.PersistentVolumeClaim{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pvc",
+ }, {
+ Name: "workspace-pvc",
+ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{},
+ }, {
+ Name: "workspace-emptydir",
+ EmptyDir: &corev1.EmptyDirVolumeSource{},
+ }, {
+ Name: "workspace-configmap",
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "configbar",
+ },
+ },
+ }, {
+ Name: "workspace-secret",
+ Secret: &corev1.SecretVolumeSource{SecretName: "sname"},
+ }, {
+ Name: "workspace-projected",
+ Projected: &corev1.ProjectedVolumeSource{
+ Sources: []corev1.VolumeProjection{{
+ ConfigMap: &corev1.ConfigMapProjection{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "projected-configmap",
},
- Spec: corev1.PersistentVolumeClaimSpec{},
},
- }, {
- Name: "workspace-pvc",
- PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{},
- }, {
- Name: "workspace-emptydir",
- EmptyDir: &corev1.EmptyDirVolumeSource{},
- }, {
- Name: "workspace-configmap",
- ConfigMap: &corev1.ConfigMapVolumeSource{
+ Secret: &corev1.SecretProjection{
LocalObjectReference: corev1.LocalObjectReference{
- Name: "configbar",
+ Name: "projected-secret",
},
},
- }, {
- Name: "workspace-secret",
- Secret: &corev1.SecretVolumeSource{SecretName: "sname"},
- }, {
- Name: "workspace-projected",
- Projected: &corev1.ProjectedVolumeSource{
- Sources: []corev1.VolumeProjection{{
- ConfigMap: &corev1.ConfigMapProjection{
- LocalObjectReference: corev1.LocalObjectReference{
- Name: "projected-configmap",
- },
- },
- Secret: &corev1.SecretProjection{
- LocalObjectReference: corev1.LocalObjectReference{
- Name: "projected-secret",
- },
- },
- ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
- Audience: "projected-sat",
- },
- }},
- },
- }, {
- Name: "workspace-csi",
- CSI: &corev1.CSIVolumeSource{
- NodePublishSecretRef: &corev1.LocalObjectReference{
- Name: "projected-csi",
- },
- VolumeAttributes: map[string]string{"key": "attribute-val"},
+ ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
+ Audience: "projected-sat",
},
- },
- },
- StepOverrides: []v1beta1.TaskRunStepOverride{{
- Name: "task-1",
- Resources: corev1.ResourceRequirements{
- Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
}},
},
- SidecarOverrides: []v1beta1.TaskRunSidecarOverride{{
- Name: "task-1",
- Resources: corev1.ResourceRequirements{
- Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
- }},
- },
- ComputeResources: &corev1.ResourceRequirements{
- Requests: corev1.ResourceList{
- corev1.ResourceMemory: corev1resources.MustParse("1Gi"),
+ }, {
+ Name: "workspace-csi",
+ CSI: &corev1.CSIVolumeSource{
+ NodePublishSecretRef: &corev1.LocalObjectReference{
+ Name: "projected-csi",
},
+ VolumeAttributes: map[string]string{"key": "attribute-val"},
},
},
- Status: v1beta1.TaskRunStatus{
- Status: duckv1.Status{
- Conditions: []apis.Condition{
- {
- Type: apis.ConditionSucceeded,
- Status: corev1.ConditionTrue,
- Reason: "Completed",
- Message: "All tasks finished running",
- },
+ },
+ StepOverrides: []v1beta1.TaskRunStepOverride{{
+ Name: "task-1",
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
+ }},
+ },
+ SidecarOverrides: []v1beta1.TaskRunSidecarOverride{{
+ Name: "task-1",
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
+ }},
+ },
+ ComputeResources: &corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{
+ corev1.ResourceMemory: corev1resources.MustParse("1Gi"),
+ },
+ },
+ },
+ Status: v1beta1.TaskRunStatus{
+ Status: duckv1.Status{
+ Conditions: []apis.Condition{
+ {
+ Type: apis.ConditionSucceeded,
+ Status: corev1.ConditionTrue,
+ Reason: "Completed",
+ Message: "All tasks finished running",
},
- ObservedGeneration: 1,
},
- TaskRunStatusFields: v1beta1.TaskRunStatusFields{
- PodName: "pod-name",
- StartTime: &metav1.Time{Time: time.Now()},
- CompletionTime: &metav1.Time{Time: time.Now().Add(1 * time.Minute)},
- Steps: []v1beta1.StepState{{
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- ExitCode: 123,
- }},
- Name: "failure",
- ContainerName: "step-failure",
- ImageID: "image-id",
- }},
- Sidecars: []v1beta1.SidecarState{{
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- ExitCode: 123,
- },
- },
- Name: "failure",
- ContainerName: "step-failure",
- ImageID: "image-id",
- }},
- RetriesStatus: []v1beta1.TaskRunStatus{{
- Status: duckv1.Status{
- Conditions: []apis.Condition{{
- Type: apis.ConditionSucceeded,
- Status: corev1.ConditionFalse,
- }},
- },
- }},
- TaskRunResults: []v1beta1.TaskRunResult{{
- Name: "resultName",
- Type: v1beta1.ResultsTypeObject,
- Value: *v1beta1.NewObject(map[string]string{"hello": "world"}),
- }},
- TaskSpec: &v1beta1.TaskSpec{
- Description: "test",
- Steps: []v1beta1.Step{{
- Image: "foo",
+ ObservedGeneration: 1,
+ },
+ TaskRunStatusFields: v1beta1.TaskRunStatusFields{
+ PodName: "pod-name",
+ StartTime: &metav1.Time{Time: time.Now()},
+ CompletionTime: &metav1.Time{Time: time.Now().Add(1 * time.Minute)},
+ Steps: []v1beta1.StepState{{
+ ContainerState: corev1.ContainerState{
+ Terminated: &corev1.ContainerStateTerminated{
+ ExitCode: 123,
}},
- Volumes: []corev1.Volume{{}},
- Params: []v1beta1.ParamSpec{{
- Name: "param-1",
- Type: v1beta1.ParamTypeString,
- Description: "My first param",
+
+ Name: "failure",
+ ContainerName: "step-failure",
+ ImageID: "image-id",
+ }},
+ Sidecars: []v1beta1.SidecarState{{
+ ContainerState: corev1.ContainerState{
+ Terminated: &corev1.ContainerStateTerminated{
+ ExitCode: 123,
+ }},
+
+ Name: "failure",
+ ContainerName: "step-failure",
+ ImageID: "image-id",
+ }},
+ RetriesStatus: []v1beta1.TaskRunStatus{{
+ Status: duckv1.Status{
+ Conditions: []apis.Condition{{
+ Type: apis.ConditionSucceeded,
+ Status: corev1.ConditionFalse,
}},
},
- Provenance: &v1beta1.Provenance{
- RefSource: &v1beta1.RefSource{
- URI: "test-uri",
- Digest: map[string]string{"sha256": "digest"},
- },
- FeatureFlags: config.DefaultFeatureFlags.DeepCopy(),
- },
+ }},
+ TaskRunResults: []v1beta1.TaskRunResult{{
+ Name: "resultName",
+ Type: v1beta1.ResultsTypeObject,
+ Value: *v1beta1.NewObject(map[string]string{"hello": "world"}),
+ }},
+ TaskSpec: &v1beta1.TaskSpec{
+ Description: "test",
+ Steps: []v1beta1.Step{{
+ Image: "foo",
+ }},
+ Volumes: []corev1.Volume{{}},
+ Params: []v1beta1.ParamSpec{{
+ Name: "param-1",
+ Type: v1beta1.ParamTypeString,
+ Description: "My first param",
+ }},
},
- },
+ Provenance: &v1beta1.Provenance{
+ RefSource: &v1beta1.RefSource{
+ URI: "test-uri",
+ Digest: map[string]string{"sha256": "digest"},
+ },
+ FeatureFlags: config.DefaultFeatureFlags.DeepCopy(),
+ }},
+ },
+ },
+ }, {
+ name: "taskrun with stepArtifacts in step state",
+ in: &v1beta1.TaskRun{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "bar",
},
- }, {
- name: "taskrun with stepArtifacts in step state",
- in: &v1beta1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "foo",
- Namespace: "bar",
- },
- Spec: v1beta1.TaskRunSpec{},
- Status: v1beta1.TaskRunStatus{
- TaskRunStatusFields: v1beta1.TaskRunStatusFields{
- Steps: []v1beta1.StepState{{
- Inputs: []v1beta1.TaskRunStepArtifact{{
- Name: "Input",
- Values: []v1beta1.ArtifactValue{
- {
- Uri: "git:example.com",
- Digest: map[v1beta1.Algorithm]string{
- "sha256": "49149151d283ac77d3fd4594825242f076c999903261bd95f79a8b261811c11a",
- "sha1": "22b80854ba81d11d980794952f2343fedf2189d5",
- },
+ Spec: v1beta1.TaskRunSpec{},
+ Status: v1beta1.TaskRunStatus{
+ TaskRunStatusFields: v1beta1.TaskRunStatusFields{
+ Steps: []v1beta1.StepState{{
+ Inputs: []v1beta1.TaskRunStepArtifact{{
+ Name: "Input",
+ Values: []v1beta1.ArtifactValue{
+ {Uri: "git:example.com",
+ Digest: map[v1beta1.Algorithm]string{
+ "sha256": "49149151d283ac77d3fd4594825242f076c999903261bd95f79a8b261811c11a",
+ "sha1": "22b80854ba81d11d980794952f2343fedf2189d5",
},
},
- }},
- Outputs: []v1beta1.TaskRunStepArtifact{{
- Name: "Output",
- Values: []v1beta1.ArtifactValue{
- {
- Uri: "docker:example.aaa/bbb:latest",
- Digest: map[v1beta1.Algorithm]string{
- "sha256": "f05a847a269ccafc90af40ad55aedef62d165227475e4d95ef6812f7c5daa21a",
- },
+ },
+ }},
+ Outputs: []v1beta1.TaskRunStepArtifact{{
+ Name: "Output",
+ Values: []v1beta1.ArtifactValue{
+ {Uri: "docker:example.aaa/bbb:latest",
+ Digest: map[v1beta1.Algorithm]string{
+ "sha256": "f05a847a269ccafc90af40ad55aedef62d165227475e4d95ef6812f7c5daa21a",
},
},
- }},
+ },
}},
- },
+ }},
},
},
},
+ },
}
for _, test := range tests {
@@ -536,6 +510,37 @@ func TestTaskRunConversionFromDeprecated(t *testing.T) {
},
},
},
+ }},
+ }, {
+ name: "bundle",
+ in: &v1beta1.TaskRun{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "bar",
+ },
+ Spec: v1beta1.TaskRunSpec{
+ TaskRef: &v1beta1.TaskRef{
+ Name: "test-bundle-name",
+ Bundle: "test-bundle",
+ },
+ },
+ },
+ want: &v1beta1.TaskRun{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "bar",
+ },
+ Spec: v1beta1.TaskRunSpec{
+ TaskRef: &v1beta1.TaskRef{
+ ResolverRef: v1beta1.ResolverRef{
+ Resolver: "bundles",
+ Params: v1beta1.Params{
+ {Name: "bundle", Value: v1beta1.ParamValue{StringVal: "test-bundle", Type: "string"}},
+ {Name: "name", Value: v1beta1.ParamValue{StringVal: "test-bundle-name", Type: "string"}},
+ {Name: "kind", Value: v1beta1.ParamValue{StringVal: "Task", Type: "string"}},
+ },
+ },
+ },
},
},
}, {
@@ -743,8 +748,7 @@ func TestTaskRunConvertTo(t *testing.T) {
}},
},
},
- },
- }}
+ }}}
for _, test := range tests {
versions := []apis.Convertible{&v1.TaskRun{}}
for _, version := range versions {
@@ -809,8 +813,7 @@ func TestTaskRunConvertFrom(t *testing.T) {
}},
},
},
- },
- }}
+ }}}
for _, test := range tests {
versions := []apis.Convertible{&v1beta1.TaskRun{}}
for _, version := range versions {
diff --git a/upstream/pkg/apis/pipeline/v1beta1/taskrun_types.go b/upstream/pkg/apis/pipeline/v1beta1/taskrun_types.go
index 2cd76f0e570..551a5856ed6 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/taskrun_types.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/taskrun_types.go
@@ -28,7 +28,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/clock"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
@@ -127,9 +126,6 @@ type TaskBreakpoints struct {
// failed step will not exit
// +optional
OnFailure string `json:"onFailure,omitempty"`
- // +optional
- // +listType=atomic
- BeforeSteps []string `json:"beforeSteps,omitempty"`
}
// NeedsDebugOnFailure return true if the TaskRun is configured to debug on failure
@@ -140,28 +136,14 @@ func (trd *TaskRunDebug) NeedsDebugOnFailure() bool {
return trd.Breakpoints.OnFailure == EnabledOnFailureBreakpoint
}
-// NeedsDebugBeforeStep return true if the step is configured to debug before execution
-func (trd *TaskRunDebug) NeedsDebugBeforeStep(stepName string) bool {
- if trd.Breakpoints == nil {
- return false
- }
- beforeStepSets := sets.NewString(trd.Breakpoints.BeforeSteps...)
- return beforeStepSets.Has(stepName)
-}
-
// StepNeedsDebug return true if the step is configured to debug
func (trd *TaskRunDebug) StepNeedsDebug(stepName string) bool {
- return trd.NeedsDebugOnFailure() || trd.NeedsDebugBeforeStep(stepName)
-}
-
-// HaveBeforeSteps return true if have any before steps
-func (trd *TaskRunDebug) HaveBeforeSteps() bool {
- return trd.Breakpoints != nil && len(trd.Breakpoints.BeforeSteps) > 0
+ return trd.NeedsDebugOnFailure()
}
// NeedsDebug return true if defined onfailure or have any before, after steps
func (trd *TaskRunDebug) NeedsDebug() bool {
- return trd.NeedsDebugOnFailure() || trd.HaveBeforeSteps()
+ return trd.NeedsDebugOnFailure()
}
var taskRunCondSet = apis.NewBatchConditionSet()
@@ -390,7 +372,6 @@ type StepState struct {
ContainerName string `json:"container,omitempty"`
ImageID string `json:"imageID,omitempty"`
Results []TaskRunStepResult `json:"results,omitempty"`
- Provenance *Provenance `json:"provenance,omitempty"`
Inputs []TaskRunStepArtifact `json:"inputs,omitempty"`
Outputs []TaskRunStepArtifact `json:"outputs,omitempty"`
}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/taskrun_types_test.go b/upstream/pkg/apis/pipeline/v1beta1/taskrun_types_test.go
index e9c61e4bfb1..d2aa439e04e 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/taskrun_types_test.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/taskrun_types_test.go
@@ -427,7 +427,7 @@ func TestHasTimedOut(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
result := tc.taskRun.HasTimedOut(context.Background(), testClock)
if d := cmp.Diff(tc.expectedStatus, result); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
})
}
@@ -467,57 +467,6 @@ func TestInitializeTaskRunConditions(t *testing.T) {
}
}
-func TestIsDebugBeforeStep(t *testing.T) {
- type args struct {
- stepName string
- trd *v1beta1.TaskRunDebug
- }
- testCases := []struct {
- name string
- args args
- want bool
- }{
- {
- name: "empty breakpoints",
- args: args{
- stepName: "step1",
- trd: &v1beta1.TaskRunDebug{},
- },
- want: false,
- }, {
- name: "breakpoint before step",
- args: args{
- stepName: "step1",
- trd: &v1beta1.TaskRunDebug{
- Breakpoints: &v1beta1.TaskBreakpoints{
- BeforeSteps: []string{"step1", "step2"},
- },
- },
- },
- want: true,
- }, {
- name: "step not in before step breakpoint",
- args: args{
- stepName: "step3",
- trd: &v1beta1.TaskRunDebug{
- Breakpoints: &v1beta1.TaskBreakpoints{
- BeforeSteps: []string{"step1", "step2"},
- },
- },
- },
- want: false,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- result := tc.args.trd.NeedsDebugBeforeStep(tc.args.stepName)
- if d := cmp.Diff(result, tc.want); d != "" {
- t.Fatal(diff.PrintWantGot(d))
- }
- })
- }
-}
-
func TestIsStepNeedDebug(t *testing.T) {
type args struct {
stepName string
@@ -546,24 +495,13 @@ func TestIsStepNeedDebug(t *testing.T) {
},
},
want: true,
- }, {
- name: "breakpoint before step",
- args: args{
- stepName: "step1",
- trd: &v1beta1.TaskRunDebug{
- Breakpoints: &v1beta1.TaskBreakpoints{
- BeforeSteps: []string{"step1"},
- },
- },
- },
- want: true,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
result := tc.args.trd.StepNeedsDebug(tc.args.stepName)
if d := cmp.Diff(tc.want, result); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
})
}
@@ -594,23 +532,13 @@ func TestIsNeedDebug(t *testing.T) {
},
},
want: true,
- }, {
- name: "breakpoint before step",
- args: args{
- trd: &v1beta1.TaskRunDebug{
- Breakpoints: &v1beta1.TaskBreakpoints{
- BeforeSteps: []string{"step1"},
- },
- },
- },
- want: true,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
result := tc.args.trd.NeedsDebug()
if d := cmp.Diff(tc.want, result); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
})
}
@@ -643,7 +571,7 @@ func TestTaskRunIsRetriable(t *testing.T) {
wantIsRetriable: false,
}} {
retriesStatus := []v1beta1.TaskRunStatus{}
- for range tc.numRetriesStatus {
+ for i := 0; i < tc.numRetriesStatus; i++ {
retriesStatus = append(retriesStatus, retryStatus)
}
t.Run(tc.name, func(t *testing.T) {
diff --git a/upstream/pkg/apis/pipeline/v1beta1/taskrun_validation.go b/upstream/pkg/apis/pipeline/v1beta1/taskrun_validation.go
index ae14965c770..b44b3d42e2c 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/taskrun_validation.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/taskrun_validation.go
@@ -26,7 +26,6 @@ import (
"github.com/tektoncd/pipeline/pkg/apis/validate"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/strings/slices"
"knative.dev/pkg/apis"
@@ -51,9 +50,6 @@ func (tr *TaskRun) Validate(ctx context.Context) *apis.FieldError {
// Validate taskrun spec
func (ts *TaskRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
- // Validate the spec changes
- errs = errs.Also(ts.ValidateUpdate(ctx))
-
// Must have exactly one of taskRef and taskSpec.
if ts.TaskRef == nil && ts.TaskSpec == nil {
errs = errs.Also(apis.ErrMissingOneOf("taskRef", "taskSpec"))
@@ -84,11 +80,11 @@ func (ts *TaskRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
errs = errs.Also(validateDebug(ts.Debug).ViaField("debug"))
}
if ts.StepOverrides != nil {
- errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "stepOverrides", config.BetaAPIFields).ViaField("stepOverrides"))
+ errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "stepOverrides", config.AlphaAPIFields).ViaField("stepOverrides"))
errs = errs.Also(validateStepOverrides(ts.StepOverrides).ViaField("stepOverrides"))
}
if ts.SidecarOverrides != nil {
- errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "sidecarOverrides", config.BetaAPIFields).ViaField("sidecarOverrides"))
+ errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "sidecarOverrides", config.AlphaAPIFields).ViaField("sidecarOverrides"))
errs = errs.Also(validateSidecarOverrides(ts.SidecarOverrides).ViaField("sidecarOverrides"))
}
if ts.ComputeResources != nil {
@@ -122,34 +118,6 @@ func (ts *TaskRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
return errs
}
-// ValidateUpdate validates the update of a TaskRunSpec
-func (ts *TaskRunSpec) ValidateUpdate(ctx context.Context) (errs *apis.FieldError) {
- if !apis.IsInUpdate(ctx) {
- return
- }
- oldObj, ok := apis.GetBaseline(ctx).(*TaskRun)
- if !ok || oldObj == nil {
- return
- }
- old := &oldObj.Spec
-
- // If already in the done state, the spec cannot be modified.
- // Otherwise, only the status, statusMessage field can be modified.
- tips := "Once the TaskRun is complete, no updates are allowed"
- if !oldObj.IsDone() {
- old = old.DeepCopy()
- old.Status = ts.Status
- old.StatusMessage = ts.StatusMessage
- tips = "Once the TaskRun has started, only status and statusMessage updates are allowed"
- }
-
- if !equality.Semantic.DeepEqual(old, ts) {
- errs = errs.Also(apis.ErrInvalidValue(tips, ""))
- }
-
- return
-}
-
// validateInlineParameters validates that any parameters called in the
// Task spec are declared in the TaskRun.
// This is crucial for propagated parameters because the parameters could
@@ -256,21 +224,9 @@ func validateDebug(db *TaskRunDebug) (errs *apis.FieldError) {
if db == nil || db.Breakpoints == nil {
return errs
}
-
- if db.Breakpoints.OnFailure == "" {
- errs = errs.Also(apis.ErrInvalidValue("onFailure breakpoint is empty, it is only allowed to be set as enabled", "breakpoints.onFailure"))
- }
-
if db.Breakpoints.OnFailure != "" && db.Breakpoints.OnFailure != EnabledOnFailureBreakpoint {
errs = errs.Also(apis.ErrInvalidValue(db.Breakpoints.OnFailure+" is not a valid onFailure breakpoint value, onFailure breakpoint is only allowed to be set as enabled", "breakpoints.onFailure"))
}
- beforeSteps := sets.NewString()
- for i, step := range db.Breakpoints.BeforeSteps {
- if beforeSteps.Has(step) {
- errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("before step must be unique, the same step: %s is defined multiple times at", step), fmt.Sprintf("breakpoints.beforeSteps[%d]", i)))
- }
- beforeSteps.Insert(step)
- }
return errs
}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/taskrun_validation_test.go b/upstream/pkg/apis/pipeline/v1beta1/taskrun_validation_test.go
index a80339f31b9..3bab6c0e77c 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/taskrun_validation_test.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/taskrun_validation_test.go
@@ -22,7 +22,6 @@ import (
"time"
"github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
"github.com/tektoncd/pipeline/pkg/apis/config"
cfgtesting "github.com/tektoncd/pipeline/pkg/apis/config/testing"
pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
@@ -32,7 +31,6 @@ import (
corev1resources "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
- duckv1 "knative.dev/pkg/apis/duck/v1"
)
func EnableForbiddenEnv(ctx context.Context) context.Context {
@@ -145,21 +143,6 @@ func TestTaskRun_Invalidate(t *testing.T) {
Message: `missing field(s)`,
Paths: []string{"spec.task-words.properties"},
},
- }, {
- name: "uses bundle (deprecated) on creation is disallowed",
- taskRun: &v1beta1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "taskrunrunname",
- },
- Spec: v1beta1.TaskRunSpec{
- TaskRef: &v1beta1.TaskRef{
- Name: "foo",
- Bundle: "example.com/foo/bar",
- },
- },
- },
- want: &apis.FieldError{Message: "must not set the field(s)", Paths: []string{"spec.taskRef.bundle"}},
- wc: apis.WithinCreate,
}}
for _, ts := range tests {
t.Run(ts.name, func(t *testing.T) {
@@ -448,7 +431,7 @@ func TestTaskRun_Validate(t *testing.T) {
},
},
}, {
- name: "beta feature: valid step and sidecar overrides",
+ name: "alpha feature: valid step and sidecar overrides",
taskRun: &v1beta1.TaskRun{
ObjectMeta: metav1.ObjectMeta{Name: "tr"},
Spec: v1beta1.TaskRunSpec{
@@ -467,7 +450,7 @@ func TestTaskRun_Validate(t *testing.T) {
}},
},
},
- wc: cfgtesting.EnableBetaAPIFields,
+ wc: cfgtesting.EnableAlphaAPIFields,
}}
for _, ts := range tests {
t.Run(ts.name, func(t *testing.T) {
@@ -701,34 +684,35 @@ func TestTaskRunSpec_Invalidate(t *testing.T) {
wantErr: apis.ErrInvalidValue("turnOn is not a valid onFailure breakpoint value, onFailure breakpoint is only allowed to be set as enabled", "debug.breakpoints.onFailure"),
wc: cfgtesting.EnableAlphaAPIFields,
}, {
- name: "invalid breakpoint duplicate before steps",
+ name: "stepOverride disallowed without alpha feature gate",
spec: v1beta1.TaskRunSpec{
TaskRef: &v1beta1.TaskRef{
- Name: "my-task",
+ Name: "foo",
},
- Debug: &v1beta1.TaskRunDebug{
- Breakpoints: &v1beta1.TaskBreakpoints{
- BeforeSteps: []string{"step-1", "step-1"},
- OnFailure: "enabled",
+ StepOverrides: []v1beta1.TaskRunStepOverride{{
+ Name: "foo",
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
},
- },
+ }},
},
- wantErr: apis.ErrGeneric("before step must be unique, the same step: step-1 is defined multiple times at", "debug.breakpoints.beforeSteps[1]"),
- wc: cfgtesting.EnableAlphaAPIFields,
+ wc: cfgtesting.EnableStableAPIFields,
+ wantErr: apis.ErrGeneric("stepOverrides requires \"enable-api-fields\" feature gate to be \"alpha\" but it is \"stable\""),
}, {
- name: "empty onFailure breakpoint",
+ name: "sidecarOverride disallowed without alpha feature gate",
spec: v1beta1.TaskRunSpec{
TaskRef: &v1beta1.TaskRef{
- Name: "my-task",
+ Name: "foo",
},
- Debug: &v1beta1.TaskRunDebug{
- Breakpoints: &v1beta1.TaskBreakpoints{
- OnFailure: "",
+ SidecarOverrides: []v1beta1.TaskRunSidecarOverride{{
+ Name: "foo",
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{corev1.ResourceMemory: corev1resources.MustParse("1Gi")},
},
- },
+ }},
},
- wantErr: apis.ErrInvalidValue("onFailure breakpoint is empty, it is only allowed to be set as enabled", "debug.breakpoints.onFailure"),
- wc: cfgtesting.EnableAlphaAPIFields,
+ wc: cfgtesting.EnableStableAPIFields,
+ wantErr: apis.ErrGeneric("sidecarOverrides requires \"enable-api-fields\" feature gate to be \"alpha\" but it is \"stable\""),
}, {
name: "duplicate stepOverride names",
spec: v1beta1.TaskRunSpec{
@@ -941,7 +925,7 @@ func TestTaskRunSpec_Validate(t *testing.T) {
},
}},
},
- wc: cfgtesting.EnableBetaAPIFields,
+ wc: cfgtesting.EnableAlphaAPIFields,
}}
for _, ts := range tests {
@@ -956,148 +940,3 @@ func TestTaskRunSpec_Validate(t *testing.T) {
})
}
}
-
-func TestTaskRunSpec_ValidateUpdate(t *testing.T) {
- tests := []struct {
- name string
- isCreate bool
- isUpdate bool
- baselineTaskRun *v1beta1.TaskRun
- taskRun *v1beta1.TaskRun
- expectedError apis.FieldError
- }{
- {
- name: "is create ctx",
- taskRun: &v1beta1.TaskRun{
- Spec: v1beta1.TaskRunSpec{},
- },
- isCreate: true,
- isUpdate: false,
- expectedError: apis.FieldError{},
- }, {
- name: "is update ctx, no changes",
- baselineTaskRun: &v1beta1.TaskRun{
- Spec: v1beta1.TaskRunSpec{
- Status: "TaskRunCancelled",
- },
- },
- taskRun: &v1beta1.TaskRun{
- Spec: v1beta1.TaskRunSpec{
- Status: "TaskRunCancelled",
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{},
- }, {
- name: "is update ctx, baseline is nil, skip validation",
- baselineTaskRun: nil,
- taskRun: &v1beta1.TaskRun{
- Spec: v1beta1.TaskRunSpec{
- Timeout: &metav1.Duration{Duration: 1},
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{},
- }, {
- name: "is update ctx, baseline is unknown, only status changes",
- baselineTaskRun: &v1beta1.TaskRun{
- Spec: v1beta1.TaskRunSpec{
- Status: "",
- StatusMessage: "",
- },
- Status: v1beta1.TaskRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{
- {Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown},
- },
- },
- },
- },
- taskRun: &v1beta1.TaskRun{
- Spec: v1beta1.TaskRunSpec{
- Status: "TaskRunCancelled",
- StatusMessage: "TaskRun is cancelled",
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{},
- }, {
- name: "is update ctx, baseline is unknown, status and timeout changes",
- baselineTaskRun: &v1beta1.TaskRun{
- Spec: v1beta1.TaskRunSpec{
- Status: "",
- StatusMessage: "",
- Timeout: &metav1.Duration{Duration: 0},
- },
- Status: v1beta1.TaskRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{
- {Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown},
- },
- },
- },
- },
- taskRun: &v1beta1.TaskRun{
- Spec: v1beta1.TaskRunSpec{
- Status: "TaskRunCancelled",
- StatusMessage: "TaskRun is cancelled",
- Timeout: &metav1.Duration{Duration: 1},
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{
- Message: `invalid value: Once the TaskRun has started, only status and statusMessage updates are allowed`,
- Paths: []string{""},
- },
- }, {
- name: "is update ctx, baseline is done, status changes",
- baselineTaskRun: &v1beta1.TaskRun{
- Spec: v1beta1.TaskRunSpec{
- Status: "",
- },
- Status: v1beta1.TaskRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{
- {Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue},
- },
- },
- },
- },
- taskRun: &v1beta1.TaskRun{
- Spec: v1beta1.TaskRunSpec{
- Status: "TaskRunCancelled",
- },
- },
- isCreate: false,
- isUpdate: true,
- expectedError: apis.FieldError{
- Message: `invalid value: Once the TaskRun is complete, no updates are allowed`,
- Paths: []string{""},
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- ctx := config.ToContext(context.Background(), &config.Config{
- FeatureFlags: &config.FeatureFlags{},
- Defaults: &config.Defaults{},
- })
- if tt.isCreate {
- ctx = apis.WithinCreate(ctx)
- }
- if tt.isUpdate {
- ctx = apis.WithinUpdate(ctx, tt.baselineTaskRun)
- }
- tr := tt.taskRun
- err := tr.Spec.ValidateUpdate(ctx)
- if d := cmp.Diff(tt.expectedError.Error(), err.Error(), cmpopts.IgnoreUnexported(apis.FieldError{})); d != "" {
- t.Errorf("TaskRunSpec.ValidateUpdate() errors diff %s", diff.PrintWantGot(d))
- }
- })
- }
-}
diff --git a/upstream/pkg/apis/pipeline/v1beta1/when_types.go b/upstream/pkg/apis/pipeline/v1beta1/when_types.go
index ad24f8e62e2..f792ec199c8 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/when_types.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/when_types.go
@@ -98,8 +98,6 @@ func (we *WhenExpression) GetVarSubstitutionExpressions() ([]string, bool) {
// All of them need to evaluate to True for a guarded Task to be executed.
type WhenExpressions []WhenExpression
-type StepWhenExpressions = WhenExpressions
-
// AllowsExecution evaluates an Input's relationship to an array of Values, based on the Operator,
// to determine whether all the When Expressions are True. If they are all True, the guarded Task is
// executed, otherwise it is skipped.
diff --git a/upstream/pkg/apis/pipeline/v1beta1/when_validation.go b/upstream/pkg/apis/pipeline/v1beta1/when_validation.go
index aa6b4b4cbd7..33855040b2b 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/when_validation.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/when_validation.go
@@ -48,7 +48,7 @@ func (wes WhenExpressions) validateWhenExpressionsFields(ctx context.Context) (e
func (we *WhenExpression) validateWhenExpressionFields(ctx context.Context) *apis.FieldError {
if we.CEL != "" {
if !config.FromContextOrDefaults(ctx).FeatureFlags.EnableCELInWhenExpression {
- return apis.ErrGeneric(fmt.Sprintf("feature flag %s should be set to true to use CEL: %s in WhenExpression", config.EnableCELInWhenExpression, we.CEL), "")
+ return apis.ErrGeneric("feature flag %s should be set to true to use CEL: %s in WhenExpression", config.EnableCELInWhenExpression, we.CEL)
}
if we.Input != "" || we.Operator != "" || len(we.Values) != 0 {
return apis.ErrGeneric(fmt.Sprintf("cel and input+operator+values cannot be set in one WhenExpression: %v", we))
diff --git a/upstream/pkg/apis/pipeline/v1beta1/workspace_validation_test.go b/upstream/pkg/apis/pipeline/v1beta1/workspace_validation_test.go
index 8eb0dc471be..43d621ff632 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/workspace_validation_test.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/workspace_validation_test.go
@@ -49,7 +49,7 @@ func TestWorkspaceBindingValidateValid(t *testing.T) {
},
Spec: corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
- Resources: corev1.VolumeResourceRequirements{
+ Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
"storage": resource.MustParse("1Gi"),
},
diff --git a/upstream/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go b/upstream/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go
index 8c2afe35a6f..807595a1925 100644
--- a/upstream/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go
+++ b/upstream/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go
@@ -1696,11 +1696,6 @@ func (in *Sidecar) DeepCopyInto(out *Sidecar) {
*out = make([]WorkspaceUsage, len(*in))
copy(*out, *in)
}
- if in.RestartPolicy != nil {
- in, out := &in.RestartPolicy, &out.RestartPolicy
- *out = new(corev1.ContainerRestartPolicy)
- **out = **in
- }
return
}
@@ -1863,13 +1858,6 @@ func (in *Step) DeepCopyInto(out *Step) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- if in.When != nil {
- in, out := &in.When, &out.When
- *out = make(WhenExpressions, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
return
}
@@ -1883,125 +1871,6 @@ func (in *Step) DeepCopy() *Step {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StepAction) DeepCopyInto(out *StepAction) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepAction.
-func (in *StepAction) DeepCopy() *StepAction {
- if in == nil {
- return nil
- }
- out := new(StepAction)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *StepAction) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StepActionList) DeepCopyInto(out *StepActionList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]StepAction, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepActionList.
-func (in *StepActionList) DeepCopy() *StepActionList {
- if in == nil {
- return nil
- }
- out := new(StepActionList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *StepActionList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StepActionSpec) DeepCopyInto(out *StepActionSpec) {
- *out = *in
- if in.Command != nil {
- in, out := &in.Command, &out.Command
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Args != nil {
- in, out := &in.Args, &out.Args
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- if in.Env != nil {
- in, out := &in.Env, &out.Env
- *out = make([]corev1.EnvVar, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Params != nil {
- in, out := &in.Params, &out.Params
- *out = make(pipelinev1.ParamSpecs, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Results != nil {
- in, out := &in.Results, &out.Results
- *out = make([]pipelinev1.StepResult, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.SecurityContext != nil {
- in, out := &in.SecurityContext, &out.SecurityContext
- *out = new(corev1.SecurityContext)
- (*in).DeepCopyInto(*out)
- }
- if in.VolumeMounts != nil {
- in, out := &in.VolumeMounts, &out.VolumeMounts
- *out = make([]corev1.VolumeMount, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepActionSpec.
-func (in *StepActionSpec) DeepCopy() *StepActionSpec {
- if in == nil {
- return nil
- }
- out := new(StepActionSpec)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StepOutputConfig) DeepCopyInto(out *StepOutputConfig) {
*out = *in
@@ -2029,11 +1898,6 @@ func (in *StepState) DeepCopyInto(out *StepState) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- if in.Provenance != nil {
- in, out := &in.Provenance, &out.Provenance
- *out = new(Provenance)
- (*in).DeepCopyInto(*out)
- }
if in.Inputs != nil {
in, out := &in.Inputs, &out.Inputs
*out = make([]Artifact, len(*in))
@@ -2174,11 +2038,6 @@ func (in *Task) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TaskBreakpoints) DeepCopyInto(out *TaskBreakpoints) {
*out = *in
- if in.BeforeSteps != nil {
- in, out := &in.BeforeSteps, &out.BeforeSteps
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
return
}
@@ -2369,7 +2228,7 @@ func (in *TaskRunDebug) DeepCopyInto(out *TaskRunDebug) {
if in.Breakpoints != nil {
in, out := &in.Breakpoints, &out.Breakpoints
*out = new(TaskBreakpoints)
- (*in).DeepCopyInto(*out)
+ **out = **in
}
return
}
diff --git a/upstream/pkg/apis/resolution/v1beta1/resolution_request_types.go b/upstream/pkg/apis/resolution/v1beta1/resolution_request_types.go
index f78a4a493c3..60b51fa0498 100644
--- a/upstream/pkg/apis/resolution/v1beta1/resolution_request_types.go
+++ b/upstream/pkg/apis/resolution/v1beta1/resolution_request_types.go
@@ -64,13 +64,6 @@ type ResolutionRequestSpec struct {
// +optional
// +listType=atomic
Params []pipelinev1.Param `json:"params,omitempty"`
- // URL is the runtime url passed to the resolver
- // to help it figure out how to resolver the resource being
- // requested.
- // This is currently at an ALPHA stability level and subject to
- // alpha API compatibility policies.
- // +optional
- URL string `json:"url,omitempty"`
}
// ResolutionRequestStatus are all the fields in a ResolutionRequest's
diff --git a/upstream/pkg/client/clientset/versioned/typed/pipeline/v1beta1/fake/fake_pipeline_client.go b/upstream/pkg/client/clientset/versioned/typed/pipeline/v1beta1/fake/fake_pipeline_client.go
index 326e2fbb252..a142026b2ba 100644
--- a/upstream/pkg/client/clientset/versioned/typed/pipeline/v1beta1/fake/fake_pipeline_client.go
+++ b/upstream/pkg/client/clientset/versioned/typed/pipeline/v1beta1/fake/fake_pipeline_client.go
@@ -44,10 +44,6 @@ func (c *FakeTektonV1beta1) PipelineRuns(namespace string) v1beta1.PipelineRunIn
return &FakePipelineRuns{c, namespace}
}
-func (c *FakeTektonV1beta1) StepActions(namespace string) v1beta1.StepActionInterface {
- return &FakeStepActions{c, namespace}
-}
-
func (c *FakeTektonV1beta1) Tasks(namespace string) v1beta1.TaskInterface {
return &FakeTasks{c, namespace}
}
diff --git a/upstream/pkg/client/clientset/versioned/typed/pipeline/v1beta1/fake/fake_stepaction.go b/upstream/pkg/client/clientset/versioned/typed/pipeline/v1beta1/fake/fake_stepaction.go
deleted file mode 100644
index 048c9e56c34..00000000000
--- a/upstream/pkg/client/clientset/versioned/typed/pipeline/v1beta1/fake/fake_stepaction.go
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
-Copyright 2020 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
- "context"
-
- v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- labels "k8s.io/apimachinery/pkg/labels"
- types "k8s.io/apimachinery/pkg/types"
- watch "k8s.io/apimachinery/pkg/watch"
- testing "k8s.io/client-go/testing"
-)
-
-// FakeStepActions implements StepActionInterface
-type FakeStepActions struct {
- Fake *FakeTektonV1beta1
- ns string
-}
-
-var stepactionsResource = v1beta1.SchemeGroupVersion.WithResource("stepactions")
-
-var stepactionsKind = v1beta1.SchemeGroupVersion.WithKind("StepAction")
-
-// Get takes name of the stepAction, and returns the corresponding stepAction object, and an error if there is any.
-func (c *FakeStepActions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StepAction, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewGetAction(stepactionsResource, c.ns, name), &v1beta1.StepAction{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*v1beta1.StepAction), err
-}
-
-// List takes label and field selectors, and returns the list of StepActions that match those selectors.
-func (c *FakeStepActions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StepActionList, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewListAction(stepactionsResource, stepactionsKind, c.ns, opts), &v1beta1.StepActionList{})
-
- if obj == nil {
- return nil, err
- }
-
- label, _, _ := testing.ExtractFromListOptions(opts)
- if label == nil {
- label = labels.Everything()
- }
- list := &v1beta1.StepActionList{ListMeta: obj.(*v1beta1.StepActionList).ListMeta}
- for _, item := range obj.(*v1beta1.StepActionList).Items {
- if label.Matches(labels.Set(item.Labels)) {
- list.Items = append(list.Items, item)
- }
- }
- return list, err
-}
-
-// Watch returns a watch.Interface that watches the requested stepActions.
-func (c *FakeStepActions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
- return c.Fake.
- InvokesWatch(testing.NewWatchAction(stepactionsResource, c.ns, opts))
-
-}
-
-// Create takes the representation of a stepAction and creates it. Returns the server's representation of the stepAction, and an error, if there is any.
-func (c *FakeStepActions) Create(ctx context.Context, stepAction *v1beta1.StepAction, opts v1.CreateOptions) (result *v1beta1.StepAction, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewCreateAction(stepactionsResource, c.ns, stepAction), &v1beta1.StepAction{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*v1beta1.StepAction), err
-}
-
-// Update takes the representation of a stepAction and updates it. Returns the server's representation of the stepAction, and an error, if there is any.
-func (c *FakeStepActions) Update(ctx context.Context, stepAction *v1beta1.StepAction, opts v1.UpdateOptions) (result *v1beta1.StepAction, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewUpdateAction(stepactionsResource, c.ns, stepAction), &v1beta1.StepAction{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*v1beta1.StepAction), err
-}
-
-// Delete takes name of the stepAction and deletes it. Returns an error if one occurs.
-func (c *FakeStepActions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
- _, err := c.Fake.
- Invokes(testing.NewDeleteActionWithOptions(stepactionsResource, c.ns, name, opts), &v1beta1.StepAction{})
-
- return err
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *FakeStepActions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
- action := testing.NewDeleteCollectionAction(stepactionsResource, c.ns, listOpts)
-
- _, err := c.Fake.Invokes(action, &v1beta1.StepActionList{})
- return err
-}
-
-// Patch applies the patch and returns the patched stepAction.
-func (c *FakeStepActions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StepAction, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewPatchSubresourceAction(stepactionsResource, c.ns, name, pt, data, subresources...), &v1beta1.StepAction{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*v1beta1.StepAction), err
-}
diff --git a/upstream/pkg/client/clientset/versioned/typed/pipeline/v1beta1/generated_expansion.go b/upstream/pkg/client/clientset/versioned/typed/pipeline/v1beta1/generated_expansion.go
index 87f277c5c7a..b9f3554be3e 100644
--- a/upstream/pkg/client/clientset/versioned/typed/pipeline/v1beta1/generated_expansion.go
+++ b/upstream/pkg/client/clientset/versioned/typed/pipeline/v1beta1/generated_expansion.go
@@ -26,8 +26,6 @@ type PipelineExpansion interface{}
type PipelineRunExpansion interface{}
-type StepActionExpansion interface{}
-
type TaskExpansion interface{}
type TaskRunExpansion interface{}
diff --git a/upstream/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipeline_client.go b/upstream/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipeline_client.go
index fcd65e7ce35..0974d31771a 100644
--- a/upstream/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipeline_client.go
+++ b/upstream/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipeline_client.go
@@ -32,7 +32,6 @@ type TektonV1beta1Interface interface {
CustomRunsGetter
PipelinesGetter
PipelineRunsGetter
- StepActionsGetter
TasksGetter
TaskRunsGetter
}
@@ -58,10 +57,6 @@ func (c *TektonV1beta1Client) PipelineRuns(namespace string) PipelineRunInterfac
return newPipelineRuns(c, namespace)
}
-func (c *TektonV1beta1Client) StepActions(namespace string) StepActionInterface {
- return newStepActions(c, namespace)
-}
-
func (c *TektonV1beta1Client) Tasks(namespace string) TaskInterface {
return newTasks(c, namespace)
}
diff --git a/upstream/pkg/client/clientset/versioned/typed/pipeline/v1beta1/stepaction.go b/upstream/pkg/client/clientset/versioned/typed/pipeline/v1beta1/stepaction.go
deleted file mode 100644
index 388f0629540..00000000000
--- a/upstream/pkg/client/clientset/versioned/typed/pipeline/v1beta1/stepaction.go
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
-Copyright 2020 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1beta1
-
-import (
- "context"
- "time"
-
- v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
- scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- types "k8s.io/apimachinery/pkg/types"
- watch "k8s.io/apimachinery/pkg/watch"
- rest "k8s.io/client-go/rest"
-)
-
-// StepActionsGetter has a method to return a StepActionInterface.
-// A group's client should implement this interface.
-type StepActionsGetter interface {
- StepActions(namespace string) StepActionInterface
-}
-
-// StepActionInterface has methods to work with StepAction resources.
-type StepActionInterface interface {
- Create(ctx context.Context, stepAction *v1beta1.StepAction, opts v1.CreateOptions) (*v1beta1.StepAction, error)
- Update(ctx context.Context, stepAction *v1beta1.StepAction, opts v1.UpdateOptions) (*v1beta1.StepAction, error)
- Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
- DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.StepAction, error)
- List(ctx context.Context, opts v1.ListOptions) (*v1beta1.StepActionList, error)
- Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StepAction, err error)
- StepActionExpansion
-}
-
-// stepActions implements StepActionInterface
-type stepActions struct {
- client rest.Interface
- ns string
-}
-
-// newStepActions returns a StepActions
-func newStepActions(c *TektonV1beta1Client, namespace string) *stepActions {
- return &stepActions{
- client: c.RESTClient(),
- ns: namespace,
- }
-}
-
-// Get takes name of the stepAction, and returns the corresponding stepAction object, and an error if there is any.
-func (c *stepActions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StepAction, err error) {
- result = &v1beta1.StepAction{}
- err = c.client.Get().
- Namespace(c.ns).
- Resource("stepactions").
- Name(name).
- VersionedParams(&options, scheme.ParameterCodec).
- Do(ctx).
- Into(result)
- return
-}
-
-// List takes label and field selectors, and returns the list of StepActions that match those selectors.
-func (c *stepActions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StepActionList, err error) {
- var timeout time.Duration
- if opts.TimeoutSeconds != nil {
- timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
- }
- result = &v1beta1.StepActionList{}
- err = c.client.Get().
- Namespace(c.ns).
- Resource("stepactions").
- VersionedParams(&opts, scheme.ParameterCodec).
- Timeout(timeout).
- Do(ctx).
- Into(result)
- return
-}
-
-// Watch returns a watch.Interface that watches the requested stepActions.
-func (c *stepActions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
- var timeout time.Duration
- if opts.TimeoutSeconds != nil {
- timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
- }
- opts.Watch = true
- return c.client.Get().
- Namespace(c.ns).
- Resource("stepactions").
- VersionedParams(&opts, scheme.ParameterCodec).
- Timeout(timeout).
- Watch(ctx)
-}
-
-// Create takes the representation of a stepAction and creates it. Returns the server's representation of the stepAction, and an error, if there is any.
-func (c *stepActions) Create(ctx context.Context, stepAction *v1beta1.StepAction, opts v1.CreateOptions) (result *v1beta1.StepAction, err error) {
- result = &v1beta1.StepAction{}
- err = c.client.Post().
- Namespace(c.ns).
- Resource("stepactions").
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(stepAction).
- Do(ctx).
- Into(result)
- return
-}
-
-// Update takes the representation of a stepAction and updates it. Returns the server's representation of the stepAction, and an error, if there is any.
-func (c *stepActions) Update(ctx context.Context, stepAction *v1beta1.StepAction, opts v1.UpdateOptions) (result *v1beta1.StepAction, err error) {
- result = &v1beta1.StepAction{}
- err = c.client.Put().
- Namespace(c.ns).
- Resource("stepactions").
- Name(stepAction.Name).
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(stepAction).
- Do(ctx).
- Into(result)
- return
-}
-
-// Delete takes name of the stepAction and deletes it. Returns an error if one occurs.
-func (c *stepActions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
- return c.client.Delete().
- Namespace(c.ns).
- Resource("stepactions").
- Name(name).
- Body(&opts).
- Do(ctx).
- Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *stepActions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
- var timeout time.Duration
- if listOpts.TimeoutSeconds != nil {
- timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
- }
- return c.client.Delete().
- Namespace(c.ns).
- Resource("stepactions").
- VersionedParams(&listOpts, scheme.ParameterCodec).
- Timeout(timeout).
- Body(&opts).
- Do(ctx).
- Error()
-}
-
-// Patch applies the patch and returns the patched stepAction.
-func (c *stepActions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StepAction, err error) {
- result = &v1beta1.StepAction{}
- err = c.client.Patch(pt).
- Namespace(c.ns).
- Resource("stepactions").
- Name(name).
- SubResource(subresources...).
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(data).
- Do(ctx).
- Into(result)
- return
-}
diff --git a/upstream/pkg/client/informers/externalversions/factory.go b/upstream/pkg/client/informers/externalversions/factory.go
index 839585d5076..081412ecce7 100644
--- a/upstream/pkg/client/informers/externalversions/factory.go
+++ b/upstream/pkg/client/informers/externalversions/factory.go
@@ -42,7 +42,6 @@ type sharedInformerFactory struct {
lock sync.Mutex
defaultResync time.Duration
customResync map[reflect.Type]time.Duration
- transform cache.TransformFunc
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started.
@@ -81,14 +80,6 @@ func WithNamespace(namespace string) SharedInformerOption {
}
}
-// WithTransform sets a transform on all informers.
-func WithTransform(transform cache.TransformFunc) SharedInformerOption {
- return func(factory *sharedInformerFactory) *sharedInformerFactory {
- factory.transform = transform
- return factory
- }
-}
-
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync)
@@ -193,7 +184,6 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal
}
informer = newFunc(f.client, resyncPeriod)
- informer.SetTransform(f.transform)
f.informers[informerType] = informer
return informer
diff --git a/upstream/pkg/client/informers/externalversions/generic.go b/upstream/pkg/client/informers/externalversions/generic.go
index fe44a25ab11..cec37e05376 100644
--- a/upstream/pkg/client/informers/externalversions/generic.go
+++ b/upstream/pkg/client/informers/externalversions/generic.go
@@ -81,8 +81,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1beta1().Pipelines().Informer()}, nil
case v1beta1.SchemeGroupVersion.WithResource("pipelineruns"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1beta1().PipelineRuns().Informer()}, nil
- case v1beta1.SchemeGroupVersion.WithResource("stepactions"):
- return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1beta1().StepActions().Informer()}, nil
case v1beta1.SchemeGroupVersion.WithResource("tasks"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1beta1().Tasks().Informer()}, nil
case v1beta1.SchemeGroupVersion.WithResource("taskruns"):
diff --git a/upstream/pkg/client/informers/externalversions/pipeline/v1beta1/interface.go b/upstream/pkg/client/informers/externalversions/pipeline/v1beta1/interface.go
index 2821b942ca9..307843a8014 100644
--- a/upstream/pkg/client/informers/externalversions/pipeline/v1beta1/interface.go
+++ b/upstream/pkg/client/informers/externalversions/pipeline/v1beta1/interface.go
@@ -32,8 +32,6 @@ type Interface interface {
Pipelines() PipelineInformer
// PipelineRuns returns a PipelineRunInformer.
PipelineRuns() PipelineRunInformer
- // StepActions returns a StepActionInformer.
- StepActions() StepActionInformer
// Tasks returns a TaskInformer.
Tasks() TaskInformer
// TaskRuns returns a TaskRunInformer.
@@ -71,11 +69,6 @@ func (v *version) PipelineRuns() PipelineRunInformer {
return &pipelineRunInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
-// StepActions returns a StepActionInformer.
-func (v *version) StepActions() StepActionInformer {
- return &stepActionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
-}
-
// Tasks returns a TaskInformer.
func (v *version) Tasks() TaskInformer {
return &taskInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
diff --git a/upstream/pkg/client/informers/externalversions/pipeline/v1beta1/stepaction.go b/upstream/pkg/client/informers/externalversions/pipeline/v1beta1/stepaction.go
deleted file mode 100644
index 4ec8578199e..00000000000
--- a/upstream/pkg/client/informers/externalversions/pipeline/v1beta1/stepaction.go
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
-Copyright 2020 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by informer-gen. DO NOT EDIT.
-
-package v1beta1
-
-import (
- "context"
- time "time"
-
- pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
- versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
- internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces"
- v1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- runtime "k8s.io/apimachinery/pkg/runtime"
- watch "k8s.io/apimachinery/pkg/watch"
- cache "k8s.io/client-go/tools/cache"
-)
-
-// StepActionInformer provides access to a shared informer and lister for
-// StepActions.
-type StepActionInformer interface {
- Informer() cache.SharedIndexInformer
- Lister() v1beta1.StepActionLister
-}
-
-type stepActionInformer struct {
- factory internalinterfaces.SharedInformerFactory
- tweakListOptions internalinterfaces.TweakListOptionsFunc
- namespace string
-}
-
-// NewStepActionInformer constructs a new informer for StepAction type.
-// Always prefer using an informer factory to get a shared informer instead of getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewStepActionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
- return NewFilteredStepActionInformer(client, namespace, resyncPeriod, indexers, nil)
-}
-
-// NewFilteredStepActionInformer constructs a new informer for StepAction type.
-// Always prefer using an informer factory to get a shared informer instead of getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewFilteredStepActionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
- return cache.NewSharedIndexInformer(
- &cache.ListWatch{
- ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
- if tweakListOptions != nil {
- tweakListOptions(&options)
- }
- return client.TektonV1beta1().StepActions(namespace).List(context.TODO(), options)
- },
- WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
- if tweakListOptions != nil {
- tweakListOptions(&options)
- }
- return client.TektonV1beta1().StepActions(namespace).Watch(context.TODO(), options)
- },
- },
- &pipelinev1beta1.StepAction{},
- resyncPeriod,
- indexers,
- )
-}
-
-func (f *stepActionInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
- return NewFilteredStepActionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
-}
-
-func (f *stepActionInformer) Informer() cache.SharedIndexInformer {
- return f.factory.InformerFor(&pipelinev1beta1.StepAction{}, f.defaultInformer)
-}
-
-func (f *stepActionInformer) Lister() v1beta1.StepActionLister {
- return v1beta1.NewStepActionLister(f.Informer().GetIndexer())
-}
diff --git a/upstream/pkg/client/injection/informers/pipeline/v1beta1/stepaction/fake/fake.go b/upstream/pkg/client/injection/informers/pipeline/v1beta1/stepaction/fake/fake.go
deleted file mode 100644
index 7372bf60da5..00000000000
--- a/upstream/pkg/client/injection/informers/pipeline/v1beta1/stepaction/fake/fake.go
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
-Copyright 2020 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by injection-gen. DO NOT EDIT.
-
-package fake
-
-import (
- context "context"
-
- fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/fake"
- stepaction "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/stepaction"
- controller "knative.dev/pkg/controller"
- injection "knative.dev/pkg/injection"
-)
-
-var Get = stepaction.Get
-
-func init() {
- injection.Fake.RegisterInformer(withInformer)
-}
-
-func withInformer(ctx context.Context) (context.Context, controller.Informer) {
- f := fake.Get(ctx)
- inf := f.Tekton().V1beta1().StepActions()
- return context.WithValue(ctx, stepaction.Key{}, inf), inf.Informer()
-}
diff --git a/upstream/pkg/client/injection/informers/pipeline/v1beta1/stepaction/filtered/fake/fake.go b/upstream/pkg/client/injection/informers/pipeline/v1beta1/stepaction/filtered/fake/fake.go
deleted file mode 100644
index 209d758a1de..00000000000
--- a/upstream/pkg/client/injection/informers/pipeline/v1beta1/stepaction/filtered/fake/fake.go
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
-Copyright 2020 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by injection-gen. DO NOT EDIT.
-
-package fake
-
-import (
- context "context"
-
- factoryfiltered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
- filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/stepaction/filtered"
- controller "knative.dev/pkg/controller"
- injection "knative.dev/pkg/injection"
- logging "knative.dev/pkg/logging"
-)
-
-var Get = filtered.Get
-
-func init() {
- injection.Fake.RegisterFilteredInformers(withInformer)
-}
-
-func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
- untyped := ctx.Value(factoryfiltered.LabelKey{})
- if untyped == nil {
- logging.FromContext(ctx).Panic(
- "Unable to fetch labelkey from context.")
- }
- labelSelectors := untyped.([]string)
- infs := []controller.Informer{}
- for _, selector := range labelSelectors {
- f := factoryfiltered.Get(ctx, selector)
- inf := f.Tekton().V1beta1().StepActions()
- ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
- infs = append(infs, inf.Informer())
- }
- return ctx, infs
-}
diff --git a/upstream/pkg/client/injection/informers/pipeline/v1beta1/stepaction/filtered/stepaction.go b/upstream/pkg/client/injection/informers/pipeline/v1beta1/stepaction/filtered/stepaction.go
deleted file mode 100644
index efc517d9ded..00000000000
--- a/upstream/pkg/client/injection/informers/pipeline/v1beta1/stepaction/filtered/stepaction.go
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
-Copyright 2020 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by injection-gen. DO NOT EDIT.
-
-package filtered
-
-import (
- context "context"
-
- v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
- filtered "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/filtered"
- controller "knative.dev/pkg/controller"
- injection "knative.dev/pkg/injection"
- logging "knative.dev/pkg/logging"
-)
-
-func init() {
- injection.Default.RegisterFilteredInformers(withInformer)
-}
-
-// Key is used for associating the Informer inside the context.Context.
-type Key struct {
- Selector string
-}
-
-func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
- untyped := ctx.Value(filtered.LabelKey{})
- if untyped == nil {
- logging.FromContext(ctx).Panic(
- "Unable to fetch labelkey from context.")
- }
- labelSelectors := untyped.([]string)
- infs := []controller.Informer{}
- for _, selector := range labelSelectors {
- f := filtered.Get(ctx, selector)
- inf := f.Tekton().V1beta1().StepActions()
- ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
- infs = append(infs, inf.Informer())
- }
- return ctx, infs
-}
-
-// Get extracts the typed informer from the context.
-func Get(ctx context.Context, selector string) v1beta1.StepActionInformer {
- untyped := ctx.Value(Key{Selector: selector})
- if untyped == nil {
- logging.FromContext(ctx).Panicf(
- "Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1.StepActionInformer with selector %s from context.", selector)
- }
- return untyped.(v1beta1.StepActionInformer)
-}
diff --git a/upstream/pkg/client/injection/informers/pipeline/v1beta1/stepaction/stepaction.go b/upstream/pkg/client/injection/informers/pipeline/v1beta1/stepaction/stepaction.go
deleted file mode 100644
index ffb873d1965..00000000000
--- a/upstream/pkg/client/injection/informers/pipeline/v1beta1/stepaction/stepaction.go
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
-Copyright 2020 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by injection-gen. DO NOT EDIT.
-
-package stepaction
-
-import (
- context "context"
-
- v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1"
- factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory"
- controller "knative.dev/pkg/controller"
- injection "knative.dev/pkg/injection"
- logging "knative.dev/pkg/logging"
-)
-
-func init() {
- injection.Default.RegisterInformer(withInformer)
-}
-
-// Key is used for associating the Informer inside the context.Context.
-type Key struct{}
-
-func withInformer(ctx context.Context) (context.Context, controller.Informer) {
- f := factory.Get(ctx)
- inf := f.Tekton().V1beta1().StepActions()
- return context.WithValue(ctx, Key{}, inf), inf.Informer()
-}
-
-// Get extracts the typed informer from the context.
-func Get(ctx context.Context) v1beta1.StepActionInformer {
- untyped := ctx.Value(Key{})
- if untyped == nil {
- logging.FromContext(ctx).Panic(
- "Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1.StepActionInformer from context.")
- }
- return untyped.(v1beta1.StepActionInformer)
-}
diff --git a/upstream/pkg/client/injection/reconciler/pipeline/v1beta1/stepaction/controller.go b/upstream/pkg/client/injection/reconciler/pipeline/v1beta1/stepaction/controller.go
deleted file mode 100644
index b5caba0fdb6..00000000000
--- a/upstream/pkg/client/injection/reconciler/pipeline/v1beta1/stepaction/controller.go
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
-Copyright 2020 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by injection-gen. DO NOT EDIT.
-
-package stepaction
-
-import (
- context "context"
- fmt "fmt"
- reflect "reflect"
- strings "strings"
-
- versionedscheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
- client "github.com/tektoncd/pipeline/pkg/client/injection/client"
- stepaction "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/stepaction"
- zap "go.uber.org/zap"
- corev1 "k8s.io/api/core/v1"
- labels "k8s.io/apimachinery/pkg/labels"
- types "k8s.io/apimachinery/pkg/types"
- watch "k8s.io/apimachinery/pkg/watch"
- scheme "k8s.io/client-go/kubernetes/scheme"
- v1 "k8s.io/client-go/kubernetes/typed/core/v1"
- record "k8s.io/client-go/tools/record"
- kubeclient "knative.dev/pkg/client/injection/kube/client"
- controller "knative.dev/pkg/controller"
- logging "knative.dev/pkg/logging"
- logkey "knative.dev/pkg/logging/logkey"
- reconciler "knative.dev/pkg/reconciler"
-)
-
-const (
- defaultControllerAgentName = "stepaction-controller"
- defaultFinalizerName = "stepactions.tekton.dev"
-)
-
-// NewImpl returns a controller.Impl that handles queuing and feeding work from
-// the queue through an implementation of controller.Reconciler, delegating to
-// the provided Interface and optional Finalizer methods. OptionsFn is used to return
-// controller.ControllerOptions to be used by the internal reconciler.
-func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
- logger := logging.FromContext(ctx)
-
- // Check the options function input. It should be 0 or 1.
- if len(optionsFns) > 1 {
- logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
- }
-
- stepactionInformer := stepaction.Get(ctx)
-
- lister := stepactionInformer.Lister()
-
- var promoteFilterFunc func(obj interface{}) bool
- var promoteFunc = func(bkt reconciler.Bucket) {}
-
- rec := &reconcilerImpl{
- LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
- PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
-
- // Signal promotion event
- promoteFunc(bkt)
-
- all, err := lister.List(labels.Everything())
- if err != nil {
- return err
- }
- for _, elt := range all {
- if promoteFilterFunc != nil {
- if ok := promoteFilterFunc(elt); !ok {
- continue
- }
- }
- enq(bkt, types.NamespacedName{
- Namespace: elt.GetNamespace(),
- Name: elt.GetName(),
- })
- }
- return nil
- },
- },
- Client: client.Get(ctx),
- Lister: lister,
- reconciler: r,
- finalizerName: defaultFinalizerName,
- }
-
- ctrType := reflect.TypeOf(r).Elem()
- ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
- ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")
-
- logger = logger.With(
- zap.String(logkey.ControllerType, ctrTypeName),
- zap.String(logkey.Kind, "tekton.dev.StepAction"),
- )
-
- impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
- agentName := defaultControllerAgentName
-
- // Pass impl to the options. Save any optional results.
- for _, fn := range optionsFns {
- opts := fn(impl)
- if opts.ConfigStore != nil {
- rec.configStore = opts.ConfigStore
- }
- if opts.FinalizerName != "" {
- rec.finalizerName = opts.FinalizerName
- }
- if opts.AgentName != "" {
- agentName = opts.AgentName
- }
- if opts.DemoteFunc != nil {
- rec.DemoteFunc = opts.DemoteFunc
- }
- if opts.PromoteFilterFunc != nil {
- promoteFilterFunc = opts.PromoteFilterFunc
- }
- if opts.PromoteFunc != nil {
- promoteFunc = opts.PromoteFunc
- }
- }
-
- rec.Recorder = createRecorder(ctx, agentName)
-
- return impl
-}
-
-func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
- logger := logging.FromContext(ctx)
-
- recorder := controller.GetEventRecorder(ctx)
- if recorder == nil {
- // Create event broadcaster
- logger.Debug("Creating event broadcaster")
- eventBroadcaster := record.NewBroadcaster()
- watches := []watch.Interface{
- eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
- eventBroadcaster.StartRecordingToSink(
- &v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
- }
- recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
- go func() {
- <-ctx.Done()
- for _, w := range watches {
- w.Stop()
- }
- }()
- }
-
- return recorder
-}
-
-func init() {
- versionedscheme.AddToScheme(scheme.Scheme)
-}
diff --git a/upstream/pkg/client/injection/reconciler/pipeline/v1beta1/stepaction/reconciler.go b/upstream/pkg/client/injection/reconciler/pipeline/v1beta1/stepaction/reconciler.go
deleted file mode 100644
index db1600a9be0..00000000000
--- a/upstream/pkg/client/injection/reconciler/pipeline/v1beta1/stepaction/reconciler.go
+++ /dev/null
@@ -1,365 +0,0 @@
-/*
-Copyright 2020 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by injection-gen. DO NOT EDIT.
-
-package stepaction
-
-import (
- context "context"
- json "encoding/json"
- fmt "fmt"
-
- v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
- versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
- pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1"
- zap "go.uber.org/zap"
- v1 "k8s.io/api/core/v1"
- errors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- labels "k8s.io/apimachinery/pkg/labels"
- types "k8s.io/apimachinery/pkg/types"
- sets "k8s.io/apimachinery/pkg/util/sets"
- record "k8s.io/client-go/tools/record"
- controller "knative.dev/pkg/controller"
- logging "knative.dev/pkg/logging"
- reconciler "knative.dev/pkg/reconciler"
-)
-
-// Interface defines the strongly typed interfaces to be implemented by a
-// controller reconciling v1beta1.StepAction.
-type Interface interface {
- // ReconcileKind implements custom logic to reconcile v1beta1.StepAction. Any changes
- // to the objects .Status or .Finalizers will be propagated to the stored
- // object. It is recommended that implementors do not call any update calls
- // for the Kind inside of ReconcileKind, it is the responsibility of the calling
- // controller to propagate those properties. The resource passed to ReconcileKind
- // will always have an empty deletion timestamp.
- ReconcileKind(ctx context.Context, o *v1beta1.StepAction) reconciler.Event
-}
-
-// Finalizer defines the strongly typed interfaces to be implemented by a
-// controller finalizing v1beta1.StepAction.
-type Finalizer interface {
- // FinalizeKind implements custom logic to finalize v1beta1.StepAction. Any changes
- // to the objects .Status or .Finalizers will be ignored. Returning a nil or
- // Normal type reconciler.Event will allow the finalizer to be deleted on
- // the resource. The resource passed to FinalizeKind will always have a set
- // deletion timestamp.
- FinalizeKind(ctx context.Context, o *v1beta1.StepAction) reconciler.Event
-}
-
-// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
-// controller reconciling v1beta1.StepAction if they want to process resources for which
-// they are not the leader.
-type ReadOnlyInterface interface {
- // ObserveKind implements logic to observe v1beta1.StepAction.
- // This method should not write to the API.
- ObserveKind(ctx context.Context, o *v1beta1.StepAction) reconciler.Event
-}
-
-type doReconcile func(ctx context.Context, o *v1beta1.StepAction) reconciler.Event
-
-// reconcilerImpl implements controller.Reconciler for v1beta1.StepAction resources.
-type reconcilerImpl struct {
- // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
- reconciler.LeaderAwareFuncs
-
- // Client is used to write back status updates.
- Client versioned.Interface
-
- // Listers index properties about resources.
- Lister pipelinev1beta1.StepActionLister
-
- // Recorder is an event recorder for recording Event resources to the
- // Kubernetes API.
- Recorder record.EventRecorder
-
- // configStore allows for decorating a context with config maps.
- // +optional
- configStore reconciler.ConfigStore
-
- // reconciler is the implementation of the business logic of the resource.
- reconciler Interface
-
- // finalizerName is the name of the finalizer to reconcile.
- finalizerName string
-}
-
-// Check that our Reconciler implements controller.Reconciler.
-var _ controller.Reconciler = (*reconcilerImpl)(nil)
-
-// Check that our generated Reconciler is always LeaderAware.
-var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
-
-func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1beta1.StepActionLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler {
- // Check the options function input. It should be 0 or 1.
- if len(options) > 1 {
- logger.Fatal("Up to one options struct is supported, found: ", len(options))
- }
-
- // Fail fast when users inadvertently implement the other LeaderAware interface.
- // For the typed reconcilers, Promote shouldn't take any arguments.
- if _, ok := r.(reconciler.LeaderAware); ok {
- logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r)
- }
-
- rec := &reconcilerImpl{
- LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
- PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
- all, err := lister.List(labels.Everything())
- if err != nil {
- return err
- }
- for _, elt := range all {
- // TODO: Consider letting users specify a filter in options.
- enq(bkt, types.NamespacedName{
- Namespace: elt.GetNamespace(),
- Name: elt.GetName(),
- })
- }
- return nil
- },
- },
- Client: client,
- Lister: lister,
- Recorder: recorder,
- reconciler: r,
- finalizerName: defaultFinalizerName,
- }
-
- for _, opts := range options {
- if opts.ConfigStore != nil {
- rec.configStore = opts.ConfigStore
- }
- if opts.FinalizerName != "" {
- rec.finalizerName = opts.FinalizerName
- }
- if opts.DemoteFunc != nil {
- rec.DemoteFunc = opts.DemoteFunc
- }
- }
-
- return rec
-}
-
-// Reconcile implements controller.Reconciler
-func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
- logger := logging.FromContext(ctx)
-
- // Initialize the reconciler state. This will convert the namespace/name
- // string into a distinct namespace and name, determine if this instance of
- // the reconciler is the leader, and any additional interfaces implemented
- // by the reconciler. Returns an error is the resource key is invalid.
- s, err := newState(key, r)
- if err != nil {
- logger.Error("Invalid resource key: ", key)
- return nil
- }
-
- // If we are not the leader, and we don't implement either ReadOnly
- // observer interfaces, then take a fast-path out.
- if s.isNotLeaderNorObserver() {
- return controller.NewSkipKey(key)
- }
-
- // If configStore is set, attach the frozen configuration to the context.
- if r.configStore != nil {
- ctx = r.configStore.ToContext(ctx)
- }
-
- // Add the recorder to context.
- ctx = controller.WithEventRecorder(ctx, r.Recorder)
-
- // Get the resource with this namespace/name.
-
- getter := r.Lister.StepActions(s.namespace)
-
- original, err := getter.Get(s.name)
-
- if errors.IsNotFound(err) {
- // The resource may no longer exist, in which case we stop processing and call
- // the ObserveDeletion handler if appropriate.
- logger.Debugf("Resource %q no longer exists", key)
- if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
- return del.ObserveDeletion(ctx, types.NamespacedName{
- Namespace: s.namespace,
- Name: s.name,
- })
- }
- return nil
- } else if err != nil {
- return err
- }
-
- // Don't modify the informers copy.
- resource := original.DeepCopy()
-
- var reconcileEvent reconciler.Event
-
- name, do := s.reconcileMethodFor(resource)
- // Append the target method to the logger.
- logger = logger.With(zap.String("targetMethod", name))
- switch name {
- case reconciler.DoReconcileKind:
- // Set and update the finalizer on resource if r.reconciler
- // implements Finalizer.
- if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
- return fmt.Errorf("failed to set finalizers: %w", err)
- }
-
- // Reconcile this copy of the resource and then write back any status
- // updates regardless of whether the reconciliation errored out.
- reconcileEvent = do(ctx, resource)
-
- case reconciler.DoFinalizeKind:
- // For finalizing reconcilers, if this resource being marked for deletion
- // and reconciled cleanly (nil or normal event), remove the finalizer.
- reconcileEvent = do(ctx, resource)
-
- if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
- return fmt.Errorf("failed to clear finalizers: %w", err)
- }
-
- case reconciler.DoObserveKind:
- // Observe any changes to this resource, since we are not the leader.
- reconcileEvent = do(ctx, resource)
-
- }
-
- // Report the reconciler event, if any.
- if reconcileEvent != nil {
- var event *reconciler.ReconcilerEvent
- if reconciler.EventAs(reconcileEvent, &event) {
- logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
- r.Recorder.Event(resource, event.EventType, event.Reason, event.Error())
-
- // the event was wrapped inside an error, consider the reconciliation as failed
- if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent {
- return reconcileEvent
- }
- return nil
- }
-
- if controller.IsSkipKey(reconcileEvent) {
- // This is a wrapped error, don't emit an event.
- } else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok {
- // This is a wrapped error, don't emit an event.
- } else {
- logger.Errorw("Returned an error", zap.Error(reconcileEvent))
- r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
- }
- return reconcileEvent
- }
-
- return nil
-}
-
-// updateFinalizersFiltered will update the Finalizers of the resource.
-// TODO: this method could be generic and sync all finalizers. For now it only
-// updates defaultFinalizerName or its override.
-func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1beta1.StepAction, desiredFinalizers sets.Set[string]) (*v1beta1.StepAction, error) {
- // Don't modify the informers copy.
- existing := resource.DeepCopy()
-
- var finalizers []string
-
- // If there's nothing to update, just return.
- existingFinalizers := sets.New[string](existing.Finalizers...)
-
- if desiredFinalizers.Has(r.finalizerName) {
- if existingFinalizers.Has(r.finalizerName) {
- // Nothing to do.
- return resource, nil
- }
- // Add the finalizer.
- finalizers = append(existing.Finalizers, r.finalizerName)
- } else {
- if !existingFinalizers.Has(r.finalizerName) {
- // Nothing to do.
- return resource, nil
- }
- // Remove the finalizer.
- existingFinalizers.Delete(r.finalizerName)
- finalizers = sets.List(existingFinalizers)
- }
-
- mergePatch := map[string]interface{}{
- "metadata": map[string]interface{}{
- "finalizers": finalizers,
- "resourceVersion": existing.ResourceVersion,
- },
- }
-
- patch, err := json.Marshal(mergePatch)
- if err != nil {
- return resource, err
- }
-
- patcher := r.Client.TektonV1beta1().StepActions(resource.Namespace)
-
- resourceName := resource.Name
- updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{})
- if err != nil {
- r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed",
- "Failed to update finalizers for %q: %v", resourceName, err)
- } else {
- r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate",
- "Updated %q finalizers", resource.GetName())
- }
- return updated, err
-}
-
-func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1beta1.StepAction) (*v1beta1.StepAction, error) {
- if _, ok := r.reconciler.(Finalizer); !ok {
- return resource, nil
- }
-
- finalizers := sets.New[string](resource.Finalizers...)
-
- // If this resource is not being deleted, mark the finalizer.
- if resource.GetDeletionTimestamp().IsZero() {
- finalizers.Insert(r.finalizerName)
- }
-
- // Synchronize the finalizers filtered by r.finalizerName.
- return r.updateFinalizersFiltered(ctx, resource, finalizers)
-}
-
-func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1beta1.StepAction, reconcileEvent reconciler.Event) (*v1beta1.StepAction, error) {
- if _, ok := r.reconciler.(Finalizer); !ok {
- return resource, nil
- }
- if resource.GetDeletionTimestamp().IsZero() {
- return resource, nil
- }
-
- finalizers := sets.New[string](resource.Finalizers...)
-
- if reconcileEvent != nil {
- var event *reconciler.ReconcilerEvent
- if reconciler.EventAs(reconcileEvent, &event) {
- if event.EventType == v1.EventTypeNormal {
- finalizers.Delete(r.finalizerName)
- }
- }
- } else {
- finalizers.Delete(r.finalizerName)
- }
-
- // Synchronize the finalizers filtered by r.finalizerName.
- return r.updateFinalizersFiltered(ctx, resource, finalizers)
-}
diff --git a/upstream/pkg/client/injection/reconciler/pipeline/v1beta1/stepaction/state.go b/upstream/pkg/client/injection/reconciler/pipeline/v1beta1/stepaction/state.go
deleted file mode 100644
index fa6e4b914f8..00000000000
--- a/upstream/pkg/client/injection/reconciler/pipeline/v1beta1/stepaction/state.go
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
-Copyright 2020 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by injection-gen. DO NOT EDIT.
-
-package stepaction
-
-import (
- fmt "fmt"
-
- v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
- types "k8s.io/apimachinery/pkg/types"
- cache "k8s.io/client-go/tools/cache"
- reconciler "knative.dev/pkg/reconciler"
-)
-
-// state is used to track the state of a reconciler in a single run.
-type state struct {
- // key is the original reconciliation key from the queue.
- key string
- // namespace is the namespace split from the reconciliation key.
- namespace string
- // name is the name split from the reconciliation key.
- name string
- // reconciler is the reconciler.
- reconciler Interface
- // roi is the read only interface cast of the reconciler.
- roi ReadOnlyInterface
- // isROI (Read Only Interface) the reconciler only observes reconciliation.
- isROI bool
- // isLeader the instance of the reconciler is the elected leader.
- isLeader bool
-}
-
-func newState(key string, r *reconcilerImpl) (*state, error) {
- // Convert the namespace/name string into a distinct namespace and name.
- namespace, name, err := cache.SplitMetaNamespaceKey(key)
- if err != nil {
- return nil, fmt.Errorf("invalid resource key: %s", key)
- }
-
- roi, isROI := r.reconciler.(ReadOnlyInterface)
-
- isLeader := r.IsLeaderFor(types.NamespacedName{
- Namespace: namespace,
- Name: name,
- })
-
- return &state{
- key: key,
- namespace: namespace,
- name: name,
- reconciler: r.reconciler,
- roi: roi,
- isROI: isROI,
- isLeader: isLeader,
- }, nil
-}
-
-// isNotLeaderNorObserver checks to see if this reconciler with the current
-// state is enabled to do any work or not.
-// isNotLeaderNorObserver returns true when there is no work possible for the
-// reconciler.
-func (s *state) isNotLeaderNorObserver() bool {
- if !s.isLeader && !s.isROI {
- // If we are not the leader, and we don't implement the ReadOnly
- // interface, then take a fast-path out.
- return true
- }
- return false
-}
-
-func (s *state) reconcileMethodFor(o *v1beta1.StepAction) (string, doReconcile) {
- if o.GetDeletionTimestamp().IsZero() {
- if s.isLeader {
- return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
- } else if s.isROI {
- return reconciler.DoObserveKind, s.roi.ObserveKind
- }
- } else if fin, ok := s.reconciler.(Finalizer); s.isLeader && ok {
- return reconciler.DoFinalizeKind, fin.FinalizeKind
- }
- return "unknown", nil
-}
diff --git a/upstream/pkg/client/listers/pipeline/v1beta1/expansion_generated.go b/upstream/pkg/client/listers/pipeline/v1beta1/expansion_generated.go
index 0fe1994d1d8..db5d996e615 100644
--- a/upstream/pkg/client/listers/pipeline/v1beta1/expansion_generated.go
+++ b/upstream/pkg/client/listers/pipeline/v1beta1/expansion_generated.go
@@ -46,14 +46,6 @@ type PipelineRunListerExpansion interface{}
// PipelineRunNamespaceLister.
type PipelineRunNamespaceListerExpansion interface{}
-// StepActionListerExpansion allows custom methods to be added to
-// StepActionLister.
-type StepActionListerExpansion interface{}
-
-// StepActionNamespaceListerExpansion allows custom methods to be added to
-// StepActionNamespaceLister.
-type StepActionNamespaceListerExpansion interface{}
-
// TaskListerExpansion allows custom methods to be added to
// TaskLister.
type TaskListerExpansion interface{}
diff --git a/upstream/pkg/client/listers/pipeline/v1beta1/stepaction.go b/upstream/pkg/client/listers/pipeline/v1beta1/stepaction.go
deleted file mode 100644
index 1925738258d..00000000000
--- a/upstream/pkg/client/listers/pipeline/v1beta1/stepaction.go
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
-Copyright 2020 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by lister-gen. DO NOT EDIT.
-
-package v1beta1
-
-import (
- v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
- "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/client-go/tools/cache"
-)
-
-// StepActionLister helps list StepActions.
-// All objects returned here must be treated as read-only.
-type StepActionLister interface {
- // List lists all StepActions in the indexer.
- // Objects returned here must be treated as read-only.
- List(selector labels.Selector) (ret []*v1beta1.StepAction, err error)
- // StepActions returns an object that can list and get StepActions.
- StepActions(namespace string) StepActionNamespaceLister
- StepActionListerExpansion
-}
-
-// stepActionLister implements the StepActionLister interface.
-type stepActionLister struct {
- indexer cache.Indexer
-}
-
-// NewStepActionLister returns a new StepActionLister.
-func NewStepActionLister(indexer cache.Indexer) StepActionLister {
- return &stepActionLister{indexer: indexer}
-}
-
-// List lists all StepActions in the indexer.
-func (s *stepActionLister) List(selector labels.Selector) (ret []*v1beta1.StepAction, err error) {
- err = cache.ListAll(s.indexer, selector, func(m interface{}) {
- ret = append(ret, m.(*v1beta1.StepAction))
- })
- return ret, err
-}
-
-// StepActions returns an object that can list and get StepActions.
-func (s *stepActionLister) StepActions(namespace string) StepActionNamespaceLister {
- return stepActionNamespaceLister{indexer: s.indexer, namespace: namespace}
-}
-
-// StepActionNamespaceLister helps list and get StepActions.
-// All objects returned here must be treated as read-only.
-type StepActionNamespaceLister interface {
- // List lists all StepActions in the indexer for a given namespace.
- // Objects returned here must be treated as read-only.
- List(selector labels.Selector) (ret []*v1beta1.StepAction, err error)
- // Get retrieves the StepAction from the indexer for a given namespace and name.
- // Objects returned here must be treated as read-only.
- Get(name string) (*v1beta1.StepAction, error)
- StepActionNamespaceListerExpansion
-}
-
-// stepActionNamespaceLister implements the StepActionNamespaceLister
-// interface.
-type stepActionNamespaceLister struct {
- indexer cache.Indexer
- namespace string
-}
-
-// List lists all StepActions in the indexer for a given namespace.
-func (s stepActionNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.StepAction, err error) {
- err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
- ret = append(ret, m.(*v1beta1.StepAction))
- })
- return ret, err
-}
-
-// Get retrieves the StepAction from the indexer for a given namespace and name.
-func (s stepActionNamespaceLister) Get(name string) (*v1beta1.StepAction, error) {
- obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
- if err != nil {
- return nil, err
- }
- if !exists {
- return nil, errors.NewNotFound(v1beta1.Resource("stepaction"), name)
- }
- return obj.(*v1beta1.StepAction), nil
-}
diff --git a/upstream/pkg/client/resolution/informers/externalversions/factory.go b/upstream/pkg/client/resolution/informers/externalversions/factory.go
index d6e309712f3..840022745d4 100644
--- a/upstream/pkg/client/resolution/informers/externalversions/factory.go
+++ b/upstream/pkg/client/resolution/informers/externalversions/factory.go
@@ -42,7 +42,6 @@ type sharedInformerFactory struct {
lock sync.Mutex
defaultResync time.Duration
customResync map[reflect.Type]time.Duration
- transform cache.TransformFunc
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started.
@@ -81,14 +80,6 @@ func WithNamespace(namespace string) SharedInformerOption {
}
}
-// WithTransform sets a transform on all informers.
-func WithTransform(transform cache.TransformFunc) SharedInformerOption {
- return func(factory *sharedInformerFactory) *sharedInformerFactory {
- factory.transform = transform
- return factory
- }
-}
-
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync)
@@ -193,7 +184,6 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal
}
informer = newFunc(f.client, resyncPeriod)
- informer.SetTransform(f.transform)
f.informers[informerType] = informer
return informer
diff --git a/upstream/pkg/client/resource/informers/externalversions/factory.go b/upstream/pkg/client/resource/informers/externalversions/factory.go
index f03f0976e5e..6f20faef324 100644
--- a/upstream/pkg/client/resource/informers/externalversions/factory.go
+++ b/upstream/pkg/client/resource/informers/externalversions/factory.go
@@ -42,7 +42,6 @@ type sharedInformerFactory struct {
lock sync.Mutex
defaultResync time.Duration
customResync map[reflect.Type]time.Duration
- transform cache.TransformFunc
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started.
@@ -81,14 +80,6 @@ func WithNamespace(namespace string) SharedInformerOption {
}
}
-// WithTransform sets a transform on all informers.
-func WithTransform(transform cache.TransformFunc) SharedInformerOption {
- return func(factory *sharedInformerFactory) *sharedInformerFactory {
- factory.transform = transform
- return factory
- }
-}
-
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync)
@@ -193,7 +184,6 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal
}
informer = newFunc(f.client, resyncPeriod)
- informer.SetTransform(f.transform)
f.informers[informerType] = informer
return informer
diff --git a/upstream/pkg/container/step_replacements.go b/upstream/pkg/container/step_replacements.go
index e30c3335bac..921995157a2 100644
--- a/upstream/pkg/container/step_replacements.go
+++ b/upstream/pkg/container/step_replacements.go
@@ -31,7 +31,6 @@ func ApplyStepReplacements(step *v1.Step, stringReplacements map[string]string,
if step.StderrConfig != nil {
step.StderrConfig.Path = substitution.ApplyReplacements(step.StderrConfig.Path, stringReplacements)
}
- step.When = step.When.ReplaceVariables(stringReplacements, arrayReplacements)
applyStepReplacements(step, stringReplacements, arrayReplacements)
}
diff --git a/upstream/pkg/container/step_replacements_test.go b/upstream/pkg/container/step_replacements_test.go
index ba45fee8901..4da6e4acf70 100644
--- a/upstream/pkg/container/step_replacements_test.go
+++ b/upstream/pkg/container/step_replacements_test.go
@@ -23,7 +23,6 @@ import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/container"
corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/selection"
)
func TestApplyStepReplacements(t *testing.T) {
@@ -44,12 +43,6 @@ func TestApplyStepReplacements(t *testing.T) {
Args: []string{"$(array.replace.me)"},
WorkingDir: "$(replace.me)",
OnError: "$(replace.me)",
- When: v1.StepWhenExpressions{{
- Input: "$(replace.me)",
- Operator: selection.In,
- Values: []string{"$(array.replace.me)"},
- CEL: "'$(replace.me)=bar'",
- }},
EnvFrom: []corev1.EnvFromSource{{
ConfigMapRef: &corev1.ConfigMapEnvSource{
LocalObjectReference: corev1.LocalObjectReference{
@@ -101,12 +94,6 @@ func TestApplyStepReplacements(t *testing.T) {
Args: []string{"val1", "val2"},
WorkingDir: "replaced!",
OnError: "replaced!",
- When: v1.StepWhenExpressions{{
- Input: "replaced!",
- Operator: selection.In,
- Values: []string{"val1", "val2"},
- CEL: "'replaced!=bar'",
- }},
EnvFrom: []corev1.EnvFromSource{{
ConfigMapRef: &corev1.ConfigMapEnvSource{
LocalObjectReference: corev1.LocalObjectReference{
diff --git a/upstream/pkg/entrypoint/entrypointer.go b/upstream/pkg/entrypoint/entrypointer.go
index b3913665a75..8ae4c1f2cfd 100644
--- a/upstream/pkg/entrypoint/entrypointer.go
+++ b/upstream/pkg/entrypoint/entrypointer.go
@@ -40,8 +40,6 @@ import (
"github.com/tektoncd/pipeline/pkg/result"
"github.com/tektoncd/pipeline/pkg/spire"
"github.com/tektoncd/pipeline/pkg/termination"
-
- "github.com/google/cel-go/cel"
"go.uber.org/zap"
)
@@ -52,22 +50,6 @@ const (
FailOnError = "stopAndFail"
)
-const (
- breakpointExitSuffix = ".breakpointexit"
- breakpointBeforeStepSuffix = ".beforestepexit"
-)
-
-// DebugBeforeStepError is an error means mark before step breakpoint failure
-type DebugBeforeStepError string
-
-func (e DebugBeforeStepError) Error() string {
- return string(e)
-}
-
-var (
- errDebugBeforeStep = DebugBeforeStepError("before step breakpoint error file, user decided to skip the current step execution")
-)
-
// ScriptDir for testing
var ScriptDir = pipeline.ScriptDir
@@ -138,8 +120,6 @@ type Entrypointer struct {
Timeout *time.Duration
// BreakpointOnFailure helps determine if entrypoint execution needs to adapt debugging requirements
BreakpointOnFailure bool
- // DebugBeforeStep help user attach container before execution
- DebugBeforeStep bool
// OnError defines exiting behavior of the entrypoint
// set it to "stopAndFail" to indicate the entrypoint to exit the taskRun if the container exits with non zero exit code
// set it to "continue" to indicate the entrypoint to continue executing the rest of the steps irrespective of the container exit code
@@ -152,12 +132,6 @@ type Entrypointer struct {
ResultsDirectory string
// ResultExtractionMethod is the method using which the controller extracts the results from the task pod.
ResultExtractionMethod string
-
- // StepWhenExpressions a list of when expression to decide if the step should be skipped
- StepWhenExpressions v1.StepWhenExpressions
-
- // ArtifactsDirectory is the directory to find artifacts, defaults to pipeline.ArtifactsDir
- ArtifactsDirectory string
}
// Waiter encapsulates waiting for files to exist.
@@ -219,17 +193,13 @@ func (e Entrypointer) Go() error {
}
}
- var err error
- if e.DebugBeforeStep {
- err = e.waitBeforeStepDebug()
- }
-
output = append(output, result.RunResult{
Key: "StartedAt",
Value: time.Now().Format(timeFormat),
ResultType: result.InternalTektonResultType,
})
+ var err error
if e.Timeout != nil && *e.Timeout < time.Duration(0) {
err = errors.New("negative timeout specified")
}
@@ -254,26 +224,11 @@ func (e Entrypointer) Go() error {
logger.Error("Error while waiting for cancellation", zap.Error(err))
}
}()
- allowExec, err1 := e.allowExec()
-
- switch {
- case err1 != nil:
- err = err1
- case allowExec:
- err = e.Runner.Run(ctx, e.Command...)
- default:
- logger.Info("Step was skipped due to when expressions were evaluated to false.")
- output = append(output, e.outputRunResult(pod.TerminationReasonSkipped))
- e.WritePostFile(e.PostFile, nil)
- e.WriteExitCodeFile(e.StepMetadataDir, "0")
- return nil
- }
+ err = e.Runner.Run(ctx, e.Command...)
}
var ee *exec.ExitError
switch {
- case err != nil && errors.Is(err, errDebugBeforeStep):
- e.WritePostFile(e.PostFile, err)
case err != nil && errors.Is(err, ErrContextCanceled):
logger.Info("Step was canceling")
output = append(output, e.outputRunResult(pod.TerminationReasonCancelled))
@@ -325,13 +280,19 @@ func (e Entrypointer) Go() error {
}
if e.ResultExtractionMethod == config.ResultExtractionMethodTerminationMessage {
- e.appendArtifactOutputs(&output, logger)
+ fp := filepath.Join(e.StepMetadataDir, "artifacts", "provenance.json")
+
+ artifacts, err := readArtifacts(fp)
+ if err != nil {
+ logger.Fatalf("Error while handling artifacts: %s", err)
+ }
+ output = append(output, artifacts...)
}
return err
}
-func readArtifacts(fp string, resultType result.ResultType) ([]result.RunResult, error) {
+func readArtifacts(fp string) ([]result.RunResult, error) {
file, err := os.ReadFile(fp)
if os.IsNotExist(err) {
return []result.RunResult{}, nil
@@ -339,85 +300,7 @@ func readArtifacts(fp string, resultType result.ResultType) ([]result.RunResult,
if err != nil {
return nil, err
}
- return []result.RunResult{{Key: fp, Value: string(file), ResultType: resultType}}, nil
-}
-
-func (e Entrypointer) appendArtifactOutputs(output *[]result.RunResult, logger *zap.SugaredLogger) {
- // step artifacts
- fp := filepath.Join(e.StepMetadataDir, "artifacts", "provenance.json")
- artifacts, err := readArtifacts(fp, result.StepArtifactsResultType)
- if err != nil {
- logger.Fatalf("Error while handling step artifacts: %s", err)
- }
- *output = append(*output, artifacts...)
-
- artifactsDir := pipeline.ArtifactsDir
- // task artifacts
- if e.ArtifactsDirectory != "" {
- artifactsDir = e.ArtifactsDirectory
- }
- fp = filepath.Join(artifactsDir, "provenance.json")
- artifacts, err = readArtifacts(fp, result.TaskRunArtifactsResultType)
- if err != nil {
- logger.Fatalf("Error while handling task artifacts: %s", err)
- }
- *output = append(*output, artifacts...)
-}
-
-func (e Entrypointer) allowExec() (bool, error) {
- when := e.StepWhenExpressions
- m := map[string]bool{}
-
- for _, we := range when {
- if we.CEL == "" {
- continue
- }
- b, ok := m[we.CEL]
- if ok && !b {
- return false, nil
- }
-
- env, err := cel.NewEnv()
- if err != nil {
- return false, err
- }
- ast, iss := env.Compile(we.CEL)
- if iss.Err() != nil {
- return false, iss.Err()
- }
- // Generate an evaluable instance of the Ast within the environment
- prg, err := env.Program(ast)
- if err != nil {
- return false, err
- }
- // Evaluate the CEL expression
- out, _, err := prg.Eval(map[string]interface{}{})
- if err != nil {
- return false, err
- }
-
- b, ok = out.Value().(bool)
- if !ok {
- return false, fmt.Errorf("the CEL expression %s is not evaluated to a boolean", we.CEL)
- }
- if !b {
- return false, err
- }
- m[we.CEL] = true
- }
- return when.AllowsExecution(m), nil
-}
-
-func (e Entrypointer) waitBeforeStepDebug() error {
- log.Println(`debug before step breakpoint has taken effect, waiting for user's decision:
-1) continue, use cmd: /tekton/debug/scripts/debug-beforestep-continue
-2) fail-continue, use cmd: /tekton/debug/scripts/debug-beforestep-fail-continue`)
- breakpointBeforeStepPostFile := e.PostFile + breakpointBeforeStepSuffix
- if waitErr := e.Waiter.Wait(context.Background(), breakpointBeforeStepPostFile, false, false); waitErr != nil {
- log.Println("error occurred while waiting for " + breakpointBeforeStepPostFile + " : " + errDebugBeforeStep.Error())
- return errDebugBeforeStep
- }
- return nil
+ return []result.RunResult{{Key: fp, Value: string(file), ResultType: result.ArtifactsResultType}}, nil
}
func (e Entrypointer) readResultsFromDisk(ctx context.Context, resultDir string, resultType result.ResultType) error {
@@ -498,28 +381,6 @@ func (e Entrypointer) waitingCancellation(ctx context.Context, cancel context.Ca
return nil
}
-// CheckForBreakpointOnFailure if step up breakpoint on failure
-// waiting breakpointExitPostFile to be written
-func (e Entrypointer) CheckForBreakpointOnFailure() {
- if e.BreakpointOnFailure {
- log.Println(`debug onFailure breakpoint has taken effect, waiting for user's decision:
-1) continue, use cmd: /tekton/debug/scripts/debug-continue
-2) fail-continue, use cmd: /tekton/debug/scripts/debug-fail-continue`)
- breakpointExitPostFile := e.PostFile + breakpointExitSuffix
- if waitErr := e.Waiter.Wait(context.Background(), breakpointExitPostFile, false, false); waitErr != nil {
- log.Println("error occurred while waiting for " + breakpointExitPostFile + " : " + waitErr.Error())
- }
- // get exitcode from .breakpointexit
- exitCode, readErr := e.BreakpointExitCode(breakpointExitPostFile)
- // if readErr exists, the exitcode with default to 0 as we would like
- // to encourage to continue running the next steps in the taskRun
- if readErr != nil {
- log.Println("error occurred while reading breakpoint exit code : " + readErr.Error())
- }
- os.Exit(exitCode)
- }
-}
-
// loadStepResult reads the step result file and returns the string, array or object result value.
func loadStepResult(stepDir string, stepName string, resultName string) (v1.ResultValue, error) {
v := v1.ResultValue{}
@@ -624,13 +485,6 @@ func (e *Entrypointer) applyStepResultSubstitutions(stepDir string) error {
if err := replaceEnv(stepDir); err != nil {
return err
}
-
- // replace when
- newWhen, err := replaceWhen(stepDir, e.StepWhenExpressions)
- if err != nil {
- return err
- }
- e.StepWhenExpressions = newWhen
// command + args
newCommand, err := replaceCommandAndArgs(e.Command, stepDir)
if err != nil {
@@ -640,58 +494,6 @@ func (e *Entrypointer) applyStepResultSubstitutions(stepDir string) error {
return nil
}
-func replaceWhen(stepDir string, when v1.StepWhenExpressions) (v1.StepWhenExpressions, error) {
- for i, w := range when {
- var newValues []string
- flag:
- for _, v := range when[i].Values {
- matches := resultref.StepResultRegex.FindAllStringSubmatch(v, -1)
- newV := v
- for _, m := range matches {
- replaceWithString, replaceWithArray, err := findReplacement(stepDir, m[0])
- if err != nil {
- return v1.WhenExpressions{}, err
- }
- // replaceWithString and replaceWithArray are mutually exclusive
- if len(replaceWithArray) > 0 {
- if v != m[0] {
- // it has to be exact in "$(steps..results.[*])" format, without anything else in the original string
- return nil, errors.New("value must be in \"$(steps..results.[*])\" format, when using array results")
- }
- newValues = append(newValues, replaceWithArray...)
- continue flag
- }
- newV = strings.ReplaceAll(newV, m[0], replaceWithString)
- }
- newValues = append(newValues, newV)
- }
- when[i].Values = newValues
-
- matches := resultref.StepResultRegex.FindAllStringSubmatch(w.Input, -1)
- v := when[i].Input
- for _, m := range matches {
- replaceWith, _, err := findReplacement(stepDir, m[0])
- if err != nil {
- return v1.StepWhenExpressions{}, err
- }
- v = strings.ReplaceAll(v, m[0], replaceWith)
- }
- when[i].Input = v
-
- matches = resultref.StepResultRegex.FindAllStringSubmatch(w.CEL, -1)
- c := when[i].CEL
- for _, m := range matches {
- replaceWith, _, err := findReplacement(stepDir, m[0])
- if err != nil {
- return v1.StepWhenExpressions{}, err
- }
- c = strings.ReplaceAll(c, m[0], replaceWith)
- }
- when[i].CEL = c
- }
- return when, nil
-}
-
// outputRunResult returns the run reason for a termination
func (e Entrypointer) outputRunResult(terminationReason string) result.RunResult {
return result.RunResult{
@@ -738,6 +540,7 @@ func getArtifactValues(dir string, template string) (string, error) {
}
// $(steps.stepName.outputs.artifactName) <- artifacts.Output[artifactName].Values
+ // $(steps.stepName.outputs) <- artifacts.Output[0].Values
var t []v1.Artifact
if artifactTemplate.Type == "outputs" {
t = artifacts.Outputs
@@ -745,6 +548,13 @@ func getArtifactValues(dir string, template string) (string, error) {
t = artifacts.Inputs
}
+ if artifactTemplate.ArtifactName == "" {
+ marshal, err := json.Marshal(t[0].Values)
+ if err != nil {
+ return "", err
+ }
+ return string(marshal), err
+ }
for _, ar := range t {
if ar.Name == artifactTemplate.ArtifactName {
marshal, err := json.Marshal(ar.Values)
@@ -758,7 +568,8 @@ func getArtifactValues(dir string, template string) (string, error) {
}
// parseArtifactTemplate parses an artifact template string and extracts relevant information into an ArtifactTemplate struct.
-// The artifact template is expected to be in the format "$(steps..outputs.)".
+//
+// The artifact template is expected to be in the format "$(steps.{step-name}.outputs.{artifact-name})" or "$(steps.{step-name}.outputs)".
func parseArtifactTemplate(template string) (ArtifactTemplate, error) {
if template == "" {
return ArtifactTemplate{}, errors.New("template is empty")
diff --git a/upstream/pkg/entrypoint/entrypointer_test.go b/upstream/pkg/entrypoint/entrypointer_test.go
index b5c423d1376..40aa53158f3 100644
--- a/upstream/pkg/entrypoint/entrypointer_test.go
+++ b/upstream/pkg/entrypoint/entrypointer_test.go
@@ -32,6 +32,7 @@ import (
"testing"
"time"
+ "github.com/google/go-cmp/cmp"
"github.com/tektoncd/pipeline/pkg/apis/config"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
@@ -40,10 +41,7 @@ import (
"github.com/tektoncd/pipeline/pkg/spire"
"github.com/tektoncd/pipeline/pkg/termination"
"github.com/tektoncd/pipeline/test/diff"
-
- "github.com/google/go-cmp/cmp"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/selection"
"knative.dev/pkg/logging"
)
@@ -142,9 +140,7 @@ func TestEntrypointer(t *testing.T) {
for _, c := range []struct {
desc, entrypoint, postFile, stepDir, stepDirLink string
waitFiles, args []string
- waitDebugFiles []string
breakpointOnFailure bool
- debugBeforeStep bool
}{{
desc: "do nothing",
}, {
@@ -175,17 +171,6 @@ func TestEntrypointer(t *testing.T) {
}, {
desc: "breakpointOnFailure to wait or not to wait ",
breakpointOnFailure: true,
- }, {
- desc: "breakpointBeforeStep to wait or not to wait",
- debugBeforeStep: true,
- waitFiles: []string{"waitforme"},
- waitDebugFiles: []string{".beforestepexit"},
- }, {
- desc: "all breakpoints to wait or not to wait",
- breakpointOnFailure: true,
- debugBeforeStep: true,
- waitFiles: []string{"waitforme", ".beforestepexit"},
- waitDebugFiles: []string{".beforestepexit"},
}} {
t.Run(c.desc, func(t *testing.T) {
fw, fr, fpw := &fakeWaiter{}, &fakeRunner{}, &fakePostWriter{}
@@ -207,7 +192,6 @@ func TestEntrypointer(t *testing.T) {
TerminationPath: terminationPath,
Timeout: &timeout,
BreakpointOnFailure: c.breakpointOnFailure,
- DebugBeforeStep: c.debugBeforeStep,
StepMetadataDir: c.stepDir,
}.Go()
if err != nil {
@@ -221,7 +205,7 @@ func TestEntrypointer(t *testing.T) {
if len(c.waitFiles) > 0 {
if fw.waited == nil {
t.Error("Wanted waited file, got nil")
- } else if !reflect.DeepEqual(fw.waited, append(c.waitFiles, c.waitDebugFiles...)) {
+ } else if !reflect.DeepEqual(fw.waited, c.waitFiles) {
t.Errorf("Waited for %v, want %v", fw.waited, c.waitFiles)
}
}
@@ -280,47 +264,6 @@ func TestEntrypointer(t *testing.T) {
}
}
-func TestCheckForBreakpointOnFailure(t *testing.T) {
- testCases := []struct {
- name string
- breakpointOnFailure bool
- }{
- {
- name: "set breakpoint on failure and exit with code 0",
- breakpointOnFailure: true,
- },
- {
- name: "unset breakpoint on failure",
- breakpointOnFailure: false,
- },
- }
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- tmp, err := os.CreateTemp("", "1*.breakpoint")
- if err != nil {
- t.Fatalf("error while creating temp file for testing exit code written by breakpoint")
- }
- breakpointFile, err := os.Create(tmp.Name() + breakpointExitSuffix)
- if err != nil {
- t.Fatalf("failed to create breakpoint waiting file, err: %v", err)
- }
- // write exit code to file
- if err = os.WriteFile(breakpointFile.Name(), []byte("0"), 0700); err != nil {
- t.Fatalf("failed writing to temp file create temp file for testing exit code written by breakpoint, err: %v", err)
- }
- e := Entrypointer{
- BreakpointOnFailure: tc.breakpointOnFailure,
- PostFile: tmp.Name(),
- Waiter: &fakeWaiter{},
- }
- defer func() {
- recover()
- }()
- e.CheckForBreakpointOnFailure()
- })
- }
-}
-
func TestReadResultsFromDisk(t *testing.T) {
for _, c := range []struct {
desc string
@@ -483,7 +426,6 @@ func TestEntrypointer_OnError(t *testing.T) {
desc, postFile, onError string
runner Runner
expectedError bool
- debugBeforeStep bool
}{{
desc: "the step is exiting with 1, ignore the step error when onError is set to continue",
runner: &fakeExitErrorRunner{},
@@ -508,13 +450,6 @@ func TestEntrypointer_OnError(t *testing.T) {
postFile: "step-one",
onError: FailOnError,
expectedError: false,
- }, {
- desc: "the step set debug before step, and before step breakpoint fail-continue",
- runner: &fakeRunner{},
- postFile: "step-one",
- onError: errDebugBeforeStep.Error(),
- debugBeforeStep: true,
- expectedError: true,
}} {
t.Run(c.desc, func(t *testing.T) {
fpw := &fakePostWriter{}
@@ -525,7 +460,7 @@ func TestEntrypointer_OnError(t *testing.T) {
terminationPath = terminationFile.Name()
defer os.Remove(terminationFile.Name())
}
- entry := Entrypointer{
+ err := Entrypointer{
Command: []string{"echo", "some", "args"},
WaitFiles: []string{},
PostFile: c.postFile,
@@ -534,23 +469,12 @@ func TestEntrypointer_OnError(t *testing.T) {
PostWriter: fpw,
TerminationPath: terminationPath,
OnError: c.onError,
- DebugBeforeStep: c.debugBeforeStep,
- }
- if c.expectedError && (c.debugBeforeStep) {
- entry.Waiter = &fakeErrorWaiter{}
- }
- err := entry.Go()
+ }.Go()
if c.expectedError && err == nil {
t.Fatalf("Entrypointer didn't fail")
}
- if c.expectedError && (c.debugBeforeStep) {
- if err.Error() != c.onError {
- t.Errorf("breakpoint fail-continue, want err: %s but got: %s", c.onError, err.Error())
- }
- }
-
if c.onError == ContinueOnError {
switch {
case fpw.wrote == nil:
@@ -838,34 +762,31 @@ func TestApplyStepResultSubstitutions_Env(t *testing.T) {
envValue string
want string
wantErr bool
- }{
- {
- name: "string param",
- stepName: "foo",
- resultName: "res",
- result: "Hello",
- envValue: "$(steps.foo.results.res)",
- want: "Hello",
- wantErr: false,
- },
- {
- name: "array param",
- stepName: "foo",
- resultName: "res",
- result: "[\"Hello\",\"World\"]",
- envValue: "$(steps.foo.results.res[1])",
- want: "World",
- wantErr: false,
- },
- {
- name: "object param",
- stepName: "foo",
- resultName: "res",
- result: "{\"hello\":\"World\"}",
- envValue: "$(steps.foo.results.res.hello)",
- want: "World",
- wantErr: false,
- },
+ }{{
+ name: "string param",
+ stepName: "foo",
+ resultName: "res",
+ result: "Hello",
+ envValue: "$(steps.foo.results.res)",
+ want: "Hello",
+ wantErr: false,
+ }, {
+ name: "array param",
+ stepName: "foo",
+ resultName: "res",
+ result: "[\"Hello\",\"World\"]",
+ envValue: "$(steps.foo.results.res[1])",
+ want: "World",
+ wantErr: false,
+ }, {
+ name: "object param",
+ stepName: "foo",
+ resultName: "res",
+ result: "{\"hello\":\"World\"}",
+ envValue: "$(steps.foo.results.res.hello)",
+ want: "World",
+ wantErr: false,
+ },
{
name: "interpolation multiple matches",
stepName: "foo",
@@ -874,8 +795,7 @@ func TestApplyStepResultSubstitutions_Env(t *testing.T) {
envValue: "$(steps.foo.results.res.first)-$(steps.foo.results.res.second)",
want: "hello-world",
wantErr: false,
- },
- {
+ }, {
name: "bad-result-format",
stepName: "foo",
resultName: "res",
@@ -883,8 +803,7 @@ func TestApplyStepResultSubstitutions_Env(t *testing.T) {
envValue: "echo $(steps.foo.results.res.hello.bar)",
want: "echo $(steps.foo.results.res.hello.bar)",
wantErr: true,
- },
- }
+ }}
stepDir := createTmpDir(t, "env-steps")
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
@@ -925,325 +844,87 @@ func TestApplyStepResultSubstitutions_Command(t *testing.T) {
command []string
want []string
wantErr bool
- }{
- {
- name: "string param",
- stepName: "foo",
- resultName: "res1",
- result: "Hello",
- command: []string{"$(steps.foo.results.res1)"},
- want: []string{"Hello"},
- wantErr: false,
- }, {
- name: "array param",
- stepName: "foo",
- resultName: "res",
- result: "[\"Hello\",\"World\"]",
- command: []string{"$(steps.foo.results.res[1])"},
- want: []string{"World"},
- wantErr: false,
- }, {
- name: "array param no index",
- stepName: "foo",
- resultName: "res",
- result: "[\"Hello\",\"World\"]",
- command: []string{"start", "$(steps.foo.results.res[*])", "stop"},
- want: []string{"start", "Hello", "World", "stop"},
- wantErr: false,
- }, {
- name: "object param",
- stepName: "foo",
- resultName: "res",
- result: "{\"hello\":\"World\"}",
- command: []string{"$(steps.foo.results.res.hello)"},
- want: []string{"World"},
- wantErr: false,
- }, {
- name: "bad-result-format",
- stepName: "foo",
- resultName: "res",
- result: "{\"hello\":\"World\"}",
- command: []string{"echo $(steps.foo.results.res.hello.bar)"},
- want: []string{"echo $(steps.foo.results.res.hello.bar)"},
- wantErr: true,
- }, {
- name: "array param no index, with extra string",
- stepName: "foo",
- resultName: "res",
- result: "[\"Hello\",\"World\"]",
- command: []string{"start", "$(steps.foo.results.res[*])bbb", "stop"},
- want: []string{"start", "$(steps.foo.results.res[*])bbb", "stop"},
- wantErr: true,
- }, {
- name: "array param, multiple matches",
- stepName: "foo",
- resultName: "res",
- result: "[\"Hello\",\"World\"]",
- command: []string{"$(steps.foo.results.res[0])-$(steps.foo.results.res[1])"},
- want: []string{"Hello-World"},
- wantErr: false,
- }, {
- name: "object param, multiple matches",
- stepName: "foo",
- resultName: "res",
- result: `{"first":"hello", "second":"world"}`,
- command: []string{"$(steps.foo.results.res.first)-$(steps.foo.results.res.second)"},
- want: []string{"hello-world"},
- wantErr: false,
- },
- }
- stepDir := createTmpDir(t, "command-steps")
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- resultPath := filepath.Join(stepDir, pod.GetContainerName(tc.stepName), "results")
- err := os.MkdirAll(resultPath, 0o750)
- if err != nil {
- log.Fatal(err)
- }
- resultFile := filepath.Join(resultPath, tc.resultName)
- err = os.WriteFile(resultFile, []byte(tc.result), 0o666)
- if err != nil {
- log.Fatal(err)
- }
- e := Entrypointer{
- Command: tc.command,
- }
- err = e.applyStepResultSubstitutions(stepDir)
- if tc.wantErr == false && err != nil {
- t.Fatalf("Did not expect and error but got: %v", err)
- } else if tc.wantErr == true && err == nil {
- t.Fatalf("Expected and error but did not get any.")
- }
- got := e.Command
- if d := cmp.Diff(tc.want, got); d != "" {
- t.Errorf("Entrypointer error diff %s", diff.PrintWantGot(d))
- }
- })
- }
-}
-
-func TestApplyStepWhenSubstitutions_Input(t *testing.T) {
- testCases := []struct {
- name string
- stepName string
- resultName string
- result string
- want v1.StepWhenExpressions
- when v1.StepWhenExpressions
- wantErr bool
}{{
name: "string param",
stepName: "foo",
- resultName: "res",
+ resultName: "res1",
result: "Hello",
- when: v1.StepWhenExpressions{{Input: "$(steps.foo.results.res)"}},
- want: v1.StepWhenExpressions{{Input: "Hello"}},
+ command: []string{"$(steps.foo.results.res1)"},
+ want: []string{"Hello"},
wantErr: false,
}, {
name: "array param",
stepName: "foo",
resultName: "res",
result: "[\"Hello\",\"World\"]",
- when: v1.StepWhenExpressions{{Input: "$(steps.foo.results.res[1])"}},
- want: v1.StepWhenExpressions{{Input: "World"}},
- wantErr: false,
- }, {
- name: "object param",
- stepName: "foo",
- resultName: "res",
- result: "{\"hello\":\"World\"}",
- when: v1.StepWhenExpressions{{Input: "$(steps.foo.results.res.hello)"}},
- want: v1.StepWhenExpressions{{Input: "World"}},
- wantErr: false,
- }, {
- name: "bad-result-format",
- stepName: "foo",
- resultName: "res",
- result: "{\"hello\":\"World\"}",
- when: v1.StepWhenExpressions{{Input: "$(steps.foo.results.res.hello.bar)"}},
- want: v1.StepWhenExpressions{{Input: "$(steps.foo.results.res.hello.bar)"}},
- wantErr: true,
- }}
- stepDir := createTmpDir(t, "when-input")
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- resultPath := filepath.Join(stepDir, pod.GetContainerName(tc.stepName), "results")
- err := os.MkdirAll(resultPath, 0750)
- if err != nil {
- log.Fatal(err)
- }
- resultFile := filepath.Join(resultPath, tc.resultName)
- err = os.WriteFile(resultFile, []byte(tc.result), 0666)
- if err != nil {
- log.Fatal(err)
- }
- e := Entrypointer{
- Command: []string{},
- StepWhenExpressions: tc.when,
- }
- err = e.applyStepResultSubstitutions(stepDir)
- if tc.wantErr == false && err != nil {
- t.Fatalf("Did not expect and error but got: %v", err)
- } else if tc.wantErr == true && err == nil {
- t.Fatalf("Expected and error but did not get any.")
- }
- got := e.StepWhenExpressions
- if d := cmp.Diff(got, tc.want); d != "" {
- t.Errorf("applyStepResultSubstitutions(): got %v; want %v", got, tc.want)
- }
- })
- }
-}
-
-func TestApplyStepWhenSubstitutions_CEL(t *testing.T) {
- testCases := []struct {
- name string
- stepName string
- resultName string
- result string
- want v1.StepWhenExpressions
- when v1.StepWhenExpressions
- wantErr bool
- }{{
- name: "string param",
- stepName: "foo",
- resultName: "res",
- result: "Hello",
- when: v1.StepWhenExpressions{{CEL: "$(steps.foo.results.res)"}},
- want: v1.StepWhenExpressions{{CEL: "Hello"}},
+ command: []string{"$(steps.foo.results.res[1])"},
+ want: []string{"World"},
wantErr: false,
}, {
- name: "array param",
+ name: "array param no index",
stepName: "foo",
resultName: "res",
result: "[\"Hello\",\"World\"]",
- when: v1.StepWhenExpressions{{CEL: "$(steps.foo.results.res[1])"}},
- want: v1.StepWhenExpressions{{CEL: "World"}},
+ command: []string{"start", "$(steps.foo.results.res[*])", "stop"},
+ want: []string{"start", "Hello", "World", "stop"},
wantErr: false,
}, {
name: "object param",
stepName: "foo",
resultName: "res",
result: "{\"hello\":\"World\"}",
- when: v1.StepWhenExpressions{{CEL: "$(steps.foo.results.res.hello)"}},
- want: v1.StepWhenExpressions{{CEL: "World"}},
+ command: []string{"$(steps.foo.results.res.hello)"},
+ want: []string{"World"},
wantErr: false,
}, {
name: "bad-result-format",
stepName: "foo",
resultName: "res",
result: "{\"hello\":\"World\"}",
- when: v1.StepWhenExpressions{{CEL: "$(steps.foo.results.res.hello.bar)"}},
- want: v1.StepWhenExpressions{{CEL: "$(steps.foo.results.res.hello.bar)"}},
+ command: []string{"echo $(steps.foo.results.res.hello.bar)"},
+ want: []string{"echo $(steps.foo.results.res.hello.bar)"},
wantErr: true,
- }}
- stepDir := createTmpDir(t, "when-CEL")
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- resultPath := filepath.Join(stepDir, pod.GetContainerName(tc.stepName), "results")
- err := os.MkdirAll(resultPath, 0750)
- if err != nil {
- log.Fatal(err)
- }
- resultFile := filepath.Join(resultPath, tc.resultName)
- err = os.WriteFile(resultFile, []byte(tc.result), 0666)
- if err != nil {
- log.Fatal(err)
- }
- e := Entrypointer{
- Command: []string{},
- StepWhenExpressions: tc.when,
- }
- err = e.applyStepResultSubstitutions(stepDir)
- if tc.wantErr == false && err != nil {
- t.Fatalf("Did not expect and error but got: %v", err)
- } else if tc.wantErr == true && err == nil {
- t.Fatalf("Expected and error but did not get any.")
- }
- got := e.StepWhenExpressions
- if d := cmp.Diff(got, tc.want); d != "" {
- t.Errorf("applyStepResultSubstitutions(): got %v; want %v", got, tc.want)
- }
- })
- }
-}
-
-func TestApplyStepWhenSubstitutions_Values(t *testing.T) {
- testCases := []struct {
- name string
- stepName string
- resultName string
- result string
- want v1.StepWhenExpressions
- when v1.StepWhenExpressions
- wantErr bool
- }{{
- name: "string param",
+ }, {
+ name: "array param no index, with extra string",
stepName: "foo",
resultName: "res",
- result: "Hello",
- when: v1.StepWhenExpressions{{Values: []string{"$(steps.foo.results.res)"}}},
- want: v1.StepWhenExpressions{{Values: []string{"Hello"}}},
- wantErr: false,
+ result: "[\"Hello\",\"World\"]",
+ command: []string{"start", "$(steps.foo.results.res[*])bbb", "stop"},
+ want: []string{"start", "$(steps.foo.results.res[*])bbb", "stop"},
+ wantErr: true,
}, {
- name: "array param, reference an element",
+ name: "array param, multiple matches",
stepName: "foo",
resultName: "res",
result: "[\"Hello\",\"World\"]",
- when: v1.StepWhenExpressions{{Values: []string{"$(steps.foo.results.res[1])"}}},
- want: v1.StepWhenExpressions{{Values: []string{"World"}}},
+ command: []string{"$(steps.foo.results.res[0])-$(steps.foo.results.res[1])"},
+ want: []string{"Hello-World"},
wantErr: false,
}, {
- name: "array param, reference whole array",
+ name: "object param, multiple matches",
stepName: "foo",
resultName: "res",
- result: "[\"Hello\",\"World\"]",
- when: v1.StepWhenExpressions{{Values: []string{"$(steps.foo.results.res[*])"}}},
- want: v1.StepWhenExpressions{{Values: []string{"Hello", "World"}}},
+ result: `{"first":"hello", "second":"world"}`,
+ command: []string{"$(steps.foo.results.res.first)-$(steps.foo.results.res.second)"},
+ want: []string{"hello-world"},
wantErr: false,
},
- {
- name: "array param, reference whole array with concatenation, error",
- stepName: "foo",
- resultName: "res",
- result: "[\"Hello\",\"World\"]",
- when: v1.StepWhenExpressions{{Values: []string{"$(steps.foo.results.res[*])1"}}},
- want: v1.StepWhenExpressions{{Values: []string{"$(steps.foo.results.res[*])1"}}},
- wantErr: true,
- },
- {
- name: "object param",
- stepName: "foo",
- resultName: "res",
- result: "{\"hello\":\"World\"}",
- when: v1.StepWhenExpressions{{Values: []string{"$(steps.foo.results.res.hello)"}}},
- want: v1.StepWhenExpressions{{Values: []string{"World"}}},
- wantErr: false,
- }, {
- name: "bad-result-format",
- stepName: "foo",
- resultName: "res",
- result: "{\"hello\":\"World\"}",
- when: v1.StepWhenExpressions{{Values: []string{"$(steps.foo.results.res.hello.bar)"}}},
- want: v1.StepWhenExpressions{{Values: []string{"$(steps.foo.results.res.hello.bar)"}}},
- wantErr: true,
- }}
- stepDir := createTmpDir(t, "when-values")
+ }
+ stepDir := createTmpDir(t, "command-steps")
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
resultPath := filepath.Join(stepDir, pod.GetContainerName(tc.stepName), "results")
- err := os.MkdirAll(resultPath, 0750)
+ err := os.MkdirAll(resultPath, 0o750)
if err != nil {
log.Fatal(err)
}
resultFile := filepath.Join(resultPath, tc.resultName)
- err = os.WriteFile(resultFile, []byte(tc.result), 0666)
+ err = os.WriteFile(resultFile, []byte(tc.result), 0o666)
if err != nil {
log.Fatal(err)
}
e := Entrypointer{
- Command: []string{},
- StepWhenExpressions: tc.when,
+ Command: tc.command,
}
err = e.applyStepResultSubstitutions(stepDir)
if tc.wantErr == false && err != nil {
@@ -1251,148 +932,14 @@ func TestApplyStepWhenSubstitutions_Values(t *testing.T) {
} else if tc.wantErr == true && err == nil {
t.Fatalf("Expected and error but did not get any.")
}
- got := e.StepWhenExpressions
- if d := cmp.Diff(got, tc.want); d != "" {
- t.Errorf("applyStepResultSubstitutions(): got %v; want %v", got, tc.want)
+ got := e.Command
+ if d := cmp.Diff(tc.want, got); d != "" {
+ t.Errorf("Entrypointer error diff %s", diff.PrintWantGot(d))
}
})
}
}
-func TestAllowExec(t *testing.T) {
- tests := []struct {
- name string
- whenExpressions v1.StepWhenExpressions
- expected bool
- wantErr bool
- }{{
- name: "in expression",
- whenExpressions: v1.StepWhenExpressions{
- {
- Input: "foo",
- Operator: selection.In,
- Values: []string{"foo", "bar"},
- },
- },
- expected: true,
- }, {
- name: "notin expression",
- whenExpressions: v1.StepWhenExpressions{
- {
- Input: "foobar",
- Operator: selection.NotIn,
- Values: []string{"foobar"},
- },
- },
- expected: false,
- }, {
- name: "multiple expressions - false",
- whenExpressions: v1.StepWhenExpressions{
- {
- Input: "foobar",
- Operator: selection.In,
- Values: []string{"foobar"},
- }, {
- Input: "foo",
- Operator: selection.In,
- Values: []string{"bar"},
- },
- },
- expected: false,
- }, {
- name: "multiple expressions - true",
- whenExpressions: v1.StepWhenExpressions{
- {
- Input: "foobar",
- Operator: selection.In,
- Values: []string{"foobar"},
- }, {
- Input: "foo",
- Operator: selection.NotIn,
- Values: []string{"bar"},
- },
- },
- expected: true,
- }, {
- name: "CEL is true",
- whenExpressions: v1.StepWhenExpressions{
- {
- CEL: "'foo'=='foo'",
- },
- },
- expected: true,
- }, {
- name: "CEL is false",
- whenExpressions: v1.StepWhenExpressions{
- {
- CEL: "'foo'!='foo'",
- },
- },
- expected: false,
- },
- {
- name: "multiple expressions - 1. CEL is true 2. In Op is false, expect false",
- whenExpressions: v1.StepWhenExpressions{
- {
- CEL: "'foo'=='foo'",
- },
- {
- Input: "foo",
- Operator: selection.In,
- Values: []string{"bar"},
- },
- },
- expected: false,
- },
- {
- name: "multiple expressions - 1. CEL is true 2. CEL is false, expect false",
- whenExpressions: v1.StepWhenExpressions{
- {
- CEL: "'foo'=='foo'",
- },
- {
- CEL: "'xxx'!='xxx'",
- },
- },
- expected: false,
- },
- {
- name: "CEL is not evaluated to bool",
- whenExpressions: v1.StepWhenExpressions{
- {
- CEL: "'foo'",
- },
- },
- expected: false,
- wantErr: true,
- },
- {
- name: "CEL cannot be compiled",
- whenExpressions: v1.StepWhenExpressions{
- {
- CEL: "foo==foo",
- },
- },
- expected: false,
- wantErr: true,
- },
- }
-
- for _, tc := range tests {
- t.Run(tc.name, func(t *testing.T) {
- e := Entrypointer{
- StepWhenExpressions: tc.whenExpressions,
- }
- allowExec, err := e.allowExec()
- if d := cmp.Diff(allowExec, tc.expected); d != "" {
- t.Errorf("expected equlity of execution evalution, but got: %t, want: %t", allowExec, tc.expected)
- }
- if (err != nil) != tc.wantErr {
- t.Errorf("error checking failed, err %v", err)
- }
- })
- }
-}
func TestIsContextDeadlineError(t *testing.T) {
ctxErr := ContextError(context.DeadlineExceeded.Error())
if !IsContextDeadlineError(ctxErr) {
@@ -1425,7 +972,6 @@ func TestTerminationReason(t *testing.T) {
expectedExitCode *string
expectedWrotefile *string
expectedStatus []result.RunResult
- when v1.WhenExpressions
}{
{
desc: "reason completed",
@@ -1487,7 +1033,7 @@ func TestTerminationReason(t *testing.T) {
},
},
{
- desc: "reason skipped due to previous step error",
+ desc: "reason skipped",
waitFiles: []string{"file"},
expectedRunErr: ErrSkipPreviousStepFailed,
expectedWrotefile: ptr("postfile.err"),
@@ -1503,23 +1049,6 @@ func TestTerminationReason(t *testing.T) {
},
},
},
- {
- desc: "reason skipped due to when expressions evaluation",
- expectedExitCode: ptr("0"),
- expectedWrotefile: ptr("postfile"),
- when: v1.StepWhenExpressions{{Input: "foo", Operator: selection.In, Values: []string{"bar"}}},
- expectedStatus: []result.RunResult{
- {
- Key: "Reason",
- Value: pod.TerminationReasonSkipped,
- ResultType: result.InternalTektonResultType,
- },
- {
- Key: "StartedAt",
- ResultType: result.InternalTektonResultType,
- },
- },
- },
}
for _, test := range tests {
@@ -1549,7 +1078,6 @@ func TestTerminationReason(t *testing.T) {
BreakpointOnFailure: false,
StepMetadataDir: tmpFolder,
OnError: test.onError,
- StepWhenExpressions: test.when,
}
err = e.Go()
@@ -1582,7 +1110,7 @@ func TestReadArtifactsFileDoesNotExist(t *testing.T) {
t.Run("readArtifact file doesn't exist, empty result, no error.", func(t *testing.T) {
dir := createTmpDir(t, "")
fp := filepath.Join(dir, "provenance.json")
- got, err := readArtifacts(fp, result.StepArtifactsResultType)
+ got, err := readArtifacts(fp)
if err != nil {
t.Fatalf("Did not expect and error but got: %v", err)
@@ -1599,11 +1127,11 @@ func TestReadArtifactsFileExistNoError(t *testing.T) {
t.Run("readArtifact file exist", func(t *testing.T) {
dir := createTmpDir(t, "")
fp := filepath.Join(dir, "provenance.json")
- err := os.WriteFile(fp, []byte{}, 0o755)
+ err := os.WriteFile(fp, []byte{}, 0755)
if err != nil {
t.Fatalf("Did not expect and error but got: %v", err)
}
- got, err := readArtifacts(fp, result.StepArtifactsResultType)
+ got, err := readArtifacts(fp)
if err != nil {
t.Fatalf("Did not expect and error but got: %v", err)
@@ -1623,11 +1151,11 @@ func TestReadArtifactsFileExistReadError(t *testing.T) {
}
dir := createTmpDir(t, "")
fp := filepath.Join(dir, "provenance.json")
- err := os.WriteFile(fp, []byte{}, 0o000)
+ err := os.WriteFile(fp, []byte{}, 0000)
if err != nil {
t.Fatalf("Did not expect and error but got: %v", err)
}
- got, err := readArtifacts(fp, result.StepArtifactsResultType)
+ got, err := readArtifacts(fp)
if err == nil {
t.Fatalf("expecting error but got nil")
@@ -1660,18 +1188,18 @@ func TestLoadStepArtifacts(t *testing.T) {
}{
{
desc: "read artifact success",
- fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"image","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
+ fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"output","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
want: v1.Artifacts{
Inputs: []v1.Artifact{{Name: "inputs", Values: []v1.ArtifactValue{{
Digest: map[v1.Algorithm]string{"sha256": "cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},
Uri: "pkg:example.github.com/inputs",
}}}},
- Outputs: []v1.Artifact{{Name: "image", Values: []v1.ArtifactValue{{
+ Outputs: []v1.Artifact{{Name: "output", Values: []v1.ArtifactValue{{
Digest: map[v1.Algorithm]string{"sha256": "64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},
Uri: "docker:example.registry.com/outputs",
}}}},
},
- mode: 0o755,
+ mode: 0755,
},
{
desc: "read artifact file doesn't exist, error",
@@ -1681,26 +1209,26 @@ func TestLoadStepArtifacts(t *testing.T) {
{
desc: "read artifact, mal-formatted json, error",
fileContent: `{\\`,
- mode: 0o755,
+ mode: 0755,
wantErr: true,
},
{
desc: "read artifact, file cannot be read, error",
- fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"image","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
- mode: 0o000,
+ fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"output","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
+ mode: 0000,
wantErr: true,
},
}
for _, tc := range tests {
t.Run(tc.desc, func(t *testing.T) {
- if tc.mode == 0o000 && os.Getuid() == 0 {
+ if tc.mode == 0000 && os.Getuid() == 0 {
t.Skipf("Test doesn't work when running with root")
}
dir := createTmpDir(t, "")
name := "step-name"
artifactsPath := getStepArtifactsPath(dir, name)
if tc.fileContent != "" {
- err := os.MkdirAll(filepath.Dir(artifactsPath), 0o755)
+ err := os.MkdirAll(filepath.Dir(artifactsPath), 0755)
if err != nil {
t.Fatalf("fail to create dir %v", err)
}
@@ -1729,7 +1257,7 @@ func TestParseArtifactTemplate(t *testing.T) {
wantErr bool
}{
{
- desc: "valid outputs template",
+ desc: "valid outputs template with artifact name",
input: "$(steps.name.outputs.aaa)",
want: ArtifactTemplate{
ContainerName: "step-name",
@@ -1738,7 +1266,15 @@ func TestParseArtifactTemplate(t *testing.T) {
},
},
{
- desc: "valid inputs template",
+ desc: "valid outputs template without artifact name",
+ input: "$(steps.name.outputs)",
+ want: ArtifactTemplate{
+ Type: "outputs",
+ ContainerName: "step-name",
+ },
+ },
+ {
+ desc: "valid inputs template with artifact name",
input: "$(steps.name.inputs.aaa)",
want: ArtifactTemplate{
ContainerName: "step-name",
@@ -1746,6 +1282,19 @@ func TestParseArtifactTemplate(t *testing.T) {
ArtifactName: "aaa",
},
},
+ {
+ desc: "valid outputs template without artifact name",
+ input: "$(steps.name.inputs)",
+ want: ArtifactTemplate{
+ Type: "inputs",
+ ContainerName: "step-name",
+ },
+ },
+ {
+ desc: "invalid template without artifact name, no prefix and suffix",
+ input: "steps.name.outputs",
+ wantErr: true,
+ },
{
desc: "invalid template with artifact name, no prefix and suffix",
input: "steps.name.outputs.aaa",
@@ -1812,52 +1361,80 @@ func TestGetArtifactValues(t *testing.T) {
mode os.FileMode
template string
}{
+ {
+ desc: "read outputs artifact without artifact name, success",
+ fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"output","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
+ want: `[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]`,
+ mode: 0755,
+ template: fmt.Sprintf("$(steps.%s.outputs)", name),
+ },
+ {
+ desc: "read inputs artifact without artifact name, success",
+ fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"output","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
+ want: `[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]`,
+ mode: 0755,
+ template: fmt.Sprintf("$(steps.%s.inputs)", name),
+ },
+ {
+ desc: "read outputs artifact without artifact name, multiple outputs, default to first",
+ fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"output","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]},{"name":"output2","values":[{"digest":{"sha256":"22222157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f13402222"},"uri":"docker2:example.registry.com/outputs"}]}]}`,
+ want: `[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]`,
+ mode: 0755,
+ template: fmt.Sprintf("$(steps.%s.outputs)", name),
+ },
+ {
+ desc: "read inputs artifact without artifact name, multiple outputs, default to first",
+ fileContent: `{"outputs":[{"name":"out","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"inputs":[{"name":"in","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/inputs"}]},{"name":"in2","values":[{"digest":{"sha256":"22222157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f13402222"},"uri":"docker2:example.registry.com/inputs"}]}]}`,
+ want: `[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/inputs"}]`,
+ mode: 0755,
+ template: fmt.Sprintf("$(steps.%s.inputs)", name),
+ },
{
desc: "read outputs artifact with artifact name, success",
- fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"image","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
+ fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"output","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
want: `[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]`,
- mode: 0o755,
- template: fmt.Sprintf("$(steps.%s.outputs.image)", name),
+ mode: 0755,
+ template: fmt.Sprintf("$(steps.%s.outputs.output)", name),
},
{
desc: "read inputs artifact with artifact name, success",
fileContent: `{"outputs":[{"name":"outputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/outputs"}]}],"inputs":[{"name":"input","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/inputs"}]}]}`,
want: `[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/inputs"}]`,
- mode: 0o755,
+ mode: 0755,
template: fmt.Sprintf("$(steps.%s.inputs.input)", name),
},
{
desc: "read outputs artifact with artifact name, multiple outputs, success",
- fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"image","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]},{"name":"output2","values":[{"digest":{"sha256":"22222157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f13402222"},"uri":"docker2:example.registry.com/outputs"}]}]}`,
+ fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"output","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]},{"name":"output2","values":[{"digest":{"sha256":"22222157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f13402222"},"uri":"docker2:example.registry.com/outputs"}]}]}`,
want: `[{"digest":{"sha256":"22222157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f13402222"},"uri":"docker2:example.registry.com/outputs"}]`,
- mode: 0o755,
+ mode: 0755,
template: fmt.Sprintf("$(steps.%s.outputs.output2)", name),
},
{
desc: "read inputs artifact with artifact name, multiple inputs, success",
fileContent: `{"outputs":[{"name":"outputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/outputs"}]}],"inputs":[{"name":"input","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/inputs"}]},{"name":"input2","values":[{"digest":{"sha256":"22222157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f13402222"},"uri":"docker2:example.registry.com/inputs"}]}]}`,
want: `[{"digest":{"sha256":"22222157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f13402222"},"uri":"docker2:example.registry.com/inputs"}]`,
- mode: 0o755,
+ mode: 0755,
template: fmt.Sprintf("$(steps.%s.inputs.input2)", name),
},
{
desc: "invalid template",
- fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"image","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]},{"name":"output2","values":[{"digest":{"sha256":"22222157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f13402222"},"uri":"docker2:example.registry.com/outputs"}]}]}`,
- mode: 0o755,
+ fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"output","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]},{"name":"output2","values":[{"digest":{"sha256":"22222157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f13402222"},"uri":"docker2:example.registry.com/outputs"}]}]}`,
+ mode: 0755,
template: fmt.Sprintf("$(steps.%s.outputs.output2.333)", name),
wantErr: true,
},
{
desc: "fail to load artifacts",
- fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"image","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]},{"name":"output2","values":[{"digest":{"sha256":"22222157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f13402222"},"uri":"docker2:example.registry.com/outputs"}]}]}`,
- mode: 0o000,
+ fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"output","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]},{"name":"output2","values":[{"digest":{"sha256":"22222157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f13402222"},"uri":"docker2:example.registry.com/outputs"}]}]}`,
+ mode: 0000,
template: fmt.Sprintf("$(steps.%s.outputs.output2.333)", name),
wantErr: true,
},
{
desc: "template not found",
- fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"image","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]},{"name":"output2","values":[{"digest":{"sha256":"22222157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f13402222"},"uri":"docker2:example.registry.com/outputs"}]}]}`,
- mode: 0o755,
+ fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"output","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]},{"name":"output2","values":[{"digest":{"sha256":"22222157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f13402222"},"uri":"docker2:example.registry.com/outputs"}]}]}`,
+ mode: 0755,
template: fmt.Sprintf("$(steps.%s.outputs.output3)", name),
wantErr: true,
},
@@ -1865,13 +1442,13 @@ func TestGetArtifactValues(t *testing.T) {
for _, tc := range tests {
t.Run(tc.desc, func(t *testing.T) {
- if tc.mode == 0o000 && os.Getuid() == 0 {
+ if tc.mode == 0000 && os.Getuid() == 0 {
t.Skipf("Test doesn't work when running with root")
}
dir := createTmpDir(t, "")
artifactsPath := getStepArtifactsPath(dir, "step-"+name)
if tc.fileContent != "" {
- err := os.MkdirAll(filepath.Dir(artifactsPath), 0o755)
+ err := os.MkdirAll(filepath.Dir(artifactsPath), 0755)
if err != nil {
t.Fatalf("fail to create dir %v", err)
}
@@ -1914,10 +1491,10 @@ func TestApplyStepArtifactSubstitutionsCommandSuccess(t *testing.T) {
}{
{
desc: "apply substitution to command from script file, success",
- fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"image","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
+ fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"output","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
want: `echo [{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]`,
- mode: 0o755,
- scriptContent: fmt.Sprintf("echo $(steps.%s.outputs.image)", stepName),
+ mode: 0755,
+ scriptContent: fmt.Sprintf("echo $(steps.%s.outputs)", stepName),
scriptFile: filepath.Join(scriptDir, "foo.sh"),
command: []string{filepath.Join(scriptDir, "foo.sh")},
},
@@ -1927,7 +1504,7 @@ func TestApplyStepArtifactSubstitutionsCommandSuccess(t *testing.T) {
stepDir := createTmpDir(t, "")
artifactsPath := getStepArtifactsPath(stepDir, "step-"+stepName)
if tc.fileContent != "" {
- err := os.MkdirAll(filepath.Dir(artifactsPath), 0o755)
+ err := os.MkdirAll(filepath.Dir(artifactsPath), 0755)
if err != nil {
t.Fatalf("fail to create stepDir %v", err)
}
@@ -1937,7 +1514,7 @@ func TestApplyStepArtifactSubstitutionsCommandSuccess(t *testing.T) {
}
}
if tc.scriptContent != "" {
- err := os.WriteFile(tc.scriptFile, []byte(tc.scriptContent), 0o755)
+ err := os.WriteFile(tc.scriptFile, []byte(tc.scriptContent), 0755)
if err != nil {
t.Fatalf("failed to write script to scriptFile %v", err)
}
@@ -1958,7 +1535,6 @@ func TestApplyStepArtifactSubstitutionsCommandSuccess(t *testing.T) {
})
}
}
-
func TestApplyStepArtifactSubstitutionsCommand(t *testing.T) {
stepName := "name"
scriptDir := createTmpDir(t, "script")
@@ -1980,49 +1556,49 @@ func TestApplyStepArtifactSubstitutionsCommand(t *testing.T) {
}{
{
desc: "apply substitution script, fail to read artifacts",
- fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"image","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
+ fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"output","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
want: []string{filepath.Join(scriptDir, "foo2.sh")},
- mode: 0o000,
+ mode: 0000,
wantErr: true,
- scriptContent: fmt.Sprintf("echo $(steps.%s.outputs.image)", stepName),
+ scriptContent: fmt.Sprintf("echo $(steps.%s.outputs)", stepName),
scriptFile: filepath.Join(scriptDir, "foo2.sh"),
command: []string{filepath.Join(scriptDir, "foo2.sh")},
},
{
desc: "apply substitution to command from script file , no matches success",
- fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"image","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
+ fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"output","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
want: []string{filepath.Join(scriptDir, "bar.sh")},
- mode: 0o755,
+ mode: 0755,
scriptContent: "echo 123",
scriptFile: filepath.Join(scriptDir, "bar.sh"),
command: []string{filepath.Join(scriptDir, "bar.sh")},
},
{
desc: "apply substitution to inline command, success",
- fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"image","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
+ fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"output","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
want: []string{"echo", `[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]`, "|", "jq", "."},
- mode: 0o755,
- command: []string{"echo", fmt.Sprintf("$(steps.%s.outputs.image)", stepName), "|", "jq", "."},
+ mode: 0755,
+ command: []string{"echo", fmt.Sprintf("$(steps.%s.outputs)", stepName), "|", "jq", "."},
},
{
desc: "apply substitution to inline command, fail to read, command no change",
- fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"image","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
- want: []string{"echo", fmt.Sprintf("$(steps.%s.outputs.image)", stepName), "|", "jq", "."},
- mode: 0o000,
+ fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"output","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
+ want: []string{"echo", fmt.Sprintf("$(steps.%s.outputs)", stepName), "|", "jq", "."},
+ mode: 0000,
wantErr: true,
- command: []string{"echo", fmt.Sprintf("$(steps.%s.outputs.image)", stepName), "|", "jq", "."},
+ command: []string{"echo", fmt.Sprintf("$(steps.%s.outputs)", stepName), "|", "jq", "."},
},
}
for _, tc := range tests {
t.Run(tc.desc, func(t *testing.T) {
- if tc.mode == 0o000 && os.Getuid() == 0 {
+ if tc.mode == 0000 && os.Getuid() == 0 {
t.Skipf("Test doesn't work when running with root")
}
stepDir := createTmpDir(t, "")
artifactsPath := getStepArtifactsPath(stepDir, "step-"+stepName)
if tc.fileContent != "" {
- err := os.MkdirAll(filepath.Dir(artifactsPath), 0o755)
+ err := os.MkdirAll(filepath.Dir(artifactsPath), 0755)
if err != nil {
t.Fatalf("fail to create stepDir %v", err)
}
@@ -2032,7 +1608,7 @@ func TestApplyStepArtifactSubstitutionsCommand(t *testing.T) {
}
}
if tc.scriptContent != "" {
- err := os.WriteFile(tc.scriptFile, []byte(tc.scriptContent), 0o755)
+ err := os.WriteFile(tc.scriptFile, []byte(tc.scriptContent), 0755)
if err != nil {
t.Fatalf("failed to write script to scriptFile %v", err)
}
@@ -2070,40 +1646,40 @@ func TestApplyStepArtifactSubstitutionsEnv(t *testing.T) {
}{
{
desc: "apply substitution to env, no matches, no changes",
- fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"image","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
- mode: 0o755,
+ fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"output","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
+ mode: 0755,
envKey: "aaa",
envValue: "bbb",
want: "bbb",
},
{
desc: "apply substitution to env, matches found, has change",
- fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"image","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
- mode: 0o755,
+ fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"output","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
+ mode: 0755,
envKey: "aaa",
- envValue: fmt.Sprintf("abc-$(steps.%s.outputs.image)", stepName),
+ envValue: fmt.Sprintf("abc-$(steps.%s.outputs)", stepName),
want: `abc-[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]`,
},
{
desc: "apply substitution to env, matches found, read artifacts failed.",
- fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"image","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
- mode: 0o000,
+ fileContent: `{"inputs":[{"name":"inputs","values":[{"digest":{"sha256":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30"},"uri":"pkg:example.github.com/inputs"}]}],"outputs":[{"name":"output","values":[{"digest":{"sha256":"64d0b157fdf2d7f6548836dd82085fd8401c9481a9f59e554f1b337f134074b0"},"uri":"docker:example.registry.com/outputs"}]}]}`,
+ mode: 0000,
envKey: "aaa",
- envValue: fmt.Sprintf("abc-$(steps.%s.outputs.image)", stepName),
- want: fmt.Sprintf("abc-$(steps.%s.outputs.image)", stepName),
+ envValue: fmt.Sprintf("abc-$(steps.%s.outputs)", stepName),
+ want: fmt.Sprintf("abc-$(steps.%s.outputs)", stepName),
wantErr: true,
},
}
for _, tc := range tests {
t.Run(tc.desc, func(t *testing.T) {
- if tc.mode == 0o000 && os.Getuid() == 0 {
+ if tc.mode == 0000 && os.Getuid() == 0 {
t.Skipf("Test doesn't work when running with root")
}
stepDir := createTmpDir(t, "")
artifactsPath := getStepArtifactsPath(stepDir, "step-"+stepName)
if tc.fileContent != "" {
- err := os.MkdirAll(filepath.Dir(artifactsPath), 0o755)
+ err := os.MkdirAll(filepath.Dir(artifactsPath), 0755)
if err != nil {
t.Fatalf("fail to create stepDir %v", err)
}
@@ -2307,7 +1883,7 @@ func getMockSpireClient(ctx context.Context) (spire.EntrypointerAPIClient, spire
// bootstrap with about 20 calls to sign which should be enough for testing
id := sc.GetIdentity(tr)
- for range 20 {
+ for i := 0; i < 20; i++ {
sc.SignIdentities = append(sc.SignIdentities, id)
}
diff --git a/upstream/pkg/internal/affinityassistant/affinityassistant_types.go b/upstream/pkg/internal/affinityassistant/affinityassistant_types.go
index 15afe705b98..e4fddb4e23d 100644
--- a/upstream/pkg/internal/affinityassistant/affinityassistant_types.go
+++ b/upstream/pkg/internal/affinityassistant/affinityassistant_types.go
@@ -54,9 +54,3 @@ func GetAffinityAssistantBehavior(ctx context.Context) (AffinityAssistantBehavio
return "", fmt.Errorf("unknown combination of disable-affinity-assistant: %v and coschedule: %v", disableAA, coschedule)
}
-
-// ContainerConfig defines AffinityAssistant container configuration
-type ContainerConfig struct {
- Image string
- SetSecurityContext bool
-}
diff --git a/upstream/pkg/internal/defaultresourcerequirements/transformer.go b/upstream/pkg/internal/defaultresourcerequirements/transformer.go
deleted file mode 100644
index 69af7f842d1..00000000000
--- a/upstream/pkg/internal/defaultresourcerequirements/transformer.go
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package defaultresourcerequirements
-
-import (
- "context"
- "strings"
-
- "github.com/tektoncd/pipeline/pkg/apis/config"
- "github.com/tektoncd/pipeline/pkg/pod"
- corev1 "k8s.io/api/core/v1"
-)
-
-// NewTransformer returns a pod.Transformer that will modify container resources if needed
-func NewTransformer(ctx context.Context) pod.Transformer {
- // update init container and containers resource requirements
- // resource limits and requests values are taken from a config map
- configDefaults := config.FromContextOrDefaults(ctx).Defaults
- return func(pod *corev1.Pod) (*corev1.Pod, error) {
- return updateResourceRequirements(configDefaults.DefaultContainerResourceRequirements, pod), nil
- }
-}
-
-// updates init containers and containers resource requirements of a pod base of config_defaults configmap.
-func updateResourceRequirements(resourceRequirementsMap map[string]corev1.ResourceRequirements, pod *corev1.Pod) *corev1.Pod {
- if len(resourceRequirementsMap) == 0 {
- return pod
- }
-
- // collect all the available container names from the resource requirement map
- // some of the container names: place-scripts, prepare, working-dir-initializer
- // some of the container names with prefix: prefix-scripts, prefix-sidecar-scripts
- containerNames := []string{}
- containerNamesWithPrefix := []string{}
- for containerName := range resourceRequirementsMap {
- // skip the default key
- if containerName == config.ResourceRequirementDefaultContainerKey {
- continue
- }
-
- if strings.HasPrefix(containerName, "prefix-") {
- containerNamesWithPrefix = append(containerNamesWithPrefix, containerName)
- } else {
- containerNames = append(containerNames, containerName)
- }
- }
-
- // update the containers resource requirements which does not have resource requirements
- for _, containerName := range containerNames {
- resourceRequirements := resourceRequirementsMap[containerName]
- if resourceRequirements.Size() == 0 {
- continue
- }
-
- // update init containers
- for index := range pod.Spec.InitContainers {
- targetContainer := pod.Spec.InitContainers[index]
- if containerName == targetContainer.Name && targetContainer.Resources.Size() == 0 {
- pod.Spec.InitContainers[index].Resources = resourceRequirements
- }
- }
- // update containers
- for index := range pod.Spec.Containers {
- targetContainer := pod.Spec.Containers[index]
- if containerName == targetContainer.Name && targetContainer.Resources.Size() == 0 {
- pod.Spec.Containers[index].Resources = resourceRequirements
- }
- }
- }
-
- // update the containers resource requirements which does not have resource requirements with the mentioned prefix
- for _, containerPrefix := range containerNamesWithPrefix {
- resourceRequirements := resourceRequirementsMap[containerPrefix]
- if resourceRequirements.Size() == 0 {
- continue
- }
-
- // get actual container name, remove "prefix-" string and append "-" at the end
- // append '-' in the container prefix
- containerPrefix = strings.Replace(containerPrefix, "prefix-", "", 1)
- containerPrefix += "-"
-
- // update init containers
- for index := range pod.Spec.InitContainers {
- targetContainer := pod.Spec.InitContainers[index]
- if strings.HasPrefix(targetContainer.Name, containerPrefix) && targetContainer.Resources.Size() == 0 {
- pod.Spec.InitContainers[index].Resources = resourceRequirements
- }
- }
- // update containers
- for index := range pod.Spec.Containers {
- targetContainer := pod.Spec.Containers[index]
- if strings.HasPrefix(targetContainer.Name, containerPrefix) && targetContainer.Resources.Size() == 0 {
- pod.Spec.Containers[index].Resources = resourceRequirements
- }
- }
- }
-
- // reset of the containers resource requirements which has empty resource requirements
- if resourceRequirements, found := resourceRequirementsMap[config.ResourceRequirementDefaultContainerKey]; found && resourceRequirements.Size() != 0 {
- // update init containers
- for index := range pod.Spec.InitContainers {
- if pod.Spec.InitContainers[index].Resources.Size() == 0 {
- pod.Spec.InitContainers[index].Resources = resourceRequirements
- }
- }
- // update containers
- for index := range pod.Spec.Containers {
- if pod.Spec.Containers[index].Resources.Size() == 0 {
- pod.Spec.Containers[index].Resources = resourceRequirements
- }
- }
- }
-
- return pod
-}
diff --git a/upstream/pkg/internal/defaultresourcerequirements/transformer_test.go b/upstream/pkg/internal/defaultresourcerequirements/transformer_test.go
deleted file mode 100644
index 6febc92e63c..00000000000
--- a/upstream/pkg/internal/defaultresourcerequirements/transformer_test.go
+++ /dev/null
@@ -1,416 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package defaultresourcerequirements
-
-import (
- "context"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/tektoncd/pipeline/pkg/apis/config"
- "github.com/tektoncd/pipeline/test/diff"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/resource"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func TestNewTransformer(t *testing.T) {
- testPod := &corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Namespace: "custom-ns"},
- Spec: corev1.PodSpec{
- InitContainers: []corev1.Container{
- {Name: "place-scripts"},
- {Name: "prepare"},
- {Name: "working-dir-initializer"},
- {Name: "test-01"},
- {Name: "foo"},
- },
- Containers: []corev1.Container{
- {Name: "scripts-01"},
- {Name: "scripts-02"},
- {Name: "sidecar-scripts-01"},
- {Name: "sidecar-scripts-02"},
- {Name: "test-01"},
- {Name: "foo"},
- },
- },
- }
-
- tcs := []struct {
- name string
- targetPod *corev1.Pod
- resourceRequirements map[string]corev1.ResourceRequirements
- getExpectedPod func() *corev1.Pod
- }{
- // verifies with no resource requirements data from a config map
- {
- name: "test-with-no-data",
- targetPod: testPod.DeepCopy(),
- resourceRequirements: map[string]corev1.ResourceRequirements{},
- getExpectedPod: func() *corev1.Pod {
- return testPod.DeepCopy()
- },
- },
-
- // verifies with empty resource requirements data from a config map
- {
- name: "test-with-empty-resource-requirements",
- targetPod: testPod.DeepCopy(),
- resourceRequirements: map[string]corev1.ResourceRequirements{
- "default": {},
- "place-scripts": {},
- "prefix-scripts": {},
- },
- getExpectedPod: func() *corev1.Pod {
- return testPod.DeepCopy()
- },
- },
-
- // verifies only with 'default' resource requirements data from a config map
- {
- name: "test-with-default-set",
- targetPod: testPod.DeepCopy(),
- resourceRequirements: map[string]corev1.ResourceRequirements{
- "default": {
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("500m"),
- corev1.ResourceMemory: resource.MustParse("256Mi"),
- },
- Requests: corev1.ResourceList{
- corev1.ResourceMemory: resource.MustParse("128Mi"),
- },
- },
- },
- getExpectedPod: func() *corev1.Pod {
- expectedPod := testPod.DeepCopy()
- defaultResource := corev1.ResourceRequirements{
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("500m"),
- corev1.ResourceMemory: resource.MustParse("256Mi"),
- },
- Requests: corev1.ResourceList{
- corev1.ResourceMemory: resource.MustParse("128Mi"),
- },
- }
- expectedPod.Spec = corev1.PodSpec{
- InitContainers: []corev1.Container{
- {Name: "place-scripts", Resources: defaultResource},
- {Name: "prepare", Resources: defaultResource},
- {Name: "working-dir-initializer", Resources: defaultResource},
- {Name: "test-01", Resources: defaultResource},
- {Name: "foo", Resources: defaultResource},
- },
- Containers: []corev1.Container{
- {Name: "scripts-01", Resources: defaultResource},
- {Name: "scripts-02", Resources: defaultResource},
- {Name: "sidecar-scripts-01", Resources: defaultResource},
- {Name: "sidecar-scripts-02", Resources: defaultResource},
- {Name: "test-01", Resources: defaultResource},
- {Name: "foo", Resources: defaultResource},
- },
- }
- return expectedPod
- },
- },
-
- // verifies only with 'place-scripts' resource requirements data from a config map
- {
- name: "test-with-place-scripts-set",
- targetPod: testPod.DeepCopy(),
- resourceRequirements: map[string]corev1.ResourceRequirements{
- "place-scripts": {
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("500m"),
- corev1.ResourceMemory: resource.MustParse("256Mi"),
- },
- Requests: corev1.ResourceList{
- corev1.ResourceMemory: resource.MustParse("128Mi"),
- corev1.ResourceCPU: resource.MustParse("200m"),
- },
- },
- },
- getExpectedPod: func() *corev1.Pod {
- expectedPod := testPod.DeepCopy()
- expectedPod.Spec.InitContainers = []corev1.Container{
- {
- Name: "place-scripts",
- Resources: corev1.ResourceRequirements{
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("500m"),
- corev1.ResourceMemory: resource.MustParse("256Mi"),
- },
- Requests: corev1.ResourceList{
- corev1.ResourceMemory: resource.MustParse("128Mi"),
- corev1.ResourceCPU: resource.MustParse("200m"),
- },
- },
- },
- {Name: "prepare"},
- {Name: "working-dir-initializer"},
- {Name: "test-01"},
- {Name: "foo"},
- }
- return expectedPod
- },
- },
-
- // verifies only with 'prefix-scripts' resource requirements data from a config map
- {
- name: "test-with-prefix-scripts-set",
- targetPod: testPod.DeepCopy(),
- resourceRequirements: map[string]corev1.ResourceRequirements{
- "prefix-scripts": {
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("500m"),
- corev1.ResourceMemory: resource.MustParse("256Mi"),
- },
- Requests: corev1.ResourceList{
- corev1.ResourceMemory: resource.MustParse("128Mi"),
- corev1.ResourceCPU: resource.MustParse("200m"),
- },
- },
- },
- getExpectedPod: func() *corev1.Pod {
- expectedPod := testPod.DeepCopy()
- prefixScripts := corev1.ResourceRequirements{
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("500m"),
- corev1.ResourceMemory: resource.MustParse("256Mi"),
- },
- Requests: corev1.ResourceList{
- corev1.ResourceMemory: resource.MustParse("128Mi"),
- corev1.ResourceCPU: resource.MustParse("200m"),
- },
- }
- expectedPod.Spec.Containers = []corev1.Container{
- {Name: "scripts-01", Resources: prefixScripts},
- {Name: "scripts-02", Resources: prefixScripts},
- {Name: "sidecar-scripts-01"},
- {Name: "sidecar-scripts-02"},
- {Name: "test-01"},
- {Name: "foo"},
- }
- return expectedPod
- },
- },
-
- // verifies with 'working-dir-initializer', 'prefix-sidecar-scripts', and 'default' resource requirements data from a config map
- {
- name: "test-with_name_prefix_and_default-set",
- targetPod: testPod.DeepCopy(),
- resourceRequirements: map[string]corev1.ResourceRequirements{
- "working-dir-initializer": {
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("400m"),
- corev1.ResourceMemory: resource.MustParse("512Mi"),
- },
- Requests: corev1.ResourceList{
- corev1.ResourceMemory: resource.MustParse("256Mi"),
- corev1.ResourceCPU: resource.MustParse("250m"),
- },
- },
- "prefix-sidecar-scripts": {
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("1"),
- corev1.ResourceMemory: resource.MustParse("1Gi"),
- },
- Requests: corev1.ResourceList{
- corev1.ResourceMemory: resource.MustParse("512Mi"),
- corev1.ResourceCPU: resource.MustParse("500m"),
- },
- },
- "default": {
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("500m"),
- corev1.ResourceMemory: resource.MustParse("256Mi"),
- },
- Requests: corev1.ResourceList{
- corev1.ResourceMemory: resource.MustParse("128Mi"),
- },
- },
- "prefix-test": {
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("100m"),
- corev1.ResourceMemory: resource.MustParse("32Mi"),
- },
- },
- "foo": {
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("200m"),
- corev1.ResourceMemory: resource.MustParse("64Mi"),
- },
- },
- },
- getExpectedPod: func() *corev1.Pod {
- expectedPod := testPod.DeepCopy()
- workDirResourceReqs := corev1.ResourceRequirements{
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("400m"),
- corev1.ResourceMemory: resource.MustParse("512Mi"),
- },
- Requests: corev1.ResourceList{
- corev1.ResourceMemory: resource.MustParse("256Mi"),
- corev1.ResourceCPU: resource.MustParse("250m"),
- },
- }
- sideCarResourceReqs := corev1.ResourceRequirements{
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("1"),
- corev1.ResourceMemory: resource.MustParse("1Gi"),
- },
- Requests: corev1.ResourceList{
- corev1.ResourceMemory: resource.MustParse("512Mi"),
- corev1.ResourceCPU: resource.MustParse("500m"),
- },
- }
- defaultResourceReqs := corev1.ResourceRequirements{
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("500m"),
- corev1.ResourceMemory: resource.MustParse("256Mi"),
- },
- Requests: corev1.ResourceList{
- corev1.ResourceMemory: resource.MustParse("128Mi"),
- },
- }
-
- testResourceReqs := corev1.ResourceRequirements{
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("100m"),
- corev1.ResourceMemory: resource.MustParse("32Mi"),
- },
- }
- fooResourceReqs := corev1.ResourceRequirements{
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("200m"),
- corev1.ResourceMemory: resource.MustParse("64Mi"),
- },
- }
-
- expectedPod.Spec = corev1.PodSpec{
- InitContainers: []corev1.Container{
- {Name: "place-scripts", Resources: defaultResourceReqs},
- {Name: "prepare", Resources: defaultResourceReqs},
- {Name: "working-dir-initializer", Resources: workDirResourceReqs},
- {Name: "test-01", Resources: testResourceReqs},
- {Name: "foo", Resources: fooResourceReqs},
- },
- Containers: []corev1.Container{
- {Name: "scripts-01", Resources: defaultResourceReqs},
- {Name: "scripts-02", Resources: defaultResourceReqs},
- {Name: "sidecar-scripts-01", Resources: sideCarResourceReqs},
- {Name: "sidecar-scripts-02", Resources: sideCarResourceReqs},
- {Name: "test-01", Resources: testResourceReqs},
- {Name: "foo", Resources: fooResourceReqs},
- },
- }
- return expectedPod
- },
- },
-
- // verifies with existing data
- {
- name: "test-with-existing-data",
- targetPod: &corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Namespace: "custom-ns"},
- Spec: corev1.PodSpec{
- InitContainers: []corev1.Container{
- {Name: "place-scripts"},
- {Name: "prepare", Resources: corev1.ResourceRequirements{
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("500m"),
- corev1.ResourceMemory: resource.MustParse("256Mi"),
- },
- Requests: corev1.ResourceList{
- corev1.ResourceMemory: resource.MustParse("128Mi"),
- },
- }},
- {Name: "working-dir-initializer"},
- },
- Containers: []corev1.Container{
- {Name: "scripts-01"},
- {Name: "scripts-02"},
- {Name: "sidecar-scripts-01"},
- {Name: "sidecar-scripts-02"},
- },
- },
- },
- resourceRequirements: map[string]corev1.ResourceRequirements{
- "prepare": {
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("1"),
- corev1.ResourceMemory: resource.MustParse("512Mi"),
- },
- Requests: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("500m"),
- corev1.ResourceMemory: resource.MustParse("256Mi"),
- },
- },
- },
- getExpectedPod: func() *corev1.Pod {
- expectedPod := &corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Namespace: "custom-ns"},
- Spec: corev1.PodSpec{
- InitContainers: []corev1.Container{
- {Name: "place-scripts"},
- {Name: "prepare", Resources: corev1.ResourceRequirements{
- Limits: corev1.ResourceList{
- corev1.ResourceCPU: resource.MustParse("500m"),
- corev1.ResourceMemory: resource.MustParse("256Mi"),
- },
- Requests: corev1.ResourceList{
- corev1.ResourceMemory: resource.MustParse("128Mi"),
- },
- }},
- {Name: "working-dir-initializer"},
- },
- Containers: []corev1.Container{
- {Name: "scripts-01"},
- {Name: "scripts-02"},
- {Name: "sidecar-scripts-01"},
- {Name: "sidecar-scripts-02"},
- },
- },
- }
- return expectedPod
- },
- },
- }
-
- for _, tc := range tcs {
- t.Run(tc.name, func(t *testing.T) {
- ctx := context.Background()
- // add default container resource requirements on the context
- ctx = config.ToContext(ctx, &config.Config{
- Defaults: &config.Defaults{
- DefaultContainerResourceRequirements: tc.resourceRequirements,
- },
- })
-
- // get the transformer and call the transformer
- transformer := NewTransformer(ctx)
- transformedPod, err := transformer(tc.targetPod)
- if err != nil {
- t.Errorf("unexpected error %s", err)
- }
-
- expectedPod := tc.getExpectedPod()
- if d := cmp.Diff(expectedPod, transformedPod); d != "" {
- t.Errorf("Diff %s", diff.PrintWantGot(d))
- }
- })
- }
-}
diff --git a/upstream/pkg/pipelinerunmetrics/metrics.go b/upstream/pkg/pipelinerunmetrics/metrics.go
index 225c689d746..18d2e560fdc 100644
--- a/upstream/pkg/pipelinerunmetrics/metrics.go
+++ b/upstream/pkg/pipelinerunmetrics/metrics.go
@@ -39,13 +39,6 @@ import (
"knative.dev/pkg/metrics"
)
-const (
- runningPRLevelPipelinerun = "pipelinerun"
- runningPRLevelPipeline = "pipeline"
- runningPRLevelNamespace = "namespace"
- runningPRLevelCluster = ""
-)
-
var (
pipelinerunTag = tag.MustNewKey("pipelinerun")
pipelineTag = tag.MustNewKey("pipeline")
@@ -112,7 +105,6 @@ const (
type Recorder struct {
mutex sync.Mutex
initialized bool
- cfg *config.Metrics
insertTag func(pipeline,
pipelinerun string) []tag.Mutator
@@ -141,7 +133,6 @@ func NewRecorder(ctx context.Context) (*Recorder, error) {
}
cfg := config.FromContextOrDefaults(ctx)
- r.cfg = cfg.Metrics
errRegistering = viewRegister(cfg.Metrics)
if errRegistering != nil {
r.initialized = false
@@ -157,6 +148,7 @@ func viewRegister(cfg *config.Metrics) error {
defer r.mutex.Unlock()
var prunTag []tag.Key
+
switch cfg.PipelinerunLevel {
case config.PipelinerunLevelAtPipelinerun:
prunTag = []tag.Key{pipelinerunTag, pipelineTag}
@@ -171,18 +163,6 @@ func viewRegister(cfg *config.Metrics) error {
return errors.New("invalid config for PipelinerunLevel: " + cfg.PipelinerunLevel)
}
- var runningPRTag []tag.Key
- switch cfg.RunningPipelinerunLevel {
- case config.PipelinerunLevelAtPipelinerun:
- runningPRTag = []tag.Key{pipelinerunTag, pipelineTag, namespaceTag}
- case config.PipelinerunLevelAtPipeline:
- runningPRTag = []tag.Key{pipelineTag, namespaceTag}
- case config.PipelinerunLevelAtNS:
- runningPRTag = []tag.Key{namespaceTag}
- default:
- runningPRTag = []tag.Key{}
- }
-
distribution := view.Distribution(10, 30, 60, 300, 900, 1800, 3600, 5400, 10800, 21600, 43200, 86400)
if cfg.PipelinerunLevel == config.PipelinerunLevelAtPipelinerun {
@@ -197,12 +177,6 @@ func viewRegister(cfg *config.Metrics) error {
}
}
- prCountViewTags := []tag.Key{statusTag}
- if cfg.CountWithReason {
- prCountViewTags = append(prCountViewTags, reasonTag)
- prunTag = append(prunTag, reasonTag)
- }
-
prDurationView = &view.View{
Description: prDuration.Description(),
Measure: prDuration,
@@ -210,6 +184,10 @@ func viewRegister(cfg *config.Metrics) error {
TagKeys: append([]tag.Key{statusTag, namespaceTag}, prunTag...),
}
+ prCountViewTags := []tag.Key{statusTag}
+ if cfg.CountWithReason {
+ prCountViewTags = append(prCountViewTags, reasonTag)
+ }
prCountView = &view.View{
Description: prCount.Description(),
Measure: prCount,
@@ -232,7 +210,6 @@ func viewRegister(cfg *config.Metrics) error {
Description: runningPRs.Description(),
Measure: runningPRs,
Aggregation: view.LastValue(),
- TagKeys: runningPRTag,
}
runningPRsWaitingOnPipelineResolutionCountView = &view.View{
@@ -282,8 +259,8 @@ func viewUnregister() {
runningPRsWaitingOnTaskResolutionView)
}
-// OnStore returns a function that checks if metrics are configured for a config.Store, and registers it if so
-func OnStore(logger *zap.SugaredLogger, r *Recorder) func(name string,
+// MetricsOnStore returns a function that checks if metrics are configured for a config.Store, and registers it if so
+func MetricsOnStore(logger *zap.SugaredLogger) func(name string,
value interface{}) {
return func(name string, value interface{}) {
if name == config.GetMetricsConfigName() {
@@ -292,8 +269,6 @@ func OnStore(logger *zap.SugaredLogger, r *Recorder) func(name string,
logger.Error("Failed to do type insertion for extracting metrics config")
return
}
- r.updateConfig(cfg)
- // Update metrics according to configuration
viewUnregister()
err := viewRegister(cfg)
if err != nil {
@@ -305,10 +280,8 @@ func OnStore(logger *zap.SugaredLogger, r *Recorder) func(name string,
}
func pipelinerunInsertTag(pipeline, pipelinerun string) []tag.Mutator {
- return []tag.Mutator{
- tag.Insert(pipelineTag, pipeline),
- tag.Insert(pipelinerunTag, pipelinerun),
- }
+ return []tag.Mutator{tag.Insert(pipelineTag, pipeline),
+ tag.Insert(pipelinerunTag, pipelinerun)}
}
func pipelineInsertTag(pipeline, pipelinerun string) []tag.Mutator {
@@ -337,16 +310,9 @@ func getPipelineTagName(pr *v1.PipelineRun) string {
return pipelineName
}
-func (r *Recorder) updateConfig(cfg *config.Metrics) {
- r.mutex.Lock()
- defer r.mutex.Unlock()
-
- r.cfg = cfg
-}
-
// DurationAndCount logs the duration of PipelineRun execution and
// count for number of PipelineRuns succeed or failed
-// returns an error if it fails to log the metrics
+// returns an error if its failed to log the metrics
func (r *Recorder) DurationAndCount(pr *v1.PipelineRun, beforeCondition *apis.Condition) error {
if !r.initialized {
return fmt.Errorf("ignoring the metrics recording for %s , failed to initialize the metrics recorder", pr.Name)
@@ -383,10 +349,8 @@ func (r *Recorder) DurationAndCount(pr *v1.PipelineRun, beforeCondition *apis.Co
ctx, err := tag.New(
context.Background(),
- append([]tag.Mutator{
- tag.Insert(namespaceTag, pr.Namespace),
- tag.Insert(statusTag, status), tag.Insert(reasonTag, reason),
- }, r.insertTag(pipelineName, pr.Name)...)...)
+ append([]tag.Mutator{tag.Insert(namespaceTag, pr.Namespace),
+ tag.Insert(statusTag, status), tag.Insert(reasonTag, reason)}, r.insertTag(pipelineName, pr.Name)...)...)
if err != nil {
return err
}
@@ -399,10 +363,11 @@ func (r *Recorder) DurationAndCount(pr *v1.PipelineRun, beforeCondition *apis.Co
}
// RunningPipelineRuns logs the number of PipelineRuns running right now
-// returns an error if it fails to log the metrics
+// returns an error if its failed to log the metrics
func (r *Recorder) RunningPipelineRuns(lister listers.PipelineRunLister) error {
r.mutex.Lock()
defer r.mutex.Unlock()
+
if !r.initialized {
return errors.New("ignoring the metrics recording, failed to initialize the metrics recorder")
}
@@ -415,38 +380,9 @@ func (r *Recorder) RunningPipelineRuns(lister listers.PipelineRunLister) error {
var runningPipelineRuns int
var trsWaitResolvingTaskRef int
var prsWaitResolvingPipelineRef int
- countMap := map[string]int{}
for _, pr := range prs {
- pipelineName := getPipelineTagName(pr)
- pipelineRunKey := ""
- mutators := []tag.Mutator{
- tag.Insert(namespaceTag, pr.Namespace),
- tag.Insert(pipelineTag, pipelineName),
- tag.Insert(pipelinerunTag, pr.Name),
- }
- if r.cfg != nil {
- switch r.cfg.RunningPipelinerunLevel {
- case runningPRLevelPipelinerun:
- pipelineRunKey = pipelineRunKey + "#" + pr.Name
- fallthrough
- case runningPRLevelPipeline:
- pipelineRunKey = pipelineRunKey + "#" + pipelineName
- fallthrough
- case runningPRLevelNamespace:
- pipelineRunKey = pipelineRunKey + "#" + pr.Namespace
- case runningPRLevelCluster:
- default:
- return fmt.Errorf("RunningPipelineRunLevel value \"%s\" is not valid ", r.cfg.RunningPipelinerunLevel)
- }
- }
- ctx_, err_ := tag.New(context.Background(), mutators...)
- if err_ != nil {
- return err
- }
if !pr.IsDone() {
- countMap[pipelineRunKey]++
- metrics.Record(ctx_, runningPRs.M(float64(countMap[pipelineRunKey])))
runningPipelineRuns++
succeedCondition := pr.Status.GetCondition(apis.ConditionSucceeded)
if succeedCondition != nil && succeedCondition.Status == corev1.ConditionUnknown {
@@ -457,13 +393,6 @@ func (r *Recorder) RunningPipelineRuns(lister listers.PipelineRunLister) error {
prsWaitResolvingPipelineRef++
}
}
- } else {
- // In case there are no running PipelineRuns for the pipelineRunKey, set the metric value to 0 to ensure
- // the metric is set for the key.
- if _, exists := countMap[pipelineRunKey]; !exists {
- countMap[pipelineRunKey] = 0
- metrics.Record(ctx_, runningPRs.M(0))
- }
}
}
@@ -476,6 +405,7 @@ func (r *Recorder) RunningPipelineRuns(lister listers.PipelineRunLister) error {
metrics.Record(ctx, runningPRsWaitingOnTaskResolutionCount.M(float64(trsWaitResolvingTaskRef)))
metrics.Record(ctx, runningPRsWaitingOnTaskResolution.M(float64(trsWaitResolvingTaskRef)))
metrics.Record(ctx, runningPRsCount.M(float64(runningPipelineRuns)))
+ metrics.Record(ctx, runningPRs.M(float64(runningPipelineRuns)))
return nil
}
diff --git a/upstream/pkg/pipelinerunmetrics/metrics_test.go b/upstream/pkg/pipelinerunmetrics/metrics_test.go
index 418563e09f4..6048d5e7bbf 100644
--- a/upstream/pkg/pipelinerunmetrics/metrics_test.go
+++ b/upstream/pkg/pipelinerunmetrics/metrics_test.go
@@ -23,9 +23,6 @@ import (
"testing"
"time"
- "go.opencensus.io/metric/metricproducer"
- "go.opencensus.io/stats/view"
-
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
"github.com/tektoncd/pipeline/pkg/apis/config"
@@ -53,7 +50,6 @@ func getConfigContext(countWithReason bool) context.Context {
Metrics: &config.Metrics{
TaskrunLevel: config.TaskrunLevelAtTaskrun,
PipelinerunLevel: config.PipelinerunLevelAtPipelinerun,
- RunningPipelinerunLevel: config.DefaultRunningPipelinerunLevel,
DurationTaskrunType: config.DefaultDurationTaskrunType,
DurationPipelinerunType: config.DefaultDurationPipelinerunType,
CountWithReason: countWithReason,
@@ -62,21 +58,6 @@ func getConfigContext(countWithReason bool) context.Context {
return config.ToContext(ctx, cfg)
}
-func getConfigContextRunningPRLevel(runningPipelinerunLevel string) context.Context {
- ctx := context.Background()
- cfg := &config.Config{
- Metrics: &config.Metrics{
- TaskrunLevel: config.TaskrunLevelAtTaskrun,
- PipelinerunLevel: config.PipelinerunLevelAtPipelinerun,
- DurationTaskrunType: config.DefaultDurationTaskrunType,
- DurationPipelinerunType: config.DefaultDurationPipelinerunType,
- CountWithReason: false,
- RunningPipelinerunLevel: runningPipelinerunLevel,
- },
- }
- return config.ToContext(ctx, cfg)
-}
-
func TestUninitializedMetrics(t *testing.T) {
metrics := Recorder{}
@@ -88,7 +69,7 @@ func TestUninitializedMetrics(t *testing.T) {
}
}
-func TestOnStore(t *testing.T) {
+func TestMetricsOnStore(t *testing.T) {
log := zap.NewExample()
defer log.Sync()
logger := log.Sugar()
@@ -100,7 +81,7 @@ func TestOnStore(t *testing.T) {
}
// We check that there's no change when incorrect config is passed
- OnStore(logger, metrics)(config.GetMetricsConfigName(), &config.Store{})
+ MetricsOnStore(logger)(config.GetMetricsConfigName(), &config.Store{})
// Comparing function assign to struct with the one which should yield same value
if reflect.ValueOf(metrics.insertTag).Pointer() != reflect.ValueOf(pipelinerunInsertTag).Pointer() {
t.Fatal("metrics recorder shouldn't change during this OnStore call")
@@ -113,7 +94,7 @@ func TestOnStore(t *testing.T) {
DurationTaskrunType: config.DurationTaskrunTypeHistogram,
DurationPipelinerunType: config.DurationPipelinerunTypeLastValue,
}
- OnStore(logger, metrics)(config.GetMetricsConfigName(), cfg)
+ MetricsOnStore(logger)(config.GetMetricsConfigName(), cfg)
if reflect.ValueOf(metrics.insertTag).Pointer() != reflect.ValueOf(pipelinerunInsertTag).Pointer() {
t.Fatal("metrics recorder shouldn't change during this OnStore call")
}
@@ -124,7 +105,7 @@ func TestOnStore(t *testing.T) {
DurationTaskrunType: config.DurationTaskrunTypeHistogram,
DurationPipelinerunType: config.DurationPipelinerunTypeLastValue,
}
- OnStore(logger, metrics)(config.GetMetricsConfigName(), cfg)
+ MetricsOnStore(logger)(config.GetMetricsConfigName(), cfg)
if reflect.ValueOf(metrics.insertTag).Pointer() != reflect.ValueOf(nilInsertTag).Pointer() {
t.Fatal("metrics recorder didn't change during OnStore call")
}
@@ -360,7 +341,6 @@ func TestRecordPipelineRunDurationCount(t *testing.T) {
"pipeline": "pipeline-1",
"pipelinerun": "pipelinerun-1",
"namespace": "ns",
- "reason": "Failed",
"status": "failed",
},
expectedCountTags: map[string]string{
@@ -397,7 +377,6 @@ func TestRecordPipelineRunDurationCount(t *testing.T) {
"pipelinerun": "pipelinerun-1",
"namespace": "ns",
"status": "cancelled",
- "reason": ReasonCancelled.String(),
},
expectedCountTags: map[string]string{
"status": "cancelled",
@@ -523,204 +502,6 @@ func TestRecordRunningPipelineRunsCount(t *testing.T) {
metricstest.CheckLastValueData(t, "running_pipelineruns", map[string]string{}, 1)
}
-func TestRecordRunningPipelineRunsCountAtPipelineRunLevel(t *testing.T) {
- unregisterMetrics()
-
- newPipelineRun := func(status corev1.ConditionStatus, pipelineRun, namespace string) *v1.PipelineRun {
- return &v1.PipelineRun{
- ObjectMeta: metav1.ObjectMeta{Name: pipelineRun, Namespace: namespace},
- Status: v1.PipelineRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{{
- Type: apis.ConditionSucceeded,
- Status: status,
- }},
- },
- },
- }
- }
-
- ctx, _ := ttesting.SetupFakeContext(t)
- informer := fakepipelineruninformer.Get(ctx)
- // Add N randomly-named PipelineRuns with differently-succeeded statuses.
- for _, pipelineRun := range []*v1.PipelineRun{
- newPipelineRun(corev1.ConditionUnknown, "testpr1", "testns1"),
- newPipelineRun(corev1.ConditionUnknown, "testpr1", "testns2"),
- newPipelineRun(corev1.ConditionUnknown, "testpr2", "testns2"),
- newPipelineRun(corev1.ConditionUnknown, "testpr1", "testns3"),
- newPipelineRun(corev1.ConditionUnknown, "testpr2", "testns3"),
- newPipelineRun(corev1.ConditionUnknown, "testpr3", "testns3"),
- newPipelineRun(corev1.ConditionUnknown, "testpr4", "testns3"),
- } {
- if err := informer.Informer().GetIndexer().Add(pipelineRun); err != nil {
- t.Fatalf("Adding TaskRun to informer: %v", err)
- }
- }
-
- ctx = getConfigContextRunningPRLevel("pipelinerun")
- recorder, err := NewRecorder(ctx)
- if err != nil {
- t.Fatalf("NewRecorder: %v", err)
- }
-
- if err := recorder.RunningPipelineRuns(informer.Lister()); err != nil {
- t.Errorf("RunningPipelineRuns: %v", err)
- }
-
- checkLastValueDataForTags(t, "running_pipelineruns", map[string]string{"namespace": "testns1", "pipeline": "anonymous", "pipelinerun": "testpr1"}, 1)
- checkLastValueDataForTags(t, "running_pipelineruns", map[string]string{"namespace": "testns2", "pipeline": "anonymous", "pipelinerun": "testpr1"}, 1)
- checkLastValueDataForTags(t, "running_pipelineruns", map[string]string{"namespace": "testns2", "pipeline": "anonymous", "pipelinerun": "testpr2"}, 1)
- checkLastValueDataForTags(t, "running_pipelineruns", map[string]string{"namespace": "testns3", "pipeline": "anonymous", "pipelinerun": "testpr1"}, 1)
- checkLastValueDataForTags(t, "running_pipelineruns", map[string]string{"namespace": "testns3", "pipeline": "anonymous", "pipelinerun": "testpr2"}, 1)
- checkLastValueDataForTags(t, "running_pipelineruns", map[string]string{"namespace": "testns3", "pipeline": "anonymous", "pipelinerun": "testpr3"}, 1)
- checkLastValueDataForTags(t, "running_pipelineruns", map[string]string{"namespace": "testns3", "pipeline": "anonymous", "pipelinerun": "testpr4"}, 1)
-}
-
-func TestRecordRunningPipelineRunsCountAtPipelineLevel(t *testing.T) {
- unregisterMetrics()
-
- newPipelineRun := func(status corev1.ConditionStatus, namespace string) *v1.PipelineRun {
- return &v1.PipelineRun{
- ObjectMeta: metav1.ObjectMeta{Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("pipelinerun-"), Namespace: namespace},
- Status: v1.PipelineRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{{
- Type: apis.ConditionSucceeded,
- Status: status,
- }},
- },
- },
- }
- }
-
- ctx, _ := ttesting.SetupFakeContext(t)
- informer := fakepipelineruninformer.Get(ctx)
- // Add N randomly-named PipelineRuns with differently-succeeded statuses.
- for _, pipelineRun := range []*v1.PipelineRun{
- newPipelineRun(corev1.ConditionUnknown, "testns1"),
- newPipelineRun(corev1.ConditionUnknown, "testns2"),
- newPipelineRun(corev1.ConditionUnknown, "testns2"),
- newPipelineRun(corev1.ConditionUnknown, "testns3"),
- newPipelineRun(corev1.ConditionUnknown, "testns3"),
- newPipelineRun(corev1.ConditionUnknown, "testns3"),
- newPipelineRun(corev1.ConditionUnknown, "testns3"),
- } {
- if err := informer.Informer().GetIndexer().Add(pipelineRun); err != nil {
- t.Fatalf("Adding TaskRun to informer: %v", err)
- }
- }
-
- ctx = getConfigContextRunningPRLevel("pipeline")
- recorder, err := NewRecorder(ctx)
- if err != nil {
- t.Fatalf("NewRecorder: %v", err)
- }
-
- if err := recorder.RunningPipelineRuns(informer.Lister()); err != nil {
- t.Errorf("RunningPipelineRuns: %v", err)
- }
-
- checkLastValueDataForTags(t, "running_pipelineruns", map[string]string{"namespace": "testns1", "pipeline": "anonymous"}, 1)
- checkLastValueDataForTags(t, "running_pipelineruns", map[string]string{"namespace": "testns2", "pipeline": "anonymous"}, 2)
- checkLastValueDataForTags(t, "running_pipelineruns", map[string]string{"namespace": "testns3", "pipeline": "anonymous"}, 4)
-}
-
-func TestRecordRunningPipelineRunsCountAtNamespaceLevel(t *testing.T) {
- unregisterMetrics()
-
- newPipelineRun := func(status corev1.ConditionStatus, namespace string) *v1.PipelineRun {
- return &v1.PipelineRun{
- ObjectMeta: metav1.ObjectMeta{Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("pipelinerun-"), Namespace: namespace},
- Status: v1.PipelineRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{{
- Type: apis.ConditionSucceeded,
- Status: status,
- }},
- },
- },
- }
- }
-
- ctx, _ := ttesting.SetupFakeContext(t)
- informer := fakepipelineruninformer.Get(ctx)
- // Add N randomly-named PipelineRuns with differently-succeeded statuses.
- for _, pipelineRun := range []*v1.PipelineRun{
- newPipelineRun(corev1.ConditionUnknown, "testns1"),
- newPipelineRun(corev1.ConditionUnknown, "testns2"),
- newPipelineRun(corev1.ConditionUnknown, "testns2"),
- newPipelineRun(corev1.ConditionUnknown, "testns3"),
- newPipelineRun(corev1.ConditionUnknown, "testns3"),
- newPipelineRun(corev1.ConditionUnknown, "testns3"),
- newPipelineRun(corev1.ConditionUnknown, "testns3"),
- } {
- if err := informer.Informer().GetIndexer().Add(pipelineRun); err != nil {
- t.Fatalf("Adding TaskRun to informer: %v", err)
- }
- }
-
- ctx = getConfigContextRunningPRLevel("namespace")
- recorder, err := NewRecorder(ctx)
- if err != nil {
- t.Fatalf("NewRecorder: %v", err)
- }
-
- if err := recorder.RunningPipelineRuns(informer.Lister()); err != nil {
- t.Errorf("RunningPipelineRuns: %v", err)
- }
-
- checkLastValueDataForTags(t, "running_pipelineruns", map[string]string{"namespace": "testns1"}, 1)
- checkLastValueDataForTags(t, "running_pipelineruns", map[string]string{"namespace": "testns2"}, 2)
- checkLastValueDataForTags(t, "running_pipelineruns", map[string]string{"namespace": "testns3"}, 4)
-}
-
-func TestRecordRunningPipelineRunsCountAtClusterLevel(t *testing.T) {
- unregisterMetrics()
-
- newPipelineRun := func(status corev1.ConditionStatus, namespace string) *v1.PipelineRun {
- return &v1.PipelineRun{
- ObjectMeta: metav1.ObjectMeta{Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("pipelinerun-"), Namespace: namespace},
- Status: v1.PipelineRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{{
- Type: apis.ConditionSucceeded,
- Status: status,
- }},
- },
- },
- }
- }
-
- ctx, _ := ttesting.SetupFakeContext(t)
- informer := fakepipelineruninformer.Get(ctx)
- // Add N randomly-named PipelineRuns with differently-succeeded statuses.
- for _, pipelineRun := range []*v1.PipelineRun{
- newPipelineRun(corev1.ConditionUnknown, "testns1"),
- newPipelineRun(corev1.ConditionUnknown, "testns2"),
- newPipelineRun(corev1.ConditionUnknown, "testns2"),
- newPipelineRun(corev1.ConditionUnknown, "testns3"),
- newPipelineRun(corev1.ConditionUnknown, "testns3"),
- newPipelineRun(corev1.ConditionUnknown, "testns3"),
- newPipelineRun(corev1.ConditionUnknown, "testns3"),
- } {
- if err := informer.Informer().GetIndexer().Add(pipelineRun); err != nil {
- t.Fatalf("Adding TaskRun to informer: %v", err)
- }
- }
-
- ctx = getConfigContextRunningPRLevel("")
- recorder, err := NewRecorder(ctx)
- if err != nil {
- t.Fatalf("NewRecorder: %v", err)
- }
-
- if err := recorder.RunningPipelineRuns(informer.Lister()); err != nil {
- t.Errorf("RunningPipelineRuns: %v", err)
- }
-
- checkLastValueDataForTags(t, "running_pipelineruns", map[string]string{}, 7)
-}
-
func TestRecordRunningPipelineRunsResolutionWaitCounts(t *testing.T) {
multiplier := 3
for _, tc := range []struct {
@@ -771,7 +552,7 @@ func TestRecordRunningPipelineRunsResolutionWaitCounts(t *testing.T) {
unregisterMetrics()
ctx, _ := ttesting.SetupFakeContext(t)
informer := fakepipelineruninformer.Get(ctx)
- for range multiplier {
+ for i := 0; i < multiplier; i++ {
pr := &v1.PipelineRun{
ObjectMeta: metav1.ObjectMeta{Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("pipelinerun-")},
Status: v1.PipelineRunStatus{
@@ -813,40 +594,3 @@ func unregisterMetrics() {
r = nil
errRegistering = nil
}
-
-// We have to write this function as knative package does not provide the feature to validate multiple records for same metric.
-func checkLastValueDataForTags(t *testing.T, name string, wantTags map[string]string, expected float64) {
- t.Helper()
- for _, producer := range metricproducer.GlobalManager().GetAll() {
- meter := producer.(view.Meter)
- data, err := meter.RetrieveData(name)
- if err != nil || len(data) == 0 {
- continue
- }
- val := getLastValueData(data, wantTags)
- if expected != val.Value {
- t.Error("Value did not match for ", name, wantTags, ", expected", expected, "got", val.Value)
- }
- }
-}
-
-// Returns the LastValueData from the matching row. If no row is matched then returns nil
-func getLastValueData(rows []*view.Row, wantTags map[string]string) *view.LastValueData {
- for _, row := range rows {
- if len(wantTags) != len(row.Tags) {
- continue
- }
- matched := true
- for _, got := range row.Tags {
- n := got.Key.Name()
- if wantTags[n] != got.Value {
- matched = false
- break
- }
- }
- if matched {
- return row.Data.(*view.LastValueData)
- }
- }
- return nil
-}
diff --git a/upstream/pkg/pod/creds_init_test.go b/upstream/pkg/pod/creds_init_test.go
index 33f2e32a0be..c7dcacf81c3 100644
--- a/upstream/pkg/pod/creds_init_test.go
+++ b/upstream/pkg/pod/creds_init_test.go
@@ -335,8 +335,8 @@ func TestCredsInit(t *testing.T) {
eventObj := &corev1.Event{}
kubeclient := fakek8s.NewSimpleClientset(c.objs...)
recorder := record.NewFakeRecorder(1000)
- ctx := controller.WithEventRecorder(c.ctx, recorder)
- args, volumes, volumeMounts, err := credsInit(ctx, eventObj, serviceAccountName, namespace, kubeclient)
+ c.ctx = controller.WithEventRecorder(c.ctx, recorder)
+ args, volumes, volumeMounts, err := credsInit(c.ctx, eventObj, serviceAccountName, namespace, kubeclient)
if err != nil {
t.Fatalf("credsInit: %v", err)
}
diff --git a/upstream/pkg/pod/entrypoint.go b/upstream/pkg/pod/entrypoint.go
index fd4893bc6ed..5997131e5bd 100644
--- a/upstream/pkg/pod/entrypoint.go
+++ b/upstream/pkg/pod/entrypoint.go
@@ -126,7 +126,7 @@ var (
// Additionally, Step timeouts are added as entrypoint flag.
func orderContainers(ctx context.Context, commonExtraEntrypointArgs []string, steps []corev1.Container, taskSpec *v1.TaskSpec, breakpointConfig *v1.TaskRunDebug, waitForReadyAnnotation, enableKeepPodOnCancel bool) ([]corev1.Container, error) {
if len(steps) == 0 {
- return nil, errors.New("no steps specified")
+ return nil, errors.New("No steps specified")
}
for i, s := range steps {
@@ -171,17 +171,7 @@ func orderContainers(ctx context.Context, commonExtraEntrypointArgs []string, st
}
// add step results
stepResultArgs := stepResultArgument(taskSpec.Steps[i].Results)
-
argsForEntrypoint = append(argsForEntrypoint, stepResultArgs...)
- if len(taskSpec.Steps[i].When) > 0 {
- // marshal and pass to the entrypoint and unmarshal it there.
- marshal, err := json.Marshal(taskSpec.Steps[i].When)
-
- if err != nil {
- return nil, fmt.Errorf("faile to resolve when %w", err)
- }
- argsForEntrypoint = append(argsForEntrypoint, "--when_expressions", string(marshal))
- }
}
argsForEntrypoint = append(argsForEntrypoint, resultArgument(steps, taskSpec.Results)...)
}
@@ -189,9 +179,6 @@ func orderContainers(ctx context.Context, commonExtraEntrypointArgs []string, st
if breakpointConfig != nil && breakpointConfig.NeedsDebugOnFailure() {
argsForEntrypoint = append(argsForEntrypoint, "-breakpoint_on_failure")
}
- if breakpointConfig != nil && breakpointConfig.NeedsDebugBeforeStep(s.Name) {
- argsForEntrypoint = append(argsForEntrypoint, "-debug_before_step")
- }
cmd, args := s.Command, s.Args
if len(cmd) > 0 {
@@ -355,8 +342,8 @@ func IsContainerStep(name string) bool { return strings.HasPrefix(name, stepPref
// represents a sidecar.
func IsContainerSidecar(name string) bool { return strings.HasPrefix(name, sidecarPrefix) }
-// TrimStepPrefix returns the container name, stripped of its step prefix.
-func TrimStepPrefix(name string) string { return strings.TrimPrefix(name, stepPrefix) }
+// trimStepPrefix returns the container name, stripped of its step prefix.
+func trimStepPrefix(name string) string { return strings.TrimPrefix(name, stepPrefix) }
// TrimSidecarPrefix returns the container name, stripped of its sidecar
// prefix.
diff --git a/upstream/pkg/pod/entrypoint_lookup_impl_test.go b/upstream/pkg/pod/entrypoint_lookup_impl_test.go
index caf0985ed44..6c6178a8b16 100644
--- a/upstream/pkg/pod/entrypoint_lookup_impl_test.go
+++ b/upstream/pkg/pod/entrypoint_lookup_impl_test.go
@@ -68,7 +68,7 @@ func (f *fakeHTTP) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
// Check auth if we've fetching the image.
- if strings.HasPrefix(r.URL.Path, "/v2/task") && r.Method == http.MethodGet {
+ if strings.HasPrefix(r.URL.Path, "/v2/task") && r.Method == "GET" {
u, p, ok := r.BasicAuth()
if !ok || username != u || password != p {
w.WriteHeader(http.StatusUnauthorized)
@@ -114,11 +114,9 @@ func TestGetImageWithImagePullSecrets(t *testing.T) {
task := &pipelinev1.Task{
TypeMeta: metav1.TypeMeta{
APIVersion: "tekton.dev/v1",
- Kind: "Task",
- },
+ Kind: "Task"},
ObjectMeta: metav1.ObjectMeta{
- Name: "test-create-image",
- },
+ Name: "test-create-image"},
}
ref, err := remotetest.CreateImageWithAnnotations(u.Host+"/task/test-create-image", remotetest.DefaultObjectAnnotationMapper, task)
diff --git a/upstream/pkg/pod/entrypoint_test.go b/upstream/pkg/pod/entrypoint_test.go
index d2923af47ce..296bedc3cd8 100644
--- a/upstream/pkg/pod/entrypoint_test.go
+++ b/upstream/pkg/pod/entrypoint_test.go
@@ -22,16 +22,14 @@ import (
"testing"
"time"
+ "github.com/google/go-cmp/cmp"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/test/diff"
-
- "github.com/google/go-cmp/cmp"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/selection"
fakek8s "k8s.io/client-go/kubernetes/fake"
k8stesting "k8s.io/client-go/testing"
)
@@ -256,82 +254,6 @@ func TestOrderContainersWithDebugOnFailure(t *testing.T) {
}
}
-func TestTestOrderContainersWithDebugBeforeStep(t *testing.T) {
- steps := []corev1.Container{{
- Name: "my-task",
- Image: "step-1",
- Command: []string{"cmd"},
- Args: []string{"arg1", "arg2"},
- }}
- want := []corev1.Container{{
- Name: "my-task",
- Image: "step-1",
- Command: []string{entrypointBinary},
- Args: []string{
- "-wait_file", "/tekton/downward/ready",
- "-wait_file_content",
- "-post_file", "/tekton/run/0/out",
- "-termination_path", "/tekton/termination",
- "-step_metadata_dir", "/tekton/run/0/status", "-debug_before_step",
- "-entrypoint", "cmd", "--",
- "arg1", "arg2",
- },
- VolumeMounts: []corev1.VolumeMount{downwardMount},
- TerminationMessagePath: "/tekton/termination",
- }}
- taskRunDebugConfig := &v1.TaskRunDebug{
- Breakpoints: &v1.TaskBreakpoints{
- BeforeSteps: []string{"my-task"},
- },
- }
- got, err := orderContainers(context.Background(), []string{}, steps, nil, taskRunDebugConfig, true, false)
- if err != nil {
- t.Fatalf("orderContainers: %v", err)
- }
- if d := cmp.Diff(want, got); d != "" {
- t.Errorf("Diff %s", diff.PrintWantGot(d))
- }
-}
-
-func TestTestOrderContainersWithAllBreakpoints(t *testing.T) {
- steps := []corev1.Container{{
- Name: "my-task",
- Image: "step-1",
- Command: []string{"cmd"},
- Args: []string{"arg1", "arg2"},
- }}
- want := []corev1.Container{{
- Name: "my-task",
- Image: "step-1",
- Command: []string{entrypointBinary},
- Args: []string{
- "-wait_file", "/tekton/downward/ready",
- "-wait_file_content",
- "-post_file", "/tekton/run/0/out",
- "-termination_path", "/tekton/termination",
- "-step_metadata_dir", "/tekton/run/0/status",
- "-breakpoint_on_failure", "-debug_before_step",
- "-entrypoint", "cmd", "--",
- "arg1", "arg2",
- },
- VolumeMounts: []corev1.VolumeMount{downwardMount},
- TerminationMessagePath: "/tekton/termination",
- }}
- taskRunDebugConfig := &v1.TaskRunDebug{
- Breakpoints: &v1.TaskBreakpoints{
- OnFailure: "enabled",
- BeforeSteps: []string{"my-task"},
- },
- }
- got, err := orderContainers(context.Background(), []string{}, steps, nil, taskRunDebugConfig, true, false)
- if err != nil {
- t.Fatalf("orderContainers: %v", err)
- }
- if d := cmp.Diff(want, got); d != "" {
- t.Errorf("Diff %s", diff.PrintWantGot(d))
- }
-}
-
func TestOrderContainersWithEnabelKeepPodOnCancel(t *testing.T) {
steps := []corev1.Container{{
Image: "step-1",
@@ -439,46 +361,6 @@ func TestEntryPointStepActionResults(t *testing.T) {
t.Errorf("Diff %s", diff.PrintWantGot(d))
}
}
-func TestEntryPointStepWhen(t *testing.T) {
- containers := []corev1.Container{{
- Image: "step-1",
- Command: []string{"cmd"},
- Args: []string{"arg1", "arg2"},
- }}
- ts := v1.TaskSpec{Steps: []v1.Step{
- {
- Name: "Test-When",
- Image: "step-1",
- Command: []string{"cmd"},
- Args: []string{"arg1", "arg2"},
- When: v1.StepWhenExpressions{{Input: "foo", Operator: selection.In, Values: []string{"foo", "bar"}}},
- },
- }}
- got, err := orderContainers(context.Background(), []string{}, containers, &ts, nil, true, false)
- if err != nil {
- t.Fatalf("orderContainers: %v", err)
- }
- want := []corev1.Container{{
- Image: "step-1",
- Command: []string{"/tekton/bin/entrypoint"},
- Args: []string{
- "-wait_file", "/tekton/downward/ready",
- "-wait_file_content",
- "-post_file", "/tekton/run/0/out",
- "-termination_path", "/tekton/termination",
- "-step_metadata_dir", "/tekton/run/0/status",
- "--when_expressions",
- `[{"input":"foo","operator":"in","values":["foo","bar"]}]`,
- "-entrypoint", "cmd", "--",
- "arg1", "arg2",
- },
- VolumeMounts: []corev1.VolumeMount{downwardMount},
- TerminationMessagePath: "/tekton/termination",
- }}
- if d := cmp.Diff(want, got); d != "" {
- t.Errorf("Diff %s", diff.PrintWantGot(d))
- }
-}
func TestEntryPointResults(t *testing.T) {
taskSpec := v1.TaskSpec{
diff --git a/upstream/pkg/pod/pod.go b/upstream/pkg/pod/pod.go
index 61c40ef92ae..9b117e50b8b 100644
--- a/upstream/pkg/pod/pod.go
+++ b/upstream/pkg/pod/pod.go
@@ -26,7 +26,6 @@ import (
"strconv"
"strings"
- "github.com/tektoncd/pipeline/internal/artifactref"
"github.com/tektoncd/pipeline/pkg/apis/config"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
@@ -60,8 +59,8 @@ const (
// SpiffeCsiDriver is the CSI storage plugin needed for injection of SPIFFE workload api.
SpiffeCsiDriver = "csi.spiffe.io"
- // OsSelectorLabel is the label Kubernetes uses for OS-specific workloads (https://kubernetes.io/docs/reference/labels-annotations-taints/#kubernetes-io-os)
- OsSelectorLabel = "kubernetes.io/os"
+ // osSelectorLabel is the label Kubernetes uses for OS-specific workloads (https://kubernetes.io/docs/reference/labels-annotations-taints/#kubernetes-io-os)
+ osSelectorLabel = "kubernetes.io/os"
// TerminationReasonTimeoutExceeded indicates a step execution timed out.
TerminationReasonTimeoutExceeded = "TimeoutExceeded"
@@ -74,11 +73,6 @@ const (
// TerminationReasonCancelled indicates a step was cancelled.
TerminationReasonCancelled = "Cancelled"
-
- StepArtifactPathPattern = "step.artifacts.path"
-
- // K8s version to determine if to use native k8s sidecar or Tekton sidecar
- SidecarK8sMinorVersionCheck = 29
)
// These are effectively const, but Go doesn't have such an annotation.
@@ -104,9 +98,6 @@ var (
Name: "tekton-internal-steps",
MountPath: pipeline.StepsDir,
ReadOnly: true,
- }, {
- Name: "tekton-internal-artifacts",
- MountPath: pipeline.ArtifactsDir,
}}
implicitVolumes = []corev1.Volume{{
Name: "tekton-internal-workspace",
@@ -120,9 +111,6 @@ var (
}, {
Name: "tekton-internal-steps",
VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
- }, {
- Name: "tekton-internal-artifacts",
- VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
}}
// MaxActiveDeadlineSeconds is a maximum permitted value to be used for a task with no timeout
@@ -132,10 +120,10 @@ var (
allowPrivilegeEscalation = false
runAsNonRoot = true
- // LinuxSecurityContext allow init containers to run in namespaces
+ // The following security contexts allow init containers to run in namespaces
// with "restricted" pod security admission
// See https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
- LinuxSecurityContext = &corev1.SecurityContext{
+ linuxSecurityContext = &corev1.SecurityContext{
AllowPrivilegeEscalation: &allowPrivilegeEscalation,
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
@@ -145,7 +133,7 @@ var (
Type: corev1.SeccompProfileTypeRuntimeDefault,
},
}
- WindowsSecurityContext = &corev1.SecurityContext{
+ windowsSecurityContext = &corev1.SecurityContext{
RunAsNonRoot: &runAsNonRoot,
}
)
@@ -213,18 +201,15 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1.TaskRun, taskSpec v1.Ta
tasklevel.ApplyTaskLevelComputeResources(steps, taskRun.Spec.ComputeResources)
}
windows := usesWindows(taskRun)
- if sidecarLogsResultsEnabled {
- if taskSpec.Results != nil || artifactsPathReferenced(steps) {
- // create a results sidecar
- resultsSidecar, err := createResultsSidecar(taskSpec, b.Images.SidecarLogResultsImage, setSecurityContext, windows)
- if err != nil {
- return nil, err
- }
- taskSpec.Sidecars = append(taskSpec.Sidecars, resultsSidecar)
- commonExtraEntrypointArgs = append(commonExtraEntrypointArgs, "-result_from", config.ResultExtractionMethodSidecarLogs)
+ if sidecarLogsResultsEnabled && taskSpec.Results != nil {
+ // create a results sidecar
+ resultsSidecar, err := createResultsSidecar(taskSpec, b.Images.SidecarLogResultsImage, setSecurityContext, windows)
+ if err != nil {
+ return nil, err
}
+ taskSpec.Sidecars = append(taskSpec.Sidecars, resultsSidecar)
+ commonExtraEntrypointArgs = append(commonExtraEntrypointArgs, "-result_from", config.ResultExtractionMethodSidecarLogs)
}
-
sidecars, err := v1.MergeSidecarsWithSpecs(taskSpec.Sidecars, taskRun.Spec.SidecarSpecs)
if err != nil {
return nil, err
@@ -246,7 +231,7 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1.TaskRun, taskSpec v1.Ta
initContainers = append(initContainers, *scriptsInit)
volumes = append(volumes, scriptsVolume)
}
- if alphaAPIEnabled && taskRun.Spec.Debug != nil && taskRun.Spec.Debug.NeedsDebug() {
+ if alphaAPIEnabled && taskRun.Spec.Debug != nil {
volumes = append(volumes, debugScriptsVolume, debugInfoVolume)
}
// Initialize any workingDirs under /workspace.
@@ -336,7 +321,7 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1.TaskRun, taskSpec v1.Ta
// Each step should only mount their own volume as RW,
// all other steps should be mounted RO.
volumes = append(volumes, runVolume(i))
- for j := range stepContainers {
+ for j := 0; j < len(stepContainers); j++ {
s.VolumeMounts = append(s.VolumeMounts, runMount(j, i != j))
}
@@ -354,30 +339,25 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1.TaskRun, taskSpec v1.Ta
stepContainers[i].VolumeMounts = vms
}
- if sidecarLogsResultsEnabled {
+ if sidecarLogsResultsEnabled && taskSpec.Results != nil {
// Mount implicit volumes onto sidecarContainers
// so that they can access /tekton/results and /tekton/run.
- if taskSpec.Results != nil || artifactsPathReferenced(steps) {
- for i, s := range sidecarContainers {
- if s.Name != pipeline.ReservedResultsSidecarName {
- continue
- }
- for j := range stepContainers {
- s.VolumeMounts = append(s.VolumeMounts, runMount(j, true))
- }
- requestedVolumeMounts := map[string]bool{}
- for _, vm := range s.VolumeMounts {
- requestedVolumeMounts[filepath.Clean(vm.MountPath)] = true
- }
- var toAdd []corev1.VolumeMount
- for _, imp := range volumeMounts {
- if !requestedVolumeMounts[filepath.Clean(imp.MountPath)] {
- toAdd = append(toAdd, imp)
- }
+ for i, s := range sidecarContainers {
+ for j := 0; j < len(stepContainers); j++ {
+ s.VolumeMounts = append(s.VolumeMounts, runMount(j, true))
+ }
+ requestedVolumeMounts := map[string]bool{}
+ for _, vm := range s.VolumeMounts {
+ requestedVolumeMounts[filepath.Clean(vm.MountPath)] = true
+ }
+ var toAdd []corev1.VolumeMount
+ for _, imp := range volumeMounts {
+ if !requestedVolumeMounts[filepath.Clean(imp.MountPath)] {
+ toAdd = append(toAdd, imp)
}
- vms := append(s.VolumeMounts, toAdd...) //nolint:gocritic
- sidecarContainers[i].VolumeMounts = vms
}
+ vms := append(s.VolumeMounts, toAdd...) //nolint:gocritic
+ sidecarContainers[i].VolumeMounts = vms
}
}
@@ -431,41 +411,11 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1.TaskRun, taskSpec v1.Ta
}
mergedPodContainers := stepContainers
- mergedPodInitContainers := initContainers
-
- // Check if current k8s version is less than 1.29
- // Since Kubernetes Major version cannot be 0 and if it's 2 then sidecar will be in
- // we are only concerned about major version 1 and if the minor is less than 29 then
- // we need to do the current logic
- useTektonSidecar := true
- if config.FromContextOrDefaults(ctx).FeatureFlags.EnableKubernetesSidecar {
- // Go through the logic for enable-kubernetes feature flag
- // Kubernetes Version
- dc := b.KubeClient.Discovery()
- sv, err := dc.ServerVersion()
- if err != nil {
- return nil, err
- }
- svMinorInt, _ := strconv.Atoi(sv.Minor)
- svMajorInt, _ := strconv.Atoi(sv.Major)
- if svMajorInt >= 1 && svMinorInt >= SidecarK8sMinorVersionCheck {
- // Add RestartPolicy and Merge into initContainer
- useTektonSidecar = false
- for i := range sidecarContainers {
- sc := &sidecarContainers[i]
- always := corev1.ContainerRestartPolicyAlways
- sc.RestartPolicy = &always
- sc.Name = names.SimpleNameGenerator.RestrictLength(fmt.Sprintf("%v%v", sidecarPrefix, sc.Name))
- mergedPodInitContainers = append(mergedPodInitContainers, *sc)
- }
- }
- }
- if useTektonSidecar {
- // Merge sidecar containers with step containers.
- for _, sc := range sidecarContainers {
- sc.Name = names.SimpleNameGenerator.RestrictLength(fmt.Sprintf("%v%v", sidecarPrefix, sc.Name))
- mergedPodContainers = append(mergedPodContainers, sc)
- }
+
+ // Merge sidecar containers with step containers.
+ for _, sc := range sidecarContainers {
+ sc.Name = names.SimpleNameGenerator.RestrictLength(fmt.Sprintf("%v%v", sidecarPrefix, sc.Name))
+ mergedPodContainers = append(mergedPodContainers, sc)
}
var dnsPolicy corev1.DNSPolicy
@@ -514,7 +464,7 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1.TaskRun, taskSpec v1.Ta
},
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
- InitContainers: mergedPodInitContainers,
+ InitContainers: initContainers,
Containers: mergedPodContainers,
ServiceAccountName: taskRun.Spec.ServiceAccountName,
Volumes: volumes,
@@ -544,9 +494,106 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1.TaskRun, taskSpec v1.Ta
}
}
+ // update init container and containers resource requirements
+ // resource limits values are taken from a config map
+ configDefaults := config.FromContextOrDefaults(ctx).Defaults
+ updateResourceRequirements(configDefaults.DefaultContainerResourceRequirements, newPod)
+
return newPod, nil
}
+// updates init containers and containers resource requirements of a pod base of config_defaults configmap.
+func updateResourceRequirements(resourceRequirementsMap map[string]corev1.ResourceRequirements, pod *corev1.Pod) {
+ if len(resourceRequirementsMap) == 0 {
+ return
+ }
+
+ // collect all the available container names from the resource requirement map
+ // some of the container names: place-scripts, prepare, working-dir-initializer
+ // some of the container names with prefix: prefix-scripts, prefix-sidecar-scripts
+ containerNames := []string{}
+ containerNamesWithPrefix := []string{}
+ for containerName := range resourceRequirementsMap {
+ // skip the default key
+ if containerName == config.ResourceRequirementDefaultContainerKey {
+ continue
+ }
+
+ if strings.HasPrefix(containerName, "prefix-") {
+ containerNamesWithPrefix = append(containerNamesWithPrefix, containerName)
+ } else {
+ containerNames = append(containerNames, containerName)
+ }
+ }
+
+ // update the containers resource requirements which does not have resource requirements
+ for _, containerName := range containerNames {
+ resourceRequirements := resourceRequirementsMap[containerName]
+ if resourceRequirements.Size() == 0 {
+ continue
+ }
+
+ // update init containers
+ for index := range pod.Spec.InitContainers {
+ targetContainer := pod.Spec.InitContainers[index]
+ if containerName == targetContainer.Name && targetContainer.Resources.Size() == 0 {
+ pod.Spec.InitContainers[index].Resources = resourceRequirements
+ }
+ }
+ // update containers
+ for index := range pod.Spec.Containers {
+ targetContainer := pod.Spec.Containers[index]
+ if containerName == targetContainer.Name && targetContainer.Resources.Size() == 0 {
+ pod.Spec.Containers[index].Resources = resourceRequirements
+ }
+ }
+ }
+
+ // update the containers resource requirements which does not have resource requirements with the mentioned prefix
+ for _, containerPrefix := range containerNamesWithPrefix {
+ resourceRequirements := resourceRequirementsMap[containerPrefix]
+ if resourceRequirements.Size() == 0 {
+ continue
+ }
+
+ // get actual container name, remove "prefix-" string and append "-" at the end
+ // append '-' in the container prefix
+ containerPrefix = strings.Replace(containerPrefix, "prefix-", "", 1)
+ containerPrefix += "-"
+
+ // update init containers
+ for index := range pod.Spec.InitContainers {
+ targetContainer := pod.Spec.InitContainers[index]
+ if strings.HasPrefix(targetContainer.Name, containerPrefix) && targetContainer.Resources.Size() == 0 {
+ pod.Spec.InitContainers[index].Resources = resourceRequirements
+ }
+ }
+ // update containers
+ for index := range pod.Spec.Containers {
+ targetContainer := pod.Spec.Containers[index]
+ if strings.HasPrefix(targetContainer.Name, containerPrefix) && targetContainer.Resources.Size() == 0 {
+ pod.Spec.Containers[index].Resources = resourceRequirements
+ }
+ }
+ }
+
+ // reset of the containers resource requirements which has empty resource requirements
+ if resourceRequirements, found := resourceRequirementsMap[config.ResourceRequirementDefaultContainerKey]; found && resourceRequirements.Size() != 0 {
+ // update init containers
+ for index := range pod.Spec.InitContainers {
+ if pod.Spec.InitContainers[index].Resources.Size() == 0 {
+ pod.Spec.InitContainers[index].Resources = resourceRequirements
+ }
+ }
+ // update containers
+ for index := range pod.Spec.Containers {
+ if pod.Spec.Containers[index].Resources.Size() == 0 {
+ pod.Spec.Containers[index].Resources = resourceRequirements
+ }
+ }
+ }
+}
+
// makeLabels constructs the labels we will propagate from TaskRuns to Pods.
func makeLabels(s *v1.TaskRun) map[string]string {
labels := make(map[string]string, len(s.ObjectMeta.Labels)+1)
@@ -561,7 +608,6 @@ func makeLabels(s *v1.TaskRun) map[string]string {
// NB: Set this *after* passing through TaskRun Labels. If the TaskRun
// specifies this label, it should be overridden by this value.
labels[pipeline.TaskRunLabelKey] = s.Name
- labels[pipeline.TaskRunUIDLabelKey] = string(s.UID)
return labels
}
@@ -607,9 +653,9 @@ func entrypointInitContainer(image string, steps []v1.Step, setSecurityContext,
command = append(command, StepName(s.Name, i))
}
volumeMounts := []corev1.VolumeMount{binMount, internalStepsMount}
- securityContext := LinuxSecurityContext
+ securityContext := linuxSecurityContext
if windows {
- securityContext = WindowsSecurityContext
+ securityContext = windowsSecurityContext
}
// Rewrite steps with entrypoint binary. Append the entrypoint init
@@ -641,19 +687,8 @@ func createResultsSidecar(taskSpec v1.TaskSpec, image string, setSecurityContext
for _, r := range taskSpec.Results {
names = append(names, r.Name)
}
-
- stepNames := make([]string, 0, len(taskSpec.Steps))
- var artifactProducerSteps []string
- for i, s := range taskSpec.Steps {
- stepName := StepName(s.Name, i)
- stepNames = append(stepNames, stepName)
- if artifactPathReferencedInStep(s) {
- artifactProducerSteps = append(artifactProducerSteps, GetContainerName(s.Name))
- }
- }
-
resultsStr := strings.Join(names, ",")
- command := []string{"/ko-app/sidecarlogresults", "-results-dir", pipeline.DefaultResultPath, "-result-names", resultsStr, "-step-names", strings.Join(artifactProducerSteps, ",")}
+ command := []string{"/ko-app/sidecarlogresults", "-results-dir", pipeline.DefaultResultPath, "-result-names", resultsStr}
// create a map of container Name to step results
stepResults := map[string][]string{}
@@ -679,9 +714,9 @@ func createResultsSidecar(taskSpec v1.TaskSpec, image string, setSecurityContext
Image: image,
Command: command,
}
- securityContext := LinuxSecurityContext
+ securityContext := linuxSecurityContext
if windows {
- securityContext = WindowsSecurityContext
+ securityContext = windowsSecurityContext
}
if setSecurityContext {
sidecar.SecurityContext = securityContext
@@ -696,42 +731,6 @@ func usesWindows(tr *v1.TaskRun) bool {
if tr.Spec.PodTemplate == nil || tr.Spec.PodTemplate.NodeSelector == nil {
return false
}
- osSelector := tr.Spec.PodTemplate.NodeSelector[OsSelectorLabel]
+ osSelector := tr.Spec.PodTemplate.NodeSelector[osSelectorLabel]
return osSelector == "windows"
}
-
-func artifactsPathReferenced(steps []v1.Step) bool {
- for _, step := range steps {
- if artifactPathReferencedInStep(step) {
- return true
- }
- }
- return false
-}
-
-func artifactPathReferencedInStep(step v1.Step) bool {
- // `$(step.artifacts.path)` in taskRun.Spec.TaskSpec.Steps and `taskSpec.steps` are substituted when building the pod while when setting status for taskRun
- // neither of them is substituted, so we need two forms to check if artifactsPath is referenced in steps.
- unresolvedPath := "$(" + artifactref.StepArtifactPathPattern + ")"
-
- path := filepath.Join(pipeline.StepsDir, GetContainerName(step.Name), "artifacts", "provenance.json")
- if strings.Contains(step.Script, path) || strings.Contains(step.Script, unresolvedPath) {
- return true
- }
- for _, arg := range step.Args {
- if strings.Contains(arg, path) || strings.Contains(arg, unresolvedPath) {
- return true
- }
- }
- for _, c := range step.Command {
- if strings.Contains(c, path) || strings.Contains(c, unresolvedPath) {
- return true
- }
- }
- for _, e := range step.Env {
- if strings.Contains(e.Value, path) || strings.Contains(e.Value, unresolvedPath) {
- return true
- }
- }
- return false
-}
diff --git a/upstream/pkg/pod/pod_test.go b/upstream/pkg/pod/pod_test.go
index 4b5fc667cc0..0ff4e0f0154 100644
--- a/upstream/pkg/pod/pod_test.go
+++ b/upstream/pkg/pod/pod_test.go
@@ -37,9 +37,6 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/version"
- fakediscovery "k8s.io/client-go/discovery/fake"
fakek8s "k8s.io/client-go/kubernetes/fake"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
@@ -1940,7 +1937,7 @@ _EOF_
},
},
{
- desc: "sidecar logs enabled, artifacts not enabled",
+ desc: "sidecar logs enabled",
featureFlags: map[string]string{"results-from": "sidecar-logs"},
ts: v1.TaskSpec{
Results: []v1.TaskResult{{
@@ -1994,8 +1991,6 @@ _EOF_
"/tekton/results",
"-result-names",
"foo",
- "-step-names",
- "",
"-step-results",
"{}",
},
@@ -2015,7 +2010,7 @@ _EOF_
},
},
{
- desc: "sidecar logs enabled with step results, artifacts not enabled",
+ desc: "sidecar logs enabled with step results",
featureFlags: map[string]string{"results-from": "sidecar-logs"},
ts: v1.TaskSpec{
Results: []v1.TaskResult{{
@@ -2075,8 +2070,6 @@ _EOF_
"/tekton/results",
"-result-names",
"foo",
- "-step-names",
- "",
"-step-results",
"{\"step-name\":[\"step-foo\"]}",
},
@@ -2096,7 +2089,7 @@ _EOF_
},
},
{
- desc: "sidecar logs enabled and artifacts not enabled, set security context is true",
+ desc: "sidecar logs enabled with security context",
featureFlags: map[string]string{"results-from": "sidecar-logs", "set-security-context": "true"},
ts: v1.TaskSpec{
Results: []v1.TaskResult{{
@@ -2150,8 +2143,6 @@ _EOF_
"/tekton/results",
"-result-names",
"foo",
- "-step-names",
- "",
"-step-results",
"{}",
},
@@ -2162,248 +2153,7 @@ _EOF_
{Name: "tekton-internal-bin", ReadOnly: true, MountPath: "/tekton/bin"},
{Name: "tekton-internal-run-0", ReadOnly: true, MountPath: "/tekton/run/0"},
}, implicitVolumeMounts...),
- SecurityContext: LinuxSecurityContext,
- }},
- Volumes: append(implicitVolumes, binVolume, runVolume(0), downwardVolume, corev1.Volume{
- Name: "tekton-creds-init-home-0",
- VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}},
- }),
- ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds,
- },
- },
- {
- desc: "sidecar logs enabled and artifacts referenced",
- featureFlags: map[string]string{"results-from": "sidecar-logs", "enable-artifacts": "true"},
- ts: v1.TaskSpec{
- Results: []v1.TaskResult{{
- Name: "foo",
- Type: v1.ResultsTypeString,
- }},
- Steps: []v1.Step{{
- Name: "name",
- Image: "image",
- Command: []string{"echo", "aaa", ">>>", "/tekton/steps/step-name/artifacts/provenance.json"}, // avoid entrypoint lookup.
- }},
- },
- want: &corev1.PodSpec{
- RestartPolicy: corev1.RestartPolicyNever,
- InitContainers: []corev1.Container{
- entrypointInitContainer(images.EntrypointImage, []v1.Step{{Name: "name"}}, false /* setSecurityContext */, false /* windows */),
- },
- Containers: []corev1.Container{{
- Name: "step-name",
- Image: "image",
- Command: []string{"/tekton/bin/entrypoint"},
- Args: []string{
- "-wait_file",
- "/tekton/downward/ready",
- "-wait_file_content",
- "-post_file",
- "/tekton/run/0/out",
- "-termination_path",
- "/tekton/termination",
- "-step_metadata_dir",
- "/tekton/run/0/status",
- "-result_from",
- "sidecar-logs",
- "-results",
- "foo",
- "-entrypoint",
- "echo",
- "--",
- "aaa",
- ">>>",
- "/tekton/steps/step-name/artifacts/provenance.json",
- },
- VolumeMounts: append([]corev1.VolumeMount{binROMount, runMount(0, false), downwardMount, {
- Name: "tekton-creds-init-home-0",
- MountPath: "/tekton/creds",
- }}, implicitVolumeMounts...),
- TerminationMessagePath: "/tekton/termination",
- }, {
- Name: pipeline.ReservedResultsSidecarContainerName,
- Image: "",
- Command: []string{
- "/ko-app/sidecarlogresults",
- "-results-dir",
- "/tekton/results",
- "-result-names",
- "foo",
- "-step-names",
- "step-name",
- "-step-results",
- "{}",
- },
- Resources: corev1.ResourceRequirements{
- Requests: nil,
- },
- VolumeMounts: append([]corev1.VolumeMount{
- {Name: "tekton-internal-bin", ReadOnly: true, MountPath: "/tekton/bin"},
- {Name: "tekton-internal-run-0", ReadOnly: true, MountPath: "/tekton/run/0"},
- }, implicitVolumeMounts...),
- }},
- Volumes: append(implicitVolumes, binVolume, runVolume(0), downwardVolume, corev1.Volume{
- Name: "tekton-creds-init-home-0",
- VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}},
- }),
- ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds,
- },
- },
- {
- desc: "sidecar logs enabled with step results, artifacts referenced",
- featureFlags: map[string]string{"results-from": "sidecar-logs", "enable-artifacts": "true"},
- ts: v1.TaskSpec{
- Results: []v1.TaskResult{{
- Name: "foo",
- Type: v1.ResultsTypeString,
- }},
- Steps: []v1.Step{{
- Name: "name",
- Results: []v1.StepResult{{
- Name: "step-foo",
- Type: v1.ResultsTypeString,
- }},
- Image: "image",
- Command: []string{"echo", "aaa", ">>>", "/tekton/steps/step-name/artifacts/provenance.json"}, //
- }},
- },
- want: &corev1.PodSpec{
- RestartPolicy: corev1.RestartPolicyNever,
- InitContainers: []corev1.Container{
- entrypointInitContainer(images.EntrypointImage, []v1.Step{{Name: "name"}}, false /* setSecurityContext */, false /* windows */),
- },
- Containers: []corev1.Container{{
- Name: "step-name",
- Image: "image",
- Command: []string{"/tekton/bin/entrypoint"},
- Args: []string{
- "-wait_file",
- "/tekton/downward/ready",
- "-wait_file_content",
- "-post_file",
- "/tekton/run/0/out",
- "-termination_path",
- "/tekton/termination",
- "-step_metadata_dir",
- "/tekton/run/0/status",
- "-result_from",
- "sidecar-logs",
- "-step_results",
- "step-foo",
- "-results",
- "foo",
- "-entrypoint",
- "echo",
- "--",
- "aaa",
- ">>>",
- "/tekton/steps/step-name/artifacts/provenance.json",
- },
- VolumeMounts: append([]corev1.VolumeMount{binROMount, runMount(0, false), downwardMount, {
- Name: "tekton-creds-init-home-0",
- MountPath: "/tekton/creds",
- }}, implicitVolumeMounts...),
- TerminationMessagePath: "/tekton/termination",
- }, {
- Name: pipeline.ReservedResultsSidecarContainerName,
- Image: "",
- Command: []string{
- "/ko-app/sidecarlogresults",
- "-results-dir",
- "/tekton/results",
- "-result-names",
- "foo",
- "-step-names",
- "step-name",
- "-step-results",
- "{\"step-name\":[\"step-foo\"]}",
- },
- Resources: corev1.ResourceRequirements{
- Requests: nil,
- },
- VolumeMounts: append([]corev1.VolumeMount{
- {Name: "tekton-internal-bin", ReadOnly: true, MountPath: "/tekton/bin"},
- {Name: "tekton-internal-run-0", ReadOnly: true, MountPath: "/tekton/run/0"},
- }, implicitVolumeMounts...),
- }},
- Volumes: append(implicitVolumes, binVolume, runVolume(0), downwardVolume, corev1.Volume{
- Name: "tekton-creds-init-home-0",
- VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}},
- }),
- ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds,
- },
- },
- {
- desc: "sidecar logs enabled, artifacts referenced and security context set ",
- featureFlags: map[string]string{"results-from": "sidecar-logs", "set-security-context": "true", "enable-artifacts": "true"},
- ts: v1.TaskSpec{
- Results: []v1.TaskResult{{
- Name: "foo",
- Type: v1.ResultsTypeString,
- }},
- Steps: []v1.Step{{
- Name: "name",
- Image: "image",
- Command: []string{"echo", "aaa", ">>>", "/tekton/steps/step-name/artifacts/provenance.json"},
- }},
- },
- want: &corev1.PodSpec{
- RestartPolicy: corev1.RestartPolicyNever,
- InitContainers: []corev1.Container{
- entrypointInitContainer(images.EntrypointImage, []v1.Step{{Name: "name"}}, true /* setSecurityContext */, false /* windows */),
- },
- Containers: []corev1.Container{{
- Name: "step-name",
- Image: "image",
- Command: []string{"/tekton/bin/entrypoint"},
- Args: []string{
- "-wait_file",
- "/tekton/downward/ready",
- "-wait_file_content",
- "-post_file",
- "/tekton/run/0/out",
- "-termination_path",
- "/tekton/termination",
- "-step_metadata_dir",
- "/tekton/run/0/status",
- "-result_from",
- "sidecar-logs",
- "-results",
- "foo",
- "-entrypoint",
- "echo",
- "--",
- "aaa",
- ">>>",
- "/tekton/steps/step-name/artifacts/provenance.json",
- },
- VolumeMounts: append([]corev1.VolumeMount{binROMount, runMount(0, false), downwardMount, {
- Name: "tekton-creds-init-home-0",
- MountPath: "/tekton/creds",
- }}, implicitVolumeMounts...),
- TerminationMessagePath: "/tekton/termination",
- }, {
- Name: pipeline.ReservedResultsSidecarContainerName,
- Image: "",
- Command: []string{
- "/ko-app/sidecarlogresults",
- "-results-dir",
- "/tekton/results",
- "-result-names",
- "foo",
- "-step-names",
- "step-name",
- "-step-results",
- "{}",
- },
- Resources: corev1.ResourceRequirements{
- Requests: nil,
- },
- VolumeMounts: append([]corev1.VolumeMount{
- {Name: "tekton-internal-bin", ReadOnly: true, MountPath: "/tekton/bin"},
- {Name: "tekton-internal-run-0", ReadOnly: true, MountPath: "/tekton/run/0"},
- }, implicitVolumeMounts...),
- SecurityContext: LinuxSecurityContext,
+ SecurityContext: linuxSecurityContext,
}},
Volumes: append(implicitVolumes, binVolume, runVolume(0), downwardVolume, corev1.Volume{
Name: "tekton-creds-init-home-0",
@@ -3251,17 +3001,14 @@ func verifyTaskLevelComputeResources(expectedComputeResources []ExpectedComputeR
func TestMakeLabels(t *testing.T) {
taskRunName := "task-run-name"
- taskRunUID := types.UID("taskrunuid")
want := map[string]string{
- pipeline.TaskRunLabelKey: taskRunName,
- "foo": "bar",
- "hello": "world",
- pipeline.TaskRunUIDLabelKey: string(taskRunUID),
+ pipeline.TaskRunLabelKey: taskRunName,
+ "foo": "bar",
+ "hello": "world",
}
got := makeLabels(&v1.TaskRun{
ObjectMeta: metav1.ObjectMeta{
Name: taskRunName,
- UID: taskRunUID,
Labels: map[string]string{
"foo": "bar",
"hello": "world",
@@ -3424,7 +3171,7 @@ func TestPrepareInitContainers(t *testing.T) {
WorkingDir: "/",
Command: []string{"/ko-app/entrypoint", "init", "/ko-app/entrypoint", entrypointBinary, "step-foo", "step-bar"},
VolumeMounts: []corev1.VolumeMount{binMount, internalStepsMount},
- SecurityContext: LinuxSecurityContext,
+ SecurityContext: linuxSecurityContext,
},
}, {
name: "nothing-special-two-steps-windows",
@@ -3456,7 +3203,7 @@ func TestPrepareInitContainers(t *testing.T) {
WorkingDir: "/",
Command: []string{"/ko-app/entrypoint", "init", "/ko-app/entrypoint", entrypointBinary, "step-foo", "step-bar"},
VolumeMounts: []corev1.VolumeMount{binMount, internalStepsMount},
- SecurityContext: WindowsSecurityContext,
+ SecurityContext: windowsSecurityContext,
},
}}
for _, tc := range tcs {
@@ -3485,13 +3232,13 @@ func TestUsesWindows(t *testing.T) {
}, {
name: "uses linux",
taskRun: &v1.TaskRun{Spec: v1.TaskRunSpec{PodTemplate: &pod.Template{NodeSelector: map[string]string{
- OsSelectorLabel: "linux",
+ osSelectorLabel: "linux",
}}}},
want: false,
}, {
name: "uses windows",
taskRun: &v1.TaskRun{Spec: v1.TaskRunSpec{PodTemplate: &pod.Template{NodeSelector: map[string]string{
- OsSelectorLabel: "windows",
+ osSelectorLabel: "windows",
}}}},
want: true,
}}
@@ -3505,265 +3252,377 @@ func TestUsesWindows(t *testing.T) {
}
}
-func Test_artifactsPathReferenced(t *testing.T) {
- tests := []struct {
- name string
- steps []v1.Step
- want bool
- }{
- {
- name: "No Steps",
- steps: []v1.Step{},
- want: false,
+func TestUpdateResourceRequirements(t *testing.T) {
+ testPod := &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Namespace: "custom-ns"},
+ Spec: corev1.PodSpec{
+ InitContainers: []corev1.Container{
+ {Name: "place-scripts"},
+ {Name: "prepare"},
+ {Name: "working-dir-initializer"},
+ {Name: "test-01"},
+ {Name: "foo"},
+ },
+ Containers: []corev1.Container{
+ {Name: "scripts-01"},
+ {Name: "scripts-02"},
+ {Name: "sidecar-scripts-01"},
+ {Name: "sidecar-scripts-02"},
+ {Name: "test-01"},
+ {Name: "foo"},
+ },
},
+ }
+
+ tcs := []struct {
+ name string
+ targetPod *corev1.Pod
+ resourceRequirements map[string]corev1.ResourceRequirements
+ getExpectedPod func() *corev1.Pod
+ }{
+ // verifies with no resource requirements data from a config map
{
- name: "No Reference",
- steps: []v1.Step{
- {
- Name: "name",
- Script: "echo hello",
- Command: []string{"echo", "hello"},
- },
+ name: "test-with-no-data",
+ targetPod: testPod.DeepCopy(),
+ resourceRequirements: map[string]corev1.ResourceRequirements{},
+ getExpectedPod: func() *corev1.Pod {
+ return testPod.DeepCopy()
},
- want: false,
},
+
+ // verifies with empty resource requirements data from a config map
{
- name: "Reference in Script",
- steps: []v1.Step{
- {
- Name: "name",
- Script: "echo aaa >> /tekton/steps/step-name/artifacts/provenance.json",
- },
+ name: "test-with-empty-resource-requirements",
+ targetPod: testPod.DeepCopy(),
+ resourceRequirements: map[string]corev1.ResourceRequirements{
+ "default": {},
+ "place-scripts": {},
+ "prefix-scripts": {},
+ },
+ getExpectedPod: func() *corev1.Pod {
+ return testPod.DeepCopy()
},
- want: true,
},
+
+ // verifies only with 'default' resource requirements data from a config map
{
- name: "Reference in Args",
- steps: []v1.Step{
- {
- Name: "name",
- Command: []string{"cat"},
- Args: []string{"/tekton/steps/step-name/artifacts/provenance.json"},
+ name: "test-with-default-set",
+ targetPod: testPod.DeepCopy(),
+ resourceRequirements: map[string]corev1.ResourceRequirements{
+ "default": {
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ corev1.ResourceMemory: resource.MustParse("256Mi"),
+ },
+ Requests: corev1.ResourceList{
+ corev1.ResourceMemory: resource.MustParse("128Mi"),
+ },
},
},
- want: true,
+ getExpectedPod: func() *corev1.Pod {
+ expectedPod := testPod.DeepCopy()
+ defaultResource := corev1.ResourceRequirements{
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ corev1.ResourceMemory: resource.MustParse("256Mi"),
+ },
+ Requests: corev1.ResourceList{
+ corev1.ResourceMemory: resource.MustParse("128Mi"),
+ },
+ }
+ expectedPod.Spec = corev1.PodSpec{
+ InitContainers: []corev1.Container{
+ {Name: "place-scripts", Resources: defaultResource},
+ {Name: "prepare", Resources: defaultResource},
+ {Name: "working-dir-initializer", Resources: defaultResource},
+ {Name: "test-01", Resources: defaultResource},
+ {Name: "foo", Resources: defaultResource},
+ },
+ Containers: []corev1.Container{
+ {Name: "scripts-01", Resources: defaultResource},
+ {Name: "scripts-02", Resources: defaultResource},
+ {Name: "sidecar-scripts-01", Resources: defaultResource},
+ {Name: "sidecar-scripts-02", Resources: defaultResource},
+ {Name: "test-01", Resources: defaultResource},
+ {Name: "foo", Resources: defaultResource},
+ },
+ }
+ return expectedPod
+ },
},
+
+ // verifies only with 'place-scripts' resource requirements data from a config map
{
- name: "Reference in Command",
- steps: []v1.Step{
- {
- Name: "name",
- Command: []string{"cat", "/tekton/steps/step-name/artifacts/provenance.json"},
+ name: "test-with-place-scripts-set",
+ targetPod: testPod.DeepCopy(),
+ resourceRequirements: map[string]corev1.ResourceRequirements{
+ "place-scripts": {
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ corev1.ResourceMemory: resource.MustParse("256Mi"),
+ },
+ Requests: corev1.ResourceList{
+ corev1.ResourceMemory: resource.MustParse("128Mi"),
+ corev1.ResourceCPU: resource.MustParse("200m"),
+ },
},
},
- want: true,
- },
- {
- name: "Reference in Env",
- steps: []v1.Step{
- {
- Name: "name",
- Env: []corev1.EnvVar{
- {
- Name: "MY_VAR",
- Value: "/tekton/steps/step-name/artifacts/provenance.json",
+ getExpectedPod: func() *corev1.Pod {
+ expectedPod := testPod.DeepCopy()
+ expectedPod.Spec.InitContainers = []corev1.Container{
+ {
+ Name: "place-scripts",
+ Resources: corev1.ResourceRequirements{
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ corev1.ResourceMemory: resource.MustParse("256Mi"),
+ },
+ Requests: corev1.ResourceList{
+ corev1.ResourceMemory: resource.MustParse("128Mi"),
+ corev1.ResourceCPU: resource.MustParse("200m"),
+ },
},
},
- },
+ {Name: "prepare"},
+ {Name: "working-dir-initializer"},
+ {Name: "test-01"},
+ {Name: "foo"},
+ }
+ return expectedPod
},
- want: true,
},
+
+ // verifies only with 'prefix-scripts' resource requirements data from a config map
{
- name: "Unresolved reference in Script",
- steps: []v1.Step{
- {
- Name: "name",
- Script: "echo aaa >> $(step.artifacts.path)",
+ name: "test-with-prefix-scripts-set",
+ targetPod: testPod.DeepCopy(),
+ resourceRequirements: map[string]corev1.ResourceRequirements{
+ "prefix-scripts": {
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ corev1.ResourceMemory: resource.MustParse("256Mi"),
+ },
+ Requests: corev1.ResourceList{
+ corev1.ResourceMemory: resource.MustParse("128Mi"),
+ corev1.ResourceCPU: resource.MustParse("200m"),
+ },
},
},
- want: true,
- },
- {
- name: "Unresolved reference in Args",
- steps: []v1.Step{
- {
- Name: "name",
- Command: []string{"cat"},
- Args: []string{"$(step.artifacts.path)"},
- },
+ getExpectedPod: func() *corev1.Pod {
+ expectedPod := testPod.DeepCopy()
+ prefixScripts := corev1.ResourceRequirements{
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ corev1.ResourceMemory: resource.MustParse("256Mi"),
+ },
+ Requests: corev1.ResourceList{
+ corev1.ResourceMemory: resource.MustParse("128Mi"),
+ corev1.ResourceCPU: resource.MustParse("200m"),
+ },
+ }
+ expectedPod.Spec.Containers = []corev1.Container{
+ {Name: "scripts-01", Resources: prefixScripts},
+ {Name: "scripts-02", Resources: prefixScripts},
+ {Name: "sidecar-scripts-01"},
+ {Name: "sidecar-scripts-02"},
+ {Name: "test-01"},
+ {Name: "foo"},
+ }
+ return expectedPod
},
- want: true,
},
+
+ // verifies with 'working-dir-initializer', 'prefix-sidecar-scripts', and 'default' resource requirements data from a config map
{
- name: "Unresolved reference in Command",
- steps: []v1.Step{
- {
- Name: "name",
- Command: []string{"cat", "$(step.artifacts.path)"},
+ name: "test-with_name_prefix_and_default-set",
+ targetPod: testPod.DeepCopy(),
+ resourceRequirements: map[string]corev1.ResourceRequirements{
+ "working-dir-initializer": {
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("400m"),
+ corev1.ResourceMemory: resource.MustParse("512Mi"),
+ },
+ Requests: corev1.ResourceList{
+ corev1.ResourceMemory: resource.MustParse("256Mi"),
+ corev1.ResourceCPU: resource.MustParse("250m"),
+ },
},
- },
- want: true,
- },
- {
- name: "Unresolved reference in Env",
- steps: []v1.Step{
- {
- Name: "name",
- Env: []corev1.EnvVar{
- {
- Name: "MY_VAR",
- Value: "$(step.artifacts.path)",
- },
+ "prefix-sidecar-scripts": {
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("1"),
+ corev1.ResourceMemory: resource.MustParse("1Gi"),
+ },
+ Requests: corev1.ResourceList{
+ corev1.ResourceMemory: resource.MustParse("512Mi"),
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ },
+ },
+ "default": {
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ corev1.ResourceMemory: resource.MustParse("256Mi"),
+ },
+ Requests: corev1.ResourceList{
+ corev1.ResourceMemory: resource.MustParse("128Mi"),
},
},
+ "prefix-test": {
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("100m"),
+ corev1.ResourceMemory: resource.MustParse("32Mi"),
+ },
+ },
+ "foo": {
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("200m"),
+ corev1.ResourceMemory: resource.MustParse("64Mi"),
+ },
+ },
+ },
+ getExpectedPod: func() *corev1.Pod {
+ expectedPod := testPod.DeepCopy()
+ workDirResourceReqs := corev1.ResourceRequirements{
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("400m"),
+ corev1.ResourceMemory: resource.MustParse("512Mi"),
+ },
+ Requests: corev1.ResourceList{
+ corev1.ResourceMemory: resource.MustParse("256Mi"),
+ corev1.ResourceCPU: resource.MustParse("250m"),
+ },
+ }
+ sideCarResourceReqs := corev1.ResourceRequirements{
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("1"),
+ corev1.ResourceMemory: resource.MustParse("1Gi"),
+ },
+ Requests: corev1.ResourceList{
+ corev1.ResourceMemory: resource.MustParse("512Mi"),
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ },
+ }
+ defaultResourceReqs := corev1.ResourceRequirements{
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ corev1.ResourceMemory: resource.MustParse("256Mi"),
+ },
+ Requests: corev1.ResourceList{
+ corev1.ResourceMemory: resource.MustParse("128Mi"),
+ },
+ }
+
+ testResourceReqs := corev1.ResourceRequirements{
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("100m"),
+ corev1.ResourceMemory: resource.MustParse("32Mi"),
+ },
+ }
+ fooResourceReqs := corev1.ResourceRequirements{
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("200m"),
+ corev1.ResourceMemory: resource.MustParse("64Mi"),
+ },
+ }
+
+ expectedPod.Spec = corev1.PodSpec{
+ InitContainers: []corev1.Container{
+ {Name: "place-scripts", Resources: defaultResourceReqs},
+ {Name: "prepare", Resources: defaultResourceReqs},
+ {Name: "working-dir-initializer", Resources: workDirResourceReqs},
+ {Name: "test-01", Resources: testResourceReqs},
+ {Name: "foo", Resources: fooResourceReqs},
+ },
+ Containers: []corev1.Container{
+ {Name: "scripts-01", Resources: defaultResourceReqs},
+ {Name: "scripts-02", Resources: defaultResourceReqs},
+ {Name: "sidecar-scripts-01", Resources: sideCarResourceReqs},
+ {Name: "sidecar-scripts-02", Resources: sideCarResourceReqs},
+ {Name: "test-01", Resources: testResourceReqs},
+ {Name: "foo", Resources: fooResourceReqs},
+ },
+ }
+ return expectedPod
},
- want: true,
},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got := artifactsPathReferenced(tt.steps)
- if d := cmp.Diff(tt.want, got); d != "" {
- t.Errorf("Diff %s", diff.PrintWantGot(d))
- }
- })
- }
-}
-func TestPodBuildWithK8s129(t *testing.T) {
- always := corev1.ContainerRestartPolicyAlways
- ts := v1.TaskSpec{
- Steps: []v1.Step{{
- Name: "name",
- Image: "image",
- Command: []string{"cmd"}, // avoid entrypoint lookup.
- }},
- Sidecars: []v1.Sidecar{{
- Name: "name",
- Image: "image",
- Command: []string{"cmd"},
- }},
- }
- want := &corev1.PodSpec{
- RestartPolicy: corev1.RestartPolicyNever,
- InitContainers: []corev1.Container{
- entrypointInitContainer(
- images.EntrypointImage,
- []v1.Step{{Name: "name"}},
- false, /* setSecurityContext */
- false /* windows */),
- {
- Name: "sidecar-name",
- Image: "image",
- Command: []string{"/tekton/bin/entrypoint"},
- Args: []string{
- "-wait_file",
- "/tekton/downward/ready",
- "-wait_file_content",
- "-post_file",
- "/tekton/run/0/out",
- "-termination_path",
- "/tekton/termination",
- "-step_metadata_dir",
- "/tekton/run/0/status",
- "-entrypoint",
- "cmd",
- "--",
+ // verifies with existing data
+ {
+ name: "test-with-existing-data",
+ targetPod: &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Namespace: "custom-ns"},
+ Spec: corev1.PodSpec{
+ InitContainers: []corev1.Container{
+ {Name: "place-scripts"},
+ {Name: "prepare", Resources: corev1.ResourceRequirements{
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ corev1.ResourceMemory: resource.MustParse("256Mi"),
+ },
+ Requests: corev1.ResourceList{
+ corev1.ResourceMemory: resource.MustParse("128Mi"),
+ },
+ }},
+ {Name: "working-dir-initializer"},
+ },
+ Containers: []corev1.Container{
+ {Name: "scripts-01"},
+ {Name: "scripts-02"},
+ {Name: "sidecar-scripts-01"},
+ {Name: "sidecar-scripts-02"},
+ },
},
- RestartPolicy: &always,
},
- },
- Containers: []corev1.Container{{
- Name: "step-name",
- Image: "image",
- Command: []string{"/tekton/bin/entrypoint"},
- Args: []string{
- "-wait_file",
- "/tekton/downward/ready",
- "-wait_file_content",
- "-post_file",
- "/tekton/run/0/out",
- "-termination_path",
- "/tekton/termination",
- "-step_metadata_dir",
- "/tekton/run/0/status",
- "-entrypoint",
- "cmd",
- "--",
+ resourceRequirements: map[string]corev1.ResourceRequirements{
+ "prepare": {
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("1"),
+ corev1.ResourceMemory: resource.MustParse("512Mi"),
+ },
+ Requests: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ corev1.ResourceMemory: resource.MustParse("256Mi"),
+ },
+ },
},
- }},
- }
- featureFlags := map[string]string{
- "enable-kubernetes-sidecar": "true",
- }
- store := config.NewStore(logtesting.TestLogger(t))
- store.OnConfigChanged(
- &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.Namespace()},
- Data: featureFlags,
- },
- )
- kubeclient := fakek8s.NewSimpleClientset(
- &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}},
- &corev1.ServiceAccount{
- ObjectMeta: metav1.ObjectMeta{Name: "service-account", Namespace: "default"},
- Secrets: []corev1.ObjectReference{{
- Name: "multi-creds",
- }},
- },
- &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "multi-creds",
- Namespace: "default",
- Annotations: map[string]string{
- "tekton.dev/docker-0": "https://us.gcr.io",
- "tekton.dev/docker-1": "https://docker.io",
- "tekton.dev/git-0": "github.com",
- "tekton.dev/git-1": "gitlab.com",
- },
- },
- Type: "kubernetes.io/basic-auth",
- Data: map[string][]byte{
- "username": []byte("foo"),
- "password": []byte("BestEver"),
+ getExpectedPod: func() *corev1.Pod {
+ expectedPod := &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Namespace: "custom-ns"},
+ Spec: corev1.PodSpec{
+ InitContainers: []corev1.Container{
+ {Name: "place-scripts"},
+ {Name: "prepare", Resources: corev1.ResourceRequirements{
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ corev1.ResourceMemory: resource.MustParse("256Mi"),
+ },
+ Requests: corev1.ResourceList{
+ corev1.ResourceMemory: resource.MustParse("128Mi"),
+ },
+ }},
+ {Name: "working-dir-initializer"},
+ },
+ Containers: []corev1.Container{
+ {Name: "scripts-01"},
+ {Name: "scripts-02"},
+ {Name: "sidecar-scripts-01"},
+ {Name: "sidecar-scripts-02"},
+ },
+ },
+ }
+ return expectedPod
},
},
- )
- fakeDisc, _ := kubeclient.Discovery().(*fakediscovery.FakeDiscovery)
- fakeDisc.FakedServerVersion = &version.Info{
- Major: "1",
- Minor: "29",
- }
-
- trs := v1.TaskRunSpec{
- TaskSpec: &ts,
- }
-
- tr := &v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "taskrunName",
- Namespace: "default",
- },
- Spec: trs,
}
- // No entrypoints should be looked up.
- entrypointCache := fakeCache{}
-
- builder := Builder{
- Images: images,
- KubeClient: kubeclient,
- EntrypointCache: entrypointCache,
- }
- got, err := builder.Build(store.ToContext(context.Background()), tr, ts)
- if err != nil {
- t.Errorf("Pod build failed: %s", err)
- }
- if d := cmp.Diff(want.InitContainers[1].Name, got.Spec.InitContainers[1].Name); d != "" {
- t.Errorf("Pod does not have sidecar in init list: %s", diff.PrintWantGot(d))
- }
+ for _, tc := range tcs {
+ t.Run(tc.name, func(t *testing.T) {
+ targetPod := tc.targetPod
+ updateResourceRequirements(tc.resourceRequirements, targetPod)
- if d := cmp.Diff(want.InitContainers[1].RestartPolicy, got.Spec.InitContainers[1].RestartPolicy); d != "" {
- t.Errorf("Sidecar does not have RestartPolicy Always: %s", diff.PrintWantGot(d))
+ expectedPod := tc.getExpectedPod()
+ if d := cmp.Diff(expectedPod, targetPod); d != "" {
+ t.Errorf("Diff %s", diff.PrintWantGot(d))
+ }
+ })
}
}
diff --git a/upstream/pkg/pod/script.go b/upstream/pkg/pod/script.go
index 611c109c6ee..e2f3ca5d24d 100644
--- a/upstream/pkg/pod/script.go
+++ b/upstream/pkg/pod/script.go
@@ -87,13 +87,13 @@ func convertScripts(shellImageLinux string, shellImageWin string, steps []v1.Ste
shellImage := shellImageLinux
shellCommand := "sh"
shellArg := "-c"
- securityContext := LinuxSecurityContext
+ securityContext := linuxSecurityContext
// Set windows variants for Image, Command and Args
if requiresWindows {
shellImage = shellImageWin
shellCommand = "pwsh"
shellArg = "-Command"
- securityContext = WindowsSecurityContext
+ securityContext = windowsSecurityContext
}
placeScriptsInit := corev1.Container{
@@ -146,7 +146,9 @@ func convertListOfSteps(steps []v1.Step, initContainer *corev1.Container, debugC
}
containers = append(containers, *c)
}
- placeDebugScriptInContainers(containers, initContainer, debugConfig)
+ if debugConfig != nil && debugConfig.NeedsDebugOnFailure() {
+ placeDebugScriptInContainers(containers, initContainer)
+ }
return containers
}
@@ -212,48 +214,26 @@ func encodeScript(script string) string {
// placeDebugScriptInContainers inserts debug scripts into containers. It capsules those scripts to files in initContainer,
// then executes those scripts in target containers.
-func placeDebugScriptInContainers(containers []corev1.Container, initContainer *corev1.Container, debugConfig *v1.TaskRunDebug) {
- if debugConfig == nil || !debugConfig.NeedsDebug() {
- return
- }
-
- isDebugOnFailure := debugConfig != nil && debugConfig.NeedsDebugOnFailure()
- var needDebugBeforeStep bool
-
- for i := range containers {
+func placeDebugScriptInContainers(containers []corev1.Container, initContainer *corev1.Container) {
+ for i := 0; i < len(containers); i++ {
debugInfoVolumeMount := corev1.VolumeMount{
Name: debugInfoVolumeName,
MountPath: filepath.Join(debugInfoDir, strconv.Itoa(i)),
}
(&containers[i]).VolumeMounts = append((&containers[i]).VolumeMounts, debugScriptsVolumeMount, debugInfoVolumeMount)
- if debugConfig != nil && debugConfig.NeedsDebugBeforeStep(containers[i].Name) {
- needDebugBeforeStep = true
- }
}
type script struct {
name string
content string
}
- debugScripts := make([]script, 0)
- if isDebugOnFailure {
- debugScripts = append(debugScripts, []script{{
- name: "continue",
- content: defaultScriptPreamble + fmt.Sprintf(debugContinueScriptTemplate, len(containers), debugInfoDir, RunDir),
- }, {
- name: "fail-continue",
- content: defaultScriptPreamble + fmt.Sprintf(debugFailScriptTemplate, len(containers), debugInfoDir, RunDir),
- }}...)
- }
- if needDebugBeforeStep {
- debugScripts = append(debugScripts, []script{{
- name: "beforestep-continue",
- content: defaultScriptPreamble + fmt.Sprintf(debugBeforeStepContinueScriptTemplate, len(containers), debugInfoDir, RunDir),
- }, {
- name: "beforestep-fail-continue",
- content: defaultScriptPreamble + fmt.Sprintf(debugBeforeStepFailScriptTemplate, len(containers), debugInfoDir, RunDir),
- }}...)
- }
+ debugScripts := []script{{
+ name: "continue",
+ content: defaultScriptPreamble + fmt.Sprintf(debugContinueScriptTemplate, len(containers), debugInfoDir, RunDir),
+ }, {
+ name: "fail-continue",
+ content: defaultScriptPreamble + fmt.Sprintf(debugFailScriptTemplate, len(containers), debugInfoDir, RunDir),
+ }}
// Add debug or breakpoint related scripts to /tekton/debug/scripts
// Iterate through the debugScripts and add routine for each of them in the initContainer for their creation
diff --git a/upstream/pkg/pod/script_test.go b/upstream/pkg/pod/script_test.go
index b18856534f8..93c1ed51ce5 100644
--- a/upstream/pkg/pod/script_test.go
+++ b/upstream/pkg/pod/script_test.go
@@ -159,7 +159,7 @@ _EOF_
/tekton/bin/entrypoint decode-script "${scriptfile}"
`},
VolumeMounts: []corev1.VolumeMount{writeScriptsVolumeMount, binMount},
- SecurityContext: LinuxSecurityContext,
+ SecurityContext: linuxSecurityContext,
}
want := []corev1.Container{{
Image: "step-1",
@@ -353,7 +353,7 @@ _EOF_
}
}
-func TestConvertScripts_WithBreakpoints(t *testing.T) {
+func TestConvertScripts_WithBreakpoint_OnFailure(t *testing.T) {
names.TestingSeed()
preExistingVolumeMounts := []corev1.VolumeMount{{
@@ -363,45 +363,37 @@ func TestConvertScripts_WithBreakpoints(t *testing.T) {
Name: "another-one",
MountPath: "/another/one",
}}
- testCases := []struct {
- name string
- steps []v1.Step
- wantInit *corev1.Container
- wantSteps []corev1.Container
- taskRunDebug *v1.TaskRunDebug
- }{
- {
- name: "set breakpoint only on failure",
- steps: []v1.Step{{
- Script: `#!/bin/sh
+
+ gotInit, gotSteps, gotSidecars := convertScripts(images.ShellImage, images.ShellImageWin, []v1.Step{{
+ Script: `#!/bin/sh
script-1`,
- Image: "step-1",
- }, {
- // No script to convert here.
- Image: "step-2",
- }, {
- Script: `
+ Image: "step-1",
+ }, {
+ // No script to convert here.
+ Image: "step-2",
+ }, {
+ Script: `
#!/bin/sh
script-3`,
- Image: "step-3",
- VolumeMounts: preExistingVolumeMounts,
- Args: []string{"my", "args"},
- }, {
- Script: `no-shebang`,
- Image: "step-3",
- VolumeMounts: preExistingVolumeMounts,
- Args: []string{"my", "args"},
- }},
- taskRunDebug: &v1.TaskRunDebug{
- Breakpoints: &v1.TaskBreakpoints{
- OnFailure: "enabled",
- },
- },
- wantInit: &corev1.Container{
- Name: "place-scripts",
- Image: images.ShellImage,
- Command: []string{"sh"},
- Args: []string{"-c", `scriptfile="/tekton/scripts/script-0-9l9zj"
+ Image: "step-3",
+ VolumeMounts: preExistingVolumeMounts,
+ Args: []string{"my", "args"},
+ }, {
+ Script: `no-shebang`,
+ Image: "step-3",
+ VolumeMounts: preExistingVolumeMounts,
+ Args: []string{"my", "args"},
+ }}, []v1.Sidecar{}, &v1.TaskRunDebug{
+ Breakpoints: &v1.TaskBreakpoints{
+ OnFailure: "enabled",
+ },
+ }, true)
+
+ wantInit := &corev1.Container{
+ Name: "place-scripts",
+ Image: images.ShellImage,
+ Command: []string{"sh"},
+ Args: []string{"-c", `scriptfile="/tekton/scripts/script-0-9l9zj"
touch ${scriptfile} && chmod +x ${scriptfile}
cat > ${scriptfile} << '_EOF_'
IyEvYmluL3NoCnNjcmlwdC0x
@@ -464,176 +456,49 @@ else
fi
debug-fail-continue-heredoc-randomly-generated-6nl7g
`},
- VolumeMounts: []corev1.VolumeMount{writeScriptsVolumeMount, binMount, debugScriptsVolumeMount},
- SecurityContext: LinuxSecurityContext,
- },
- wantSteps: []corev1.Container{{
- Image: "step-1",
- Command: []string{"/tekton/scripts/script-0-9l9zj"},
- VolumeMounts: []corev1.VolumeMount{scriptsVolumeMount, debugScriptsVolumeMount,
- {Name: debugInfoVolumeName, MountPath: "/tekton/debug/info/0"}},
- }, {
- Image: "step-2",
- VolumeMounts: []corev1.VolumeMount{
- debugScriptsVolumeMount, {Name: debugInfoVolumeName, MountPath: "/tekton/debug/info/1"},
- },
- }, {
- Image: "step-3",
- Command: []string{"/tekton/scripts/script-2-mz4c7"},
- Args: []string{"my", "args"},
- VolumeMounts: append(preExistingVolumeMounts, scriptsVolumeMount, debugScriptsVolumeMount,
- corev1.VolumeMount{Name: debugInfoVolumeName, MountPath: "/tekton/debug/info/2"},
- ),
- }, {
- Image: "step-3",
- Command: []string{"/tekton/scripts/script-3-mssqb"},
- Args: []string{"my", "args"},
- VolumeMounts: []corev1.VolumeMount{
- {Name: "pre-existing-volume-mount", MountPath: "/mount/path"},
- {Name: "another-one", MountPath: "/another/one"},
- scriptsVolumeMount,
- debugScriptsVolumeMount,
- {Name: debugInfoVolumeName, MountPath: "/tekton/debug/info/3"},
- },
- }},
- }, {
- name: "set all breakpoints with onfailure debugBeforeStep",
- steps: []v1.Step{{
- Name: "step-1",
- Script: `#!/bin/sh
-script-1`,
- Image: "step-1",
- }},
- taskRunDebug: &v1.TaskRunDebug{
- Breakpoints: &v1.TaskBreakpoints{
- OnFailure: "enabled",
- BeforeSteps: []string{"step-1"},
- },
- },
- wantInit: &corev1.Container{
- Name: "place-scripts",
- Image: images.ShellImage,
- Command: []string{"sh"},
- Args: []string{"-c", `scriptfile="/tekton/scripts/script-0-9l9zj"
-touch ${scriptfile} && chmod +x ${scriptfile}
-cat > ${scriptfile} << '_EOF_'
-IyEvYmluL3NoCnNjcmlwdC0x
-_EOF_
-/tekton/bin/entrypoint decode-script "${scriptfile}"
-tmpfile="/tekton/debug/scripts/debug-continue"
-touch ${tmpfile} && chmod +x ${tmpfile}
-cat > ${tmpfile} << 'debug-continue-heredoc-randomly-generated-mz4c7'
-#!/bin/sh
-set -e
-
-numberOfSteps=1
-debugInfo=/tekton/debug/info
-tektonRun=/tekton/run
-
-postFile="$(ls ${debugInfo} | grep -E '[0-9]+' | tail -1)"
-stepNumber="$(echo ${postFile} | sed 's/[^0-9]*//g')"
-
-if [ $stepNumber -lt $numberOfSteps ]; then
- touch ${tektonRun}/${stepNumber}/out # Mark step as success
- echo "0" > ${tektonRun}/${stepNumber}/out.breakpointexit
- echo "Executing step $stepNumber..."
-else
- echo "Last step (no. $stepNumber) has already been executed, breakpoint exiting !"
- exit 0
-fi
-debug-continue-heredoc-randomly-generated-mz4c7
-tmpfile="/tekton/debug/scripts/debug-fail-continue"
-touch ${tmpfile} && chmod +x ${tmpfile}
-cat > ${tmpfile} << 'debug-fail-continue-heredoc-randomly-generated-mssqb'
-#!/bin/sh
-set -e
-
-numberOfSteps=1
-debugInfo=/tekton/debug/info
-tektonRun=/tekton/run
-
-postFile="$(ls ${debugInfo} | grep -E '[0-9]+' | tail -1)"
-stepNumber="$(echo ${postFile} | sed 's/[^0-9]*//g')"
-
-if [ $stepNumber -lt $numberOfSteps ]; then
- touch ${tektonRun}/${stepNumber}/out.err # Mark step as a failure
- echo "1" > ${tektonRun}/${stepNumber}/out.breakpointexit
- echo "Executing step $stepNumber..."
-else
- echo "Last step (no. $stepNumber) has already been executed, breakpoint exiting !"
- exit 0
-fi
-debug-fail-continue-heredoc-randomly-generated-mssqb
-tmpfile="/tekton/debug/scripts/debug-beforestep-continue"
-touch ${tmpfile} && chmod +x ${tmpfile}
-cat > ${tmpfile} << 'debug-beforestep-continue-heredoc-randomly-generated-78c5n'
-#!/bin/sh
-set -e
-
-numberOfSteps=1
-debugInfo=/tekton/debug/info
-tektonRun=/tekton/run
-
-postFile="$(ls ${debugInfo} | grep -E '[0-9]+' | tail -1)"
-stepNumber="$(echo ${postFile} | sed 's/[^0-9]*//g')"
-
-if [ $stepNumber -lt $numberOfSteps ]; then
- echo "0" > ${tektonRun}/${stepNumber}/out.beforestepexit
- echo "Executing step $stepNumber..."
-else
- echo "Last step (no. $stepNumber) has already been executed, before step breakpoint exiting !"
- exit 0
-fi
-debug-beforestep-continue-heredoc-randomly-generated-78c5n
-tmpfile="/tekton/debug/scripts/debug-beforestep-fail-continue"
-touch ${tmpfile} && chmod +x ${tmpfile}
-cat > ${tmpfile} << 'debug-beforestep-fail-continue-heredoc-randomly-generated-6nl7g'
-#!/bin/sh
-set -e
-
-numberOfSteps=1
-debugInfo=/tekton/debug/info
-tektonRun=/tekton/run
-
-postFile="$(ls ${debugInfo} | grep -E '[0-9]+' | tail -1)"
-stepNumber="$(echo ${postFile} | sed 's/[^0-9]*//g')"
+ VolumeMounts: []corev1.VolumeMount{writeScriptsVolumeMount, binMount, debugScriptsVolumeMount},
+ SecurityContext: linuxSecurityContext,
+ }
-if [ $stepNumber -lt $numberOfSteps ]; then
- echo "1" > ${tektonRun}/${stepNumber}/out.beforestepexit.err
- echo "Executing step $stepNumber..."
-else
- echo "Last step (no. $stepNumber) has already been executed, before step breakpoint exiting !"
- exit 0
-fi
-debug-beforestep-fail-continue-heredoc-randomly-generated-6nl7g
-`},
- VolumeMounts: []corev1.VolumeMount{writeScriptsVolumeMount, binMount, debugScriptsVolumeMount},
- SecurityContext: LinuxSecurityContext},
- wantSteps: []corev1.Container{{
- Name: "step-1",
- Image: "step-1",
- Command: []string{"/tekton/scripts/script-0-9l9zj"},
- VolumeMounts: []corev1.VolumeMount{scriptsVolumeMount, debugScriptsVolumeMount,
- {Name: debugInfoVolumeName, MountPath: "/tekton/debug/info/0"}},
- }},
+ want := []corev1.Container{{
+ Image: "step-1",
+ Command: []string{"/tekton/scripts/script-0-9l9zj"},
+ VolumeMounts: []corev1.VolumeMount{scriptsVolumeMount, debugScriptsVolumeMount,
+ {Name: debugInfoVolumeName, MountPath: "/tekton/debug/info/0"}},
+ }, {
+ Image: "step-2",
+ VolumeMounts: []corev1.VolumeMount{
+ debugScriptsVolumeMount, {Name: debugInfoVolumeName, MountPath: "/tekton/debug/info/1"},
+ },
+ }, {
+ Image: "step-3",
+ Command: []string{"/tekton/scripts/script-2-mz4c7"},
+ Args: []string{"my", "args"},
+ VolumeMounts: append(preExistingVolumeMounts, scriptsVolumeMount, debugScriptsVolumeMount,
+ corev1.VolumeMount{Name: debugInfoVolumeName, MountPath: "/tekton/debug/info/2"}),
+ }, {
+ Image: "step-3",
+ Command: []string{"/tekton/scripts/script-3-mssqb"},
+ Args: []string{"my", "args"},
+ VolumeMounts: []corev1.VolumeMount{
+ {Name: "pre-existing-volume-mount", MountPath: "/mount/path"},
+ {Name: "another-one", MountPath: "/another/one"},
+ scriptsVolumeMount,
+ debugScriptsVolumeMount,
+ {Name: debugInfoVolumeName, MountPath: "/tekton/debug/info/3"},
},
+ }}
+
+ if d := cmp.Diff(wantInit, gotInit); d != "" {
+ t.Errorf("Init Container Diff %s", diff.PrintWantGot(d))
}
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- names.TestingSeed()
- gotInit, gotSteps, gotSidecars := convertScripts(images.ShellImage, images.ShellImageWin, tc.steps, []v1.Sidecar{}, tc.taskRunDebug, true)
- if d := cmp.Diff(tc.wantInit, gotInit); d != "" {
- t.Errorf("Init Container Diff %s", diff.PrintWantGot(d))
- }
- if d := cmp.Diff(tc.wantSteps, gotSteps); d != "" {
- t.Errorf("Containers Diff %s", diff.PrintWantGot(d))
- }
+ if d := cmp.Diff(want, gotSteps); d != "" {
+ t.Errorf("Containers Diff %s", diff.PrintWantGot(d))
+ }
- if len(gotSidecars) != 0 {
- t.Errorf("Expected zero sidecars, got %v", len(gotSidecars))
- }
- })
+ if len(gotSidecars) != 0 {
+ t.Errorf("Expected zero sidecars, got %v", len(gotSidecars))
}
}
@@ -690,7 +555,7 @@ _EOF_
/tekton/bin/entrypoint decode-script "${scriptfile}"
`},
VolumeMounts: []corev1.VolumeMount{writeScriptsVolumeMount, binMount},
- SecurityContext: LinuxSecurityContext,
+ SecurityContext: linuxSecurityContext,
}
want := []corev1.Container{{
Image: "step-1",
@@ -777,7 +642,7 @@ no-shebang
"@ | Out-File -FilePath /tekton/scripts/script-3-mssqb.cmd
`},
VolumeMounts: []corev1.VolumeMount{writeScriptsVolumeMount, binMount},
- SecurityContext: WindowsSecurityContext,
+ SecurityContext: windowsSecurityContext,
}
want := []corev1.Container{{
Image: "step-1",
@@ -860,7 +725,7 @@ sidecar-1
"@ | Out-File -FilePath /tekton/scripts/sidecar-script-0-mssqb
`},
VolumeMounts: []corev1.VolumeMount{writeScriptsVolumeMount, binMount},
- SecurityContext: WindowsSecurityContext,
+ SecurityContext: windowsSecurityContext,
}
want := []corev1.Container{{
Image: "step-1",
@@ -922,7 +787,7 @@ sidecar-1
"@ | Out-File -FilePath /tekton/scripts/sidecar-script-0-9l9zj
`},
VolumeMounts: []corev1.VolumeMount{writeScriptsVolumeMount, binMount},
- SecurityContext: WindowsSecurityContext,
+ SecurityContext: windowsSecurityContext,
}
want := []corev1.Container{{
Image: "step-1",
diff --git a/upstream/pkg/pod/scripts_constants.go b/upstream/pkg/pod/scripts_constants.go
index 7b018b6c61c..8a5867fa0c4 100644
--- a/upstream/pkg/pod/scripts_constants.go
+++ b/upstream/pkg/pod/scripts_constants.go
@@ -49,36 +49,6 @@ if [ $stepNumber -lt $numberOfSteps ]; then
else
echo "Last step (no. $stepNumber) has already been executed, breakpoint exiting !"
exit 0
-fi`
- debugBeforeStepContinueScriptTemplate = `
-numberOfSteps=%d
-debugInfo=%s
-tektonRun=%s
-
-postFile="$(ls ${debugInfo} | grep -E '[0-9]+' | tail -1)"
-stepNumber="$(echo ${postFile} | sed 's/[^0-9]*//g')"
-
-if [ $stepNumber -lt $numberOfSteps ]; then
- echo "0" > ${tektonRun}/${stepNumber}/out.beforestepexit
- echo "Executing step $stepNumber..."
-else
- echo "Last step (no. $stepNumber) has already been executed, before step breakpoint exiting !"
- exit 0
-fi`
- debugBeforeStepFailScriptTemplate = `
-numberOfSteps=%d
-debugInfo=%s
-tektonRun=%s
-
-postFile="$(ls ${debugInfo} | grep -E '[0-9]+' | tail -1)"
-stepNumber="$(echo ${postFile} | sed 's/[^0-9]*//g')"
-
-if [ $stepNumber -lt $numberOfSteps ]; then
- echo "1" > ${tektonRun}/${stepNumber}/out.beforestepexit.err
- echo "Executing step $stepNumber..."
-else
- echo "Last step (no. $stepNumber) has already been executed, before step breakpoint exiting !"
- exit 0
fi`
initScriptDirective = `tmpfile="%s"
touch ${tmpfile} && chmod +x ${tmpfile}
diff --git a/upstream/pkg/pod/status.go b/upstream/pkg/pod/status.go
index c166bda961e..90fa5a8cd54 100644
--- a/upstream/pkg/pod/status.go
+++ b/upstream/pkg/pod/status.go
@@ -125,7 +125,7 @@ func MakeTaskRunStatus(ctx context.Context, logger *zap.SugaredLogger, tr v1.Tas
sortPodContainerStatuses(pod.Status.ContainerStatuses, pod.Spec.Containers)
- complete := areContainersCompleted(ctx, pod) || isPodCompleted(pod)
+ complete := areContainersCompleted(ctx, pod) || pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed
if complete {
onError, ok := tr.Annotations[v1.PipelineTaskOnErrorAnnotation]
@@ -139,6 +139,7 @@ func MakeTaskRunStatus(ctx context.Context, logger *zap.SugaredLogger, tr v1.Tas
}
trs.PodName = pod.Name
+ trs.Steps = []v1.StepState{}
trs.Sidecars = []v1.SidecarState{}
var stepStatuses []corev1.ContainerStatus
@@ -150,11 +151,6 @@ func MakeTaskRunStatus(ctx context.Context, logger *zap.SugaredLogger, tr v1.Tas
sidecarStatuses = append(sidecarStatuses, s)
}
}
- for _, s := range pod.Status.InitContainerStatuses {
- if IsContainerSidecar(s.Name) {
- sidecarStatuses = append(sidecarStatuses, s)
- }
- }
var merr *multierror.Error
if err := setTaskRunStatusBasedOnStepStatus(ctx, logger, stepStatuses, &tr, pod.Status.Phase, kubeclient, ts); err != nil {
@@ -184,18 +180,9 @@ func createTaskResultsFromStepResults(stepRunRes []v1.TaskRunStepResult, neededS
return taskResults
}
-func setTaskRunArtifactsFromRunResult(runResults []result.RunResult, artifacts *v1.Artifacts) error {
- for _, slr := range runResults {
- if slr.ResultType == result.TaskRunArtifactsResultType {
- return json.Unmarshal([]byte(slr.Value), artifacts)
- }
- }
- return nil
-}
-
-func getTaskResultsFromSidecarLogs(runResults []result.RunResult) []result.RunResult {
+func getTaskResultsFromSidecarLogs(sidecarLogResults []result.RunResult) []result.RunResult {
taskResultsFromSidecarLogs := []result.RunResult{}
- for _, slr := range runResults {
+ for _, slr := range sidecarLogResults {
if slr.ResultType == result.TaskRunResultType {
taskResultsFromSidecarLogs = append(taskResultsFromSidecarLogs, slr)
}
@@ -235,33 +222,20 @@ func setTaskRunStatusBasedOnStepStatus(ctx context.Context, logger *zap.SugaredL
// Extract results from sidecar logs
sidecarLogsResultsEnabled := config.FromContextOrDefaults(ctx).FeatureFlags.ResultExtractionMethod == config.ResultExtractionMethodSidecarLogs
- // temporary solution to check if artifacts sidecar created in taskRun as we don't have the api for users to declare if a step/task is producing artifacts yet
- artifactsSidecarCreated := artifactsPathReferenced(ts.Steps)
sidecarLogResults := []result.RunResult{}
-
- if sidecarLogsResultsEnabled {
+ if sidecarLogsResultsEnabled && tr.Status.TaskSpec.Results != nil {
// extraction of results from sidecar logs
- if tr.Status.TaskSpec.Results != nil || artifactsSidecarCreated {
- slr, err := sidecarlogresults.GetResultsFromSidecarLogs(ctx, kubeclient, tr.Namespace, tr.Status.PodName, pipeline.ReservedResultsSidecarContainerName, podPhase)
- if err != nil {
- merr = multierror.Append(merr, err)
- }
- sidecarLogResults = append(sidecarLogResults, slr...)
+ slr, err := sidecarlogresults.GetResultsFromSidecarLogs(ctx, kubeclient, tr.Namespace, tr.Status.PodName, pipeline.ReservedResultsSidecarContainerName, podPhase)
+ if err != nil {
+ merr = multierror.Append(merr, err)
}
+ sidecarLogResults = append(sidecarLogResults, slr...)
}
// Populate Task results from sidecar logs
taskResultsFromSidecarLogs := getTaskResultsFromSidecarLogs(sidecarLogResults)
taskResults, _, _ := filterResults(taskResultsFromSidecarLogs, specResults, nil)
if tr.IsDone() {
trs.Results = append(trs.Results, taskResults...)
- var tras v1.Artifacts
- err := setTaskRunArtifactsFromRunResult(sidecarLogResults, &tras)
- if err != nil {
- logger.Errorf("Failed to set artifacts value from sidecar logs: %v", err)
- merr = multierror.Append(merr, err)
- } else {
- trs.Artifacts = &tras
- }
}
// Continue with extraction of termination messages
@@ -296,28 +270,27 @@ func setTaskRunStatusBasedOnStepStatus(ctx context.Context, logger *zap.SugaredL
// Set TaskResults from StepResults
trs.Results = append(trs.Results, createTaskResultsFromStepResults(stepRunRes, neededStepResults)...)
}
- var sas v1.Artifacts
-
- err = setStepArtifactsValueFromSidecarLogResult(sidecarLogResults, s.Name, &sas)
- if err != nil {
- logger.Errorf("Failed to set artifacts value from sidecar logs: %v", err)
- merr = multierror.Append(merr, err)
- }
// Parse termination messages
terminationReason := ""
+ var as v1.Artifacts
if state.Terminated != nil && len(state.Terminated.Message) != 0 {
msg := state.Terminated.Message
results, err := termination.ParseMessage(logger, msg)
if err != nil {
- logger.Errorf("termination message could not be parsed sas JSON: %v", err)
+ logger.Errorf("termination message could not be parsed as JSON: %v", err)
merr = multierror.Append(merr, err)
} else {
- err := setStepArtifactsValueFromTerminationMessageRunResult(results, &sas)
- if err != nil {
- logger.Errorf("error setting step artifacts of step %q in taskrun %q: %v", s.Name, tr.Name, err)
- merr = multierror.Append(merr, err)
+ for _, r := range results {
+ if r.ResultType == result.ArtifactsResultType {
+ if err := json.Unmarshal([]byte(r.Value), &as); err != nil {
+ logger.Errorf("result value could not be parsed as Artifacts: %v", err)
+ merr = multierror.Append(merr, err)
+ }
+ // there should be only one ArtifactsResult
+ break
+ }
}
time, err := extractStartedAtTimeFromResults(results)
if err != nil {
@@ -336,15 +309,6 @@ func setTaskRunStatusBasedOnStepStatus(ctx context.Context, logger *zap.SugaredL
// Set TaskResults from StepResults
taskResults = append(taskResults, createTaskResultsFromStepResults(stepRunRes, neededStepResults)...)
trs.Results = append(trs.Results, taskResults...)
-
- var tras v1.Artifacts
- err := setTaskRunArtifactsFromRunResult(filteredResults, &tras)
- if err != nil {
- logger.Errorf("error setting step artifacts in taskrun %q: %v", tr.Name, err)
- merr = multierror.Append(merr, err)
- }
- trs.Artifacts.Merge(&tras)
- trs.Artifacts.Merge(&sas)
}
msg, err = createMessageFromResults(filteredResults)
if err != nil {
@@ -364,51 +328,21 @@ func setTaskRunStatusBasedOnStepStatus(ctx context.Context, logger *zap.SugaredL
terminationReason = getTerminationReason(state.Terminated.Reason, terminationFromResults, exitCode)
}
}
- stepState := v1.StepState{
+ trs.Steps = append(trs.Steps, v1.StepState{
ContainerState: *state,
- Name: TrimStepPrefix(s.Name),
+ Name: trimStepPrefix(s.Name),
Container: s.Name,
ImageID: s.ImageID,
Results: taskRunStepResults,
TerminationReason: terminationReason,
- Inputs: sas.Inputs,
- Outputs: sas.Outputs,
- }
- foundStep := false
- for i, ss := range trs.Steps {
- if ss.Name == stepState.Name {
- stepState.Provenance = ss.Provenance
- trs.Steps[i] = stepState
- foundStep = true
- break
- }
- }
- if !foundStep {
- trs.Steps = append(trs.Steps, stepState)
- }
+ Inputs: as.Inputs,
+ Outputs: as.Outputs,
+ })
}
return merr
}
-func setStepArtifactsValueFromSidecarLogResult(results []result.RunResult, name string, artifacts *v1.Artifacts) error {
- for _, r := range results {
- if r.Key == name && r.ResultType == result.StepArtifactsResultType {
- return json.Unmarshal([]byte(r.Value), artifacts)
- }
- }
- return nil
-}
-
-func setStepArtifactsValueFromTerminationMessageRunResult(results []result.RunResult, artifacts *v1.Artifacts) error {
- for _, r := range results {
- if r.ResultType == result.StepArtifactsResultType {
- return json.Unmarshal([]byte(r.Value), artifacts)
- }
- }
- return nil
-}
-
func setTaskRunStatusBasedOnSidecarStatus(sidecarStatuses []corev1.ContainerStatus, trs *v1.TaskRunStatus) {
for _, s := range sidecarStatuses {
trs.Sidecars = append(trs.Sidecars, v1.SidecarState{
@@ -514,10 +448,7 @@ func filterResults(results []result.RunResult, specResults []v1.TaskResult, step
}
taskRunStepResults = append(taskRunStepResults, taskRunStepResult)
filteredResults = append(filteredResults, r)
- case result.StepArtifactsResultType:
- filteredResults = append(filteredResults, r)
- continue
- case result.TaskRunArtifactsResultType:
+ case result.ArtifactsResultType:
filteredResults = append(filteredResults, r)
continue
case result.InternalTektonResultType:
@@ -567,11 +498,11 @@ func extractExitCodeFromResults(results []result.RunResult) (*int32, error) {
for _, result := range results {
if result.Key == "ExitCode" {
// We could just pass the string through but this provides extra validation
- i, err := strconv.ParseInt(result.Value, 10, 32)
+ i, err := strconv.ParseUint(result.Value, 10, 32)
if err != nil {
return nil, fmt.Errorf("could not parse int value %q in ExitCode field: %w", result.Value, err)
}
- exitCode := int32(i) // #nosec G115: ParseInt was called with bit size 32, so this is safe
+ exitCode := int32(i)
return &exitCode, nil
}
}
@@ -635,30 +566,6 @@ func updateIncompleteTaskRunStatus(trs *v1.TaskRunStatus, pod *corev1.Pod) {
}
}
-// isPodCompleted checks if the given pod is completed.
-// A pod is considered completed if its phase is either "Succeeded" or "Failed".
-//
-// If it is foreseeable that the pod will eventually be in a failed state,
-// but it remains in a Running status for a visible period of time, it should be considered completed in advance.
-//
-// For example, when certain steps encounter OOM, only the pods that have timed out will change to a failed state,
-// we should consider them completed in advance.
-func isPodCompleted(pod *corev1.Pod) bool {
- if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed {
- return true
- }
- for _, s := range pod.Status.ContainerStatuses {
- if IsContainerStep(s.Name) {
- if s.State.Terminated != nil {
- if isOOMKilled(s) {
- return true
- }
- }
- }
- }
- return false
-}
-
// DidTaskRunFail check the status of pod to decide if related taskrun is failed
func DidTaskRunFail(pod *corev1.Pod) bool {
if pod.Status.Phase == corev1.PodFailed {
diff --git a/upstream/pkg/pod/status_test.go b/upstream/pkg/pod/status_test.go
index eb5a4695a33..681e50ca114 100644
--- a/upstream/pkg/pod/status_test.go
+++ b/upstream/pkg/pod/status_test.go
@@ -47,15 +47,14 @@ func TestSetTaskRunStatusBasedOnStepStatus(t *testing.T) {
ContainerStatuses []corev1.ContainerStatus
}{{
desc: "test result with large pipeline result",
- ContainerStatuses: []corev1.ContainerStatus{
- {
- Name: "step-bar-0",
- State: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- Message: `[{"key":"resultName","value":"resultValue", "type":1}, {"key":"digest","value":"sha256:1234","resourceName":"source-image"}]`,
- },
+ ContainerStatuses: []corev1.ContainerStatus{{
+ Name: "step-bar-0",
+ State: corev1.ContainerState{
+ Terminated: &corev1.ContainerStateTerminated{
+ Message: `[{"key":"resultName","value":"resultValue", "type":1}, {"key":"digest","value":"sha256:1234","resourceName":"source-image"}]`,
},
},
+ },
{
Name: "step-bar1",
State: corev1.ContainerState{
@@ -71,8 +70,7 @@ func TestSetTaskRunStatusBasedOnStepStatus(t *testing.T) {
Message: `[{"key":"resultName","value":"resultValue", "type":1}, {"key":"digest","value":"sha256:1234` + strings.Repeat("a", 3072) + `","resourceName":"source-image"}]`,
},
},
- },
- },
+ }},
}, {
desc: "The ExitCode in the result cannot modify the original ExitCode",
ContainerStatuses: []corev1.ContainerStatus{{
@@ -118,87 +116,36 @@ func TestSetTaskRunStatusBasedOnStepStatus(t *testing.T) {
func TestSetTaskRunStatusBasedOnStepStatus_sidecar_logs(t *testing.T) {
for _, c := range []struct {
- desc string
- maxResultSize int
- wantErr error
- enableArtifacts bool
- tr v1.TaskRun
+ desc string
+ maxResultSize int
+ wantErr error
}{{
- desc: "test result with sidecar logs too large",
- tr: v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "task-run",
- Namespace: "foo",
- },
- Status: v1.TaskRunStatus{
- TaskRunStatusFields: v1.TaskRunStatusFields{
- TaskSpec: &v1.TaskSpec{
- Results: []v1.TaskResult{{
- Name: "result1",
- }},
- },
- PodName: "task-run-pod",
- },
- },
- },
+ desc: "test result with sidecar logs too large",
maxResultSize: 1,
wantErr: sidecarlogresults.ErrSizeExceeded,
}, {
- desc: "test result with sidecar logs bad format",
- tr: v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "task-run",
- Namespace: "foo",
- },
- Status: v1.TaskRunStatus{
- TaskRunStatusFields: v1.TaskRunStatusFields{
- TaskSpec: &v1.TaskSpec{
- Results: []v1.TaskResult{{
- Name: "result1",
- }},
- },
- PodName: "task-run-pod",
- },
- },
- },
+ desc: "test result with sidecar logs bad format",
maxResultSize: 4096,
wantErr: fmt.Errorf("%s", "invalid result \"\": invalid character 'k' in literal false (expecting 'l')"),
- }, {
- desc: "test artifact with sidecar logs too large",
- tr: v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "task-run",
- Namespace: "foo",
- },
- Status: v1.TaskRunStatus{
- TaskRunStatusFields: v1.TaskRunStatusFields{
- TaskSpec: &v1.TaskSpec{},
- PodName: "task-run-pod",
- },
- },
- },
- maxResultSize: 1,
- wantErr: sidecarlogresults.ErrSizeExceeded,
- enableArtifacts: true,
- }, {
- desc: "test artifact with sidecar logs bad format",
- tr: v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "task-run",
- Namespace: "foo",
- },
- Status: v1.TaskRunStatus{
- TaskRunStatusFields: v1.TaskRunStatusFields{
- TaskSpec: &v1.TaskSpec{},
- PodName: "task-run-pod",
- },
- },
- },
- maxResultSize: 4096,
- wantErr: fmt.Errorf("%s", "invalid result \"\": invalid character 'k' in literal false (expecting 'l')"),
- enableArtifacts: true,
}} {
t.Run(c.desc, func(t *testing.T) {
+ tr := v1.TaskRun{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "task-run",
+ Namespace: "foo",
+ },
+ Status: v1.TaskRunStatus{
+ TaskRunStatusFields: v1.TaskRunStatusFields{
+ TaskSpec: &v1.TaskSpec{
+ Results: []v1.TaskResult{{
+ Name: "result1",
+ }},
+ },
+ PodName: "task-run-pod",
+ },
+ },
+ }
+
logger, _ := logging.NewLogger("", "status")
kubeclient := fakek8s.NewSimpleClientset()
pod := &corev1.Pod{
@@ -226,21 +173,15 @@ func TestSetTaskRunStatusBasedOnStepStatus_sidecar_logs(t *testing.T) {
if err != nil {
t.Errorf("Error occurred while creating pod %s: %s", pod.Name, err.Error())
}
- featureFlags := &config.FeatureFlags{
- ResultExtractionMethod: config.ResultExtractionMethodSidecarLogs,
- MaxResultSize: c.maxResultSize,
- }
- ts := &v1.TaskSpec{}
- if c.enableArtifacts {
- featureFlags.EnableArtifacts = true
- ts.Steps = []v1.Step{{Name: "name", Script: `echo aaa >> /tekton/steps/step-name/artifacts/provenance.json`}}
- }
ctx := config.ToContext(context.Background(), &config.Config{
- FeatureFlags: featureFlags,
+ FeatureFlags: &config.FeatureFlags{
+ ResultExtractionMethod: config.ResultExtractionMethodSidecarLogs,
+ MaxResultSize: c.maxResultSize,
+ },
})
var wantErr *multierror.Error
wantErr = multierror.Append(wantErr, c.wantErr)
- merr := setTaskRunStatusBasedOnStepStatus(ctx, logger, []corev1.ContainerStatus{{}}, &c.tr, pod.Status.Phase, kubeclient, ts)
+ merr := setTaskRunStatusBasedOnStepStatus(ctx, logger, []corev1.ContainerStatus{{}}, &tr, pod.Status.Phase, kubeclient, &v1.TaskSpec{})
if d := cmp.Diff(wantErr.Error(), merr.Error()); d != "" {
t.Errorf("Got unexpected error %s", diff.PrintWantGot(d))
@@ -301,8 +242,7 @@ func TestMakeTaskRunStatus_StepResults(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"uri","value":"https://foo.bar\n","type":4}]`,
- },
- },
+ }},
Name: "one",
Container: "step-one",
Results: []v1.TaskRunStepResult{{
@@ -311,8 +251,7 @@ func TestMakeTaskRunStatus_StepResults(t *testing.T) {
Value: *v1.NewStructuredValues("https://foo.bar\n"),
}},
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
Results: []v1.TaskRunResult{{
Name: "task-result",
Type: v1.ResultsTypeString,
@@ -367,8 +306,7 @@ func TestMakeTaskRunStatus_StepResults(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"array","value":"[\"hello\",\"world\"]","type":4}]`,
- },
- },
+ }},
Name: "one",
Container: "step-one",
Results: []v1.TaskRunStepResult{{
@@ -377,8 +315,7 @@ func TestMakeTaskRunStatus_StepResults(t *testing.T) {
Value: *v1.NewStructuredValues("hello", "world"),
}},
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
Results: []v1.TaskRunResult{{
Name: "resultName",
Type: v1.ResultsTypeArray,
@@ -436,8 +373,7 @@ func TestMakeTaskRunStatus_StepResults(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"digest","value":"sha256:1234","type":4},{"key":"resultName","value":"resultValue","type":4}]`,
- },
- },
+ }},
Name: "one",
Container: "step-one",
Results: []v1.TaskRunStepResult{{
@@ -450,8 +386,7 @@ func TestMakeTaskRunStatus_StepResults(t *testing.T) {
Value: *v1.NewStructuredValues("resultValue"),
}},
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
Results: []v1.TaskRunResult{{
Name: "resultDigest",
Type: v1.ResultsTypeString,
@@ -498,179 +433,6 @@ func TestMakeTaskRunStatus_StepResults(t *testing.T) {
}
}
-func TestMakeTaskRunStatus_StepProvenance(t *testing.T) {
- for _, c := range []struct {
- desc string
- podStatus corev1.PodStatus
- pod corev1.Pod
- tr v1.TaskRun
- want v1.TaskRunStatus
- }{{
- desc: "provenance in step",
- podStatus: corev1.PodStatus{
- Phase: corev1.PodSucceeded,
- ContainerStatuses: []corev1.ContainerStatus{{
- Name: "step-one",
- State: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{},
- },
- }},
- },
- tr: v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "task-run",
- Namespace: "foo",
- },
- Spec: v1.TaskRunSpec{
- TaskSpec: &v1.TaskSpec{
- Steps: []v1.Step{{
- Name: "one",
- Image: "bash",
- }},
- },
- },
- Status: v1.TaskRunStatus{
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Steps: []v1.StepState{{
- Name: "one",
- Provenance: &v1.Provenance{RefSource: &v1.RefSource{
- URI: "pkg://foo/bar",
- Digest: map[string]string{"sha256": "digest"},
- }},
- }},
- },
- },
- },
- want: v1.TaskRunStatus{
- Status: statusSuccess(),
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Steps: []v1.StepState{{
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{},
- },
- Name: "one",
- Container: "step-one",
- Results: []v1.TaskRunResult{},
- Provenance: &v1.Provenance{RefSource: &v1.RefSource{
- URI: "pkg://foo/bar",
- Digest: map[string]string{"sha256": "digest"},
- }},
- }},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
- // We don't actually care about the time, just that it's not nil
- CompletionTime: &metav1.Time{Time: time.Now()},
- },
- },
- }, {
- desc: "provenance in some steps",
- podStatus: corev1.PodStatus{
- Phase: corev1.PodSucceeded,
- ContainerStatuses: []corev1.ContainerStatus{{
- Name: "step-one",
- State: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{},
- },
- }, {
- Name: "step-two",
- State: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{},
- },
- }},
- },
- tr: v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "task-run",
- Namespace: "foo",
- },
- Spec: v1.TaskRunSpec{
- TaskSpec: &v1.TaskSpec{
- Steps: []v1.Step{{
- Name: "one",
- Image: "bash",
- }, {
- Name: "two",
- Image: "bash",
- }},
- },
- },
- Status: v1.TaskRunStatus{
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Steps: []v1.StepState{{
- Name: "one",
- Provenance: &v1.Provenance{RefSource: &v1.RefSource{
- URI: "pkg://foo/bar",
- Digest: map[string]string{"sha256": "digest"},
- }},
- }},
- },
- },
- },
- want: v1.TaskRunStatus{
- Status: statusSuccess(),
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Steps: []v1.StepState{{
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{},
- },
- Name: "one",
- Container: "step-one",
- Results: []v1.TaskRunResult{},
- Provenance: &v1.Provenance{RefSource: &v1.RefSource{
- URI: "pkg://foo/bar",
- Digest: map[string]string{"sha256": "digest"},
- }},
- }, {
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{},
- },
- Name: "two",
- Container: "step-two",
- Results: []v1.TaskRunResult{},
- }},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
- // We don't actually care about the time, just that it's not nil
- CompletionTime: &metav1.Time{Time: time.Now()},
- },
- },
- }} {
- t.Run(c.desc, func(t *testing.T) {
- now := metav1.Now()
- if cmp.Diff(c.pod, corev1.Pod{}) == "" {
- c.pod = corev1.Pod{
- ObjectMeta: metav1.ObjectMeta{
- Name: "pod",
- Namespace: "foo",
- CreationTimestamp: now,
- },
- Status: c.podStatus,
- }
- }
-
- logger, _ := logging.NewLogger("", "status")
- kubeclient := fakek8s.NewSimpleClientset()
- got, err := MakeTaskRunStatus(context.Background(), logger, c.tr, &c.pod, kubeclient, c.tr.Spec.TaskSpec)
- if err != nil {
- t.Errorf("MakeTaskRunResult: %s", err)
- }
-
- // Common traits, set for test case brevity.
- c.want.PodName = "pod"
-
- ensureTimeNotNil := cmp.Comparer(func(x, y *metav1.Time) bool {
- if x == nil {
- return y == nil
- }
- return y != nil
- })
- if d := cmp.Diff(c.want, got, ignoreVolatileTime, ensureTimeNotNil); d != "" {
- t.Errorf("Diff %s", diff.PrintWantGot(d))
- }
- })
- }
-}
-
func TestMakeTaskRunStatus_StepArtifacts(t *testing.T) {
for _, c := range []struct {
desc string
@@ -712,64 +474,34 @@ func TestMakeTaskRunStatus_StepArtifacts(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"/tekton/run/0/status/artifacts/provenance.json","value":"{\n \"inputs\":[\n {\n \"name\":\"input-artifacts\",\n \"values\":[\n {\n \"uri\":\"git:jjjsss\",\n \"digest\":{\n \"sha256\":\"b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0\"\n }\n }\n ]\n }\n ],\n \"outputs\":[\n {\n \"name\":\"build-results\",\n \"values\":[\n {\n \"uri\":\"pkg:balba\",\n \"digest\":{\n \"sha256\":\"df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48\",\n \"sha1\":\"95588b8f34c31eb7d62c92aaa4e6506639b06ef2\"\n }\n }\n ]\n }\n ]\n}\n","type":5}]`,
- },
- },
+ }},
Name: "one",
Container: "step-one",
Inputs: []v1.Artifact{
{
Name: "input-artifacts",
- Values: []v1.ArtifactValue{
- {
- Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"},
- Uri: "git:jjjsss",
- },
+ Values: []v1.ArtifactValue{{
+ Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"},
+ Uri: "git:jjjsss",
+ },
},
},
},
Outputs: []v1.Artifact{
{
Name: "build-results",
- Values: []v1.ArtifactValue{
- {
- Digest: map[v1.Algorithm]string{
- "sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2",
- "sha256": "df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48",
- },
- Uri: "pkg:balba",
+ Values: []v1.ArtifactValue{{
+ Digest: map[v1.Algorithm]string{
+ "sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2",
+ "sha256": "df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48",
},
+ Uri: "pkg:balba",
+ },
},
},
},
Results: []v1.TaskRunResult{},
}},
- Artifacts: &v1.Artifacts{
- Inputs: []v1.Artifact{
- {
- Name: "input-artifacts",
- Values: []v1.ArtifactValue{
- {
- Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"},
- Uri: "git:jjjsss",
- },
- },
- },
- },
- Outputs: []v1.Artifact{
- {
- Name: "build-results",
- Values: []v1.ArtifactValue{
- {
- Digest: map[v1.Algorithm]string{
- "sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2",
- "sha256": "df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48",
- },
- Uri: "pkg:balba",
- },
- },
- },
- },
- },
Sidecars: []v1.SidecarState{},
// We don't actually care about the time, just that it's not nil
CompletionTime: &metav1.Time{Time: time.Now()},
@@ -826,6 +558,7 @@ func TestMakeTaskRunStatus(t *testing.T) {
want: v1.TaskRunStatus{
Status: statusRunning(),
TaskRunStatusFields: v1.TaskRunStatusFields{
+ Steps: []v1.StepState{},
Sidecars: []v1.SidecarState{},
},
},
@@ -853,13 +586,11 @@ func TestMakeTaskRunStatus(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 123,
- },
- },
+ }},
Name: "state-name",
Container: "step-state-name",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: nil,
+ Sidecars: []v1.SidecarState{},
},
},
}, {
@@ -889,14 +620,12 @@ func TestMakeTaskRunStatus(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 123,
- },
- },
+ }},
Name: "state-name",
Container: "step-state-name",
ImageID: "image-id",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: nil,
+ Sidecars: []v1.SidecarState{},
},
},
}, {
@@ -920,14 +649,12 @@ func TestMakeTaskRunStatus(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
- },
- },
+ }},
Name: "step-push",
Container: "step-step-push",
ImageID: "image-id",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
// We don't actually care about the time, just that it's not nil
CompletionTime: &metav1.Time{Time: time.Now()},
},
@@ -981,15 +708,13 @@ func TestMakeTaskRunStatus(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 123,
- },
- },
+ }},
Name: "failure",
Container: "step-failure",
ImageID: "image-id",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
// We don't actually care about the time, just that it's not nil
CompletionTime: &metav1.Time{Time: time.Now()},
},
@@ -1003,8 +728,8 @@ func TestMakeTaskRunStatus(t *testing.T) {
want: v1.TaskRunStatus{
Status: statusFailure(v1.TaskRunReasonFailed.String(), "boom"),
TaskRunStatusFields: v1.TaskRunStatusFields{
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Steps: []v1.StepState{},
+ Sidecars: []v1.SidecarState{},
// We don't actually care about the time, just that it's not nil
CompletionTime: &metav1.Time{Time: time.Now()},
},
@@ -1032,14 +757,12 @@ func TestMakeTaskRunStatus(t *testing.T) {
Terminated: &corev1.ContainerStateTerminated{
Reason: "OOMKilled",
ExitCode: 0,
- },
- },
+ }},
Name: "step-push",
Container: "step-step-push",
ImageID: "image-id",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
// We don't actually care about the time, just that it's not nil
CompletionTime: &metav1.Time{Time: time.Now()},
},
@@ -1050,8 +773,8 @@ func TestMakeTaskRunStatus(t *testing.T) {
want: v1.TaskRunStatus{
Status: statusFailure(v1.TaskRunReasonFailed.String(), "build failed for unspecified reasons."),
TaskRunStatusFields: v1.TaskRunStatusFields{
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Steps: []v1.StepState{},
+ Sidecars: []v1.SidecarState{},
// We don't actually care about the time, just that it's not nil
CompletionTime: &metav1.Time{Time: time.Now()},
},
@@ -1100,6 +823,7 @@ func TestMakeTaskRunStatus(t *testing.T) {
want: v1.TaskRunStatus{
Status: statusPending("Pending", `pod status "the type":"Unknown"; message: "the message"`),
TaskRunStatusFields: v1.TaskRunStatusFields{
+ Steps: []v1.StepState{},
Sidecars: []v1.SidecarState{},
},
},
@@ -1112,6 +836,7 @@ func TestMakeTaskRunStatus(t *testing.T) {
want: v1.TaskRunStatus{
Status: statusPending("Pending", "pod status message"),
TaskRunStatusFields: v1.TaskRunStatusFields{
+ Steps: []v1.StepState{},
Sidecars: []v1.SidecarState{},
},
},
@@ -1121,6 +846,7 @@ func TestMakeTaskRunStatus(t *testing.T) {
want: v1.TaskRunStatus{
Status: statusPending("Pending", "Pending"),
TaskRunStatusFields: v1.TaskRunStatusFields{
+ Steps: []v1.StepState{},
Sidecars: []v1.SidecarState{},
},
},
@@ -1136,6 +862,7 @@ func TestMakeTaskRunStatus(t *testing.T) {
want: v1.TaskRunStatus{
Status: statusPending(ReasonExceededNodeResources, "TaskRun Pod exceeded available resources"),
TaskRunStatusFields: v1.TaskRunStatusFields{
+ Steps: []v1.StepState{},
Sidecars: []v1.SidecarState{},
},
},
@@ -1154,8 +881,8 @@ func TestMakeTaskRunStatus(t *testing.T) {
want: v1.TaskRunStatus{
Status: statusFailure(ReasonCreateContainerConfigError, "Failed to create pod due to config error"),
TaskRunStatusFields: v1.TaskRunStatusFields{
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Steps: []v1.StepState{},
+ Sidecars: []v1.SidecarState{},
},
},
}, {
@@ -1312,13 +1039,11 @@ func TestMakeTaskRunStatus(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"digest","value":"sha256:12345","resourceName":"source-image"}]`,
- },
- },
+ }},
Name: "foo",
Container: "step-foo",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
// We don't actually care about the time, just that it's not nil
CompletionTime: &metav1.Time{Time: time.Now()},
},
@@ -1343,13 +1068,11 @@ func TestMakeTaskRunStatus(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"digest","value":"sha256:1234","resourceName":"source-image"},{"key":"resultName","value":"resultValue","type":1}]`,
- },
- },
+ }},
Name: "bar",
Container: "step-bar",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
Results: []v1.TaskRunResult{{
Name: "resultName",
Type: v1.ResultsTypeString,
@@ -1379,13 +1102,11 @@ func TestMakeTaskRunStatus(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"digest","value":"sha256:1234","resourceName":"source-image"},{"key":"resultName","value":"resultValue","type":1}]`,
- },
- },
+ }},
Name: "banana",
Container: "step-banana",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
Results: []v1.TaskRunResult{{
Name: "resultName",
Type: v1.ResultsTypeString,
@@ -1422,21 +1143,18 @@ func TestMakeTaskRunStatus(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"resultNameOne","value":"resultValueOne","type":1},{"key":"resultNameTwo","value":"resultValueTwo","type":1}]`,
- },
- },
+ }},
Name: "one",
Container: "step-one",
}, {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"resultNameOne","value":"resultValueThree","type":1},{"key":"resultNameTwo","value":"resultValueTwo","type":1}]`,
- },
- },
+ }},
Name: "two",
Container: "step-two",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
Results: []v1.TaskRunResult{{
Name: "resultNameOne",
Type: v1.ResultsTypeString,
@@ -1450,46 +1168,6 @@ func TestMakeTaskRunStatus(t *testing.T) {
CompletionTime: &metav1.Time{Time: time.Now()},
},
},
- }, {
- desc: "oom occurred in the pod",
- podStatus: corev1.PodStatus{
- Phase: corev1.PodRunning,
- ContainerStatuses: []corev1.ContainerStatus{{
- Name: "step-one",
- State: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- Reason: oomKilled,
- ExitCode: 137,
- },
- },
- }, {
- Name: "step-two",
- State: corev1.ContainerState{},
- }},
- },
- want: v1.TaskRunStatus{
- Status: statusFailure(v1.TaskRunReasonFailed.String(), "\"step-one\" exited with code 137"),
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Steps: []v1.StepState{{
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- Reason: oomKilled,
- ExitCode: 137,
- },
- },
- Name: "one",
- Container: "step-one",
- }, {
- ContainerState: corev1.ContainerState{},
- Name: "two",
- Container: "step-two",
- }},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
- // We don't actually care about the time, just that it's not nil
- CompletionTime: &metav1.Time{Time: time.Now()},
- },
- },
}, {
desc: "the failed task show task results",
podStatus: corev1.PodStatus{
@@ -1516,7 +1194,6 @@ func TestMakeTaskRunStatus(t *testing.T) {
Container: "step-task-result",
}},
Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
CompletionTime: &metav1.Time{Time: time.Now()},
Results: []v1.TaskRunResult{{
Name: "resultName",
@@ -1541,13 +1218,11 @@ func TestMakeTaskRunStatus(t *testing.T) {
TaskRunStatusFields: v1.TaskRunStatusFields{
Steps: []v1.StepState{{
ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{},
- },
+ Terminated: &corev1.ContainerStateTerminated{}},
Name: "mango",
Container: "step-mango",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
// We don't actually care about the time, just that it's not nil
CompletionTime: &metav1.Time{Time: time.Now()},
},
@@ -1570,13 +1245,11 @@ func TestMakeTaskRunStatus(t *testing.T) {
TaskRunStatusFields: v1.TaskRunStatusFields{
Steps: []v1.StepState{{
ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{},
- },
+ Terminated: &corev1.ContainerStateTerminated{}},
Name: "pineapple",
Container: "step-pineapple",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
// We don't actually care about the time, just that it's not nil
CompletionTime: &metav1.Time{Time: time.Now()},
},
@@ -1589,8 +1262,7 @@ func TestMakeTaskRunStatus(t *testing.T) {
Name: "step-pear",
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
- Message: `[{"key":"resultNameOne","value":"","type":3}, {"key":"resultNameThree","value":"","type":1}]`,
- },
+ Message: `[{"key":"resultNameOne","value":"","type":3}, {"key":"resultNameThree","value":"","type":1}]`},
},
}},
},
@@ -1601,13 +1273,11 @@ func TestMakeTaskRunStatus(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"resultNameThree","value":"","type":1}]`,
- },
- },
+ }},
Name: "pear",
Container: "step-pear",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
Results: []v1.TaskRunResult{{
Name: "resultNameThree",
Type: v1.ResultsTypeString,
@@ -1637,13 +1307,11 @@ func TestMakeTaskRunStatus(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"resultNameThree","value":"","type":1}]`,
- },
- },
+ }},
Name: "pear",
Container: "step-pear",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
Results: []v1.TaskRunResult{{
Name: "resultNameThree",
Type: v1.ResultsTypeString,
@@ -1713,31 +1381,26 @@ func TestMakeTaskRunStatus(t *testing.T) {
Container: "step-first",
}, {
ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{},
- },
+ Terminated: &corev1.ContainerStateTerminated{}},
Name: "second",
Container: "step-second",
}, {
ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{},
- },
+ Terminated: &corev1.ContainerStateTerminated{}},
Name: "third",
Container: "step-third",
}, {
ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{},
- },
+ Terminated: &corev1.ContainerStateTerminated{}},
Name: "",
Container: "step-",
}, {
ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{},
- },
+ Terminated: &corev1.ContainerStateTerminated{}},
Name: "fourth",
Container: "step-fourth",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
// We don't actually care about the time, just that it's not nil
CompletionTime: &metav1.Time{Time: time.Now()},
},
@@ -1788,13 +1451,11 @@ func TestMakeTaskRunStatus(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
- },
- },
+ }},
Name: "second",
Container: "step-second",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
// We don't actually care about the time, just that it's not nil
CompletionTime: &metav1.Time{Time: time.Now()},
},
@@ -1862,8 +1523,7 @@ func TestMakeTaskRunStatus(t *testing.T) {
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 1,
- },
- },
+ }},
}},
ContainerStatuses: []corev1.ContainerStatus{{
Name: "step-A",
@@ -1890,7 +1550,6 @@ func TestMakeTaskRunStatus(t *testing.T) {
ImageID: "image-id-A",
}},
Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
CompletionTime: &metav1.Time{Time: time.Now()},
},
},
@@ -1913,17 +1572,15 @@ func TestMakeTaskRunStatus(t *testing.T) {
Phase: corev1.PodFailed,
Reason: "Evicted",
Message: `Usage of EmptyDir volume "ws-b6dfk" exceeds the limit "10Gi".`,
- ContainerStatuses: []corev1.ContainerStatus{
- {
- Name: "step-A",
- State: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- ExitCode: 137,
- },
+ ContainerStatuses: []corev1.ContainerStatus{{
+ Name: "step-A",
+ State: corev1.ContainerState{
+ Terminated: &corev1.ContainerStateTerminated{
+ ExitCode: 137,
},
},
},
- },
+ }},
},
want: v1.TaskRunStatus{
Status: statusFailure(v1.TaskRunReasonFailed.String(), "Usage of EmptyDir volume \"ws-b6dfk\" exceeds the limit \"10Gi\"."),
@@ -1932,13 +1589,11 @@ func TestMakeTaskRunStatus(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 137,
- },
- },
+ }},
Name: "A",
Container: "step-A",
}},
Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
CompletionTime: &metav1.Time{Time: time.Now()},
},
},
@@ -2230,13 +1885,11 @@ func TestMakeTaskRunStatusAlpha(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"digest","value":"sha256:1234","resourceName":"source-image"},{"key":"resultName","value":"","type":1}]`,
- },
- },
+ }},
Name: "bar",
Container: "step-bar",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
Results: []v1.TaskRunResult{{
Name: "resultName",
Type: v1.ResultsTypeString,
@@ -2274,13 +1927,11 @@ func TestMakeTaskRunStatusAlpha(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"digest","value":"sha256:1234","resourceName":"source-image"},{"key":"resultName","value":"hello","type":1}]`,
- },
- },
+ }},
Name: "bar",
Container: "step-bar",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
Results: []v1.TaskRunResult{{
Name: "resultName",
Type: v1.ResultsTypeString,
@@ -2318,13 +1969,11 @@ func TestMakeTaskRunStatusAlpha(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"digest","value":"sha256:1234","resourceName":"source-image"},{"key":"resultName","value":"[\"hello\",\"world\"]","type":1}]`,
- },
- },
+ }},
Name: "bar",
Container: "step-bar",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
Results: []v1.TaskRunResult{{
Name: "resultName",
Type: v1.ResultsTypeArray,
@@ -2362,13 +2011,11 @@ func TestMakeTaskRunStatusAlpha(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"digest","value":"sha256:1234","resourceName":"source-image"},{"key":"resultName","value":"{\"hello\":\"world\"}","type":1}]`,
- },
- },
+ }},
Name: "bar",
Container: "step-bar",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
Results: []v1.TaskRunResult{{
Name: "resultName",
Type: v1.ResultsTypeObject,
@@ -2410,13 +2057,11 @@ func TestMakeTaskRunStatusAlpha(t *testing.T) {
ContainerState: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
Message: `[{"key":"resultName","value":"{\"hello\":\"world\"}","type":1},{"key":"resultName2","value":"[\"hello\",\"world\"]","type":1}]`,
- },
- },
+ }},
Name: "bar",
Container: "step-bar",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
Results: []v1.TaskRunResult{{
Name: "resultName",
Type: v1.ResultsTypeString,
@@ -2546,39 +2191,34 @@ func TestMakeRunStatusJSONError(t *testing.T) {
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 1,
Message: "this is a non-json termination message. dont panic!",
- },
- },
+ }},
Name: "non-json",
Container: "step-non-json",
Results: []v1.TaskRunResult{},
ImageID: "image",
}, {
ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{},
- },
+ Terminated: &corev1.ContainerStateTerminated{}},
Name: "after-non-json",
Container: "step-after-non-json",
Results: []v1.TaskRunResult{},
ImageID: "image",
}, {
ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{},
- },
+ Terminated: &corev1.ContainerStateTerminated{}},
Name: "this-step-might-panic",
Container: "step-this-step-might-panic",
Results: []v1.TaskRunResult{},
ImageID: "image",
}, {
ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{},
- },
+ Terminated: &corev1.ContainerStateTerminated{}},
Name: "foo",
Container: "step-foo",
Results: []v1.TaskRunResult{},
ImageID: "image",
}},
- Sidecars: []v1.SidecarState{},
- Artifacts: &v1.Artifacts{},
+ Sidecars: []v1.SidecarState{},
// We don't actually care about the time, just that it's not nil
CompletionTime: &metav1.Time{Time: time.Now()},
},
@@ -2769,12 +2409,10 @@ func TestIsPodArchived(t *testing.T) {
}, {
name: "Pod is in the retriesStatus",
podName: "pod",
- retriesStatus: []v1.TaskRunStatus{
- {
- TaskRunStatusFields: v1.TaskRunStatusFields{
- PodName: "pod",
- },
- },
+ retriesStatus: []v1.TaskRunStatus{{
+ TaskRunStatusFields: v1.TaskRunStatusFields{
+ PodName: "pod",
+ }},
},
want: true,
}} {
@@ -2923,7 +2561,7 @@ func TestGetStepResultsFromSidecarLogs(t *testing.T) {
t.Errorf("did not expect an error but got: %v", err)
}
if d := cmp.Diff(want, got); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
}
@@ -2938,7 +2576,7 @@ func TestGetStepResultsFromSidecarLogs_Error(t *testing.T) {
_, err := getStepResultsFromSidecarLogs(sidecarLogResults, stepName)
wantErr := fmt.Errorf("invalid string %s-%s : expected somtthing that looks like .", stepName, resultName)
if d := cmp.Diff(wantErr.Error(), err.Error()); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
}
@@ -3162,6 +2800,6 @@ func TestGetTaskResultsFromSidecarLogs(t *testing.T) {
}}
got := getTaskResultsFromSidecarLogs(sidecarLogResults)
if d := cmp.Diff(want, got); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
}
diff --git a/upstream/pkg/pod/workingdir_init.go b/upstream/pkg/pod/workingdir_init.go
index 65da50b01e3..af001dc104d 100644
--- a/upstream/pkg/pod/workingdir_init.go
+++ b/upstream/pkg/pod/workingdir_init.go
@@ -60,9 +60,9 @@ func workingDirInit(workingdirinitImage string, stepContainers []corev1.Containe
// There are no workingDirs to initialize.
return nil
}
- securityContext := LinuxSecurityContext
+ securityContext := linuxSecurityContext
if windows {
- securityContext = WindowsSecurityContext
+ securityContext = windowsSecurityContext
}
c := &corev1.Container{
diff --git a/upstream/pkg/pod/workingdir_init_test.go b/upstream/pkg/pod/workingdir_init_test.go
index c4a7b3b723d..05f1f65f133 100644
--- a/upstream/pkg/pod/workingdir_init_test.go
+++ b/upstream/pkg/pod/workingdir_init_test.go
@@ -90,7 +90,7 @@ func TestWorkingDirInit(t *testing.T) {
Args: []string{"/workspace/bbb", "aaa", "zzz"},
WorkingDir: pipeline.WorkspaceDir,
VolumeMounts: implicitVolumeMounts,
- SecurityContext: LinuxSecurityContext,
+ SecurityContext: linuxSecurityContext,
},
}, {
desc: "workingDirs are unique and sorted, absolute dirs are ignored, uses windows",
@@ -144,7 +144,7 @@ func TestWorkingDirInit(t *testing.T) {
Args: []string{"/workspace/bbb", "aaa", "zzz"},
WorkingDir: pipeline.WorkspaceDir,
VolumeMounts: implicitVolumeMounts,
- SecurityContext: WindowsSecurityContext,
+ SecurityContext: windowsSecurityContext,
},
}} {
t.Run(c.desc, func(t *testing.T) {
diff --git a/upstream/pkg/reconciler/apiserver/apiserver.go b/upstream/pkg/reconciler/apiserver/apiserver.go
index 336774a5706..8489f6e12af 100644
--- a/upstream/pkg/reconciler/apiserver/apiserver.go
+++ b/upstream/pkg/reconciler/apiserver/apiserver.go
@@ -25,7 +25,7 @@ var (
// DryRunValidate validates the obj by issuing a dry-run create request for it in the given namespace.
// This allows validating admission webhooks to process the object without actually creating it.
// obj must be a v1/v1beta1 Task or Pipeline.
-func DryRunValidate(ctx context.Context, namespace string, obj runtime.Object, tekton clientset.Interface) (runtime.Object, error) {
+func DryRunValidate(ctx context.Context, namespace string, obj runtime.Object, tekton clientset.Interface) error {
dryRunObjName := uuid.NewString() // Use a randomized name for the Pipeline/Task in case there is already another Pipeline/Task of the same name
switch obj := obj.(type) {
@@ -33,61 +33,42 @@ func DryRunValidate(ctx context.Context, namespace string, obj runtime.Object, t
dryRunObj := obj.DeepCopy()
dryRunObj.Name = dryRunObjName
dryRunObj.Namespace = namespace // Make sure the namespace is the same as the PipelineRun
- mutatedObj, err := tekton.TektonV1().Pipelines(namespace).Create(ctx, dryRunObj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
- if err != nil {
- return nil, handleDryRunCreateErr(err, obj.Name)
+ if _, err := tekton.TektonV1().Pipelines(namespace).Create(ctx, dryRunObj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}}); err != nil {
+ return handleDryRunCreateErr(err, obj.Name)
}
- return mutatedObj, nil
case *v1beta1.Pipeline:
dryRunObj := obj.DeepCopy()
dryRunObj.Name = dryRunObjName
dryRunObj.Namespace = namespace // Make sure the namespace is the same as the PipelineRun
- mutatedObj, err := tekton.TektonV1beta1().Pipelines(namespace).Create(ctx, dryRunObj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
- if err != nil {
- return nil, handleDryRunCreateErr(err, obj.Name)
+ if _, err := tekton.TektonV1beta1().Pipelines(namespace).Create(ctx, dryRunObj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}}); err != nil {
+ return handleDryRunCreateErr(err, obj.Name)
}
- return mutatedObj, nil
+
case *v1.Task:
dryRunObj := obj.DeepCopy()
dryRunObj.Name = dryRunObjName
dryRunObj.Namespace = namespace // Make sure the namespace is the same as the TaskRun
- mutatedObj, err := tekton.TektonV1().Tasks(namespace).Create(ctx, dryRunObj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
- if err != nil {
- return nil, handleDryRunCreateErr(err, obj.Name)
+ if _, err := tekton.TektonV1().Tasks(namespace).Create(ctx, dryRunObj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}}); err != nil {
+ return handleDryRunCreateErr(err, obj.Name)
}
- return mutatedObj, nil
case *v1beta1.Task:
dryRunObj := obj.DeepCopy()
dryRunObj.Name = dryRunObjName
dryRunObj.Namespace = namespace // Make sure the namespace is the same as the TaskRun
- mutatedObj, err := tekton.TektonV1beta1().Tasks(namespace).Create(ctx, dryRunObj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
- if err != nil {
- return nil, handleDryRunCreateErr(err, obj.Name)
+ if _, err := tekton.TektonV1beta1().Tasks(namespace).Create(ctx, dryRunObj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}}); err != nil {
+ return handleDryRunCreateErr(err, obj.Name)
}
- return mutatedObj, nil
case *v1alpha1.StepAction:
dryRunObj := obj.DeepCopy()
dryRunObj.Name = dryRunObjName
dryRunObj.Namespace = namespace // Make sure the namespace is the same as the StepAction
- mutatedObj, err := tekton.TektonV1alpha1().StepActions(namespace).Create(ctx, dryRunObj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
- if err != nil {
- return nil, handleDryRunCreateErr(err, obj.Name)
+ if _, err := tekton.TektonV1alpha1().StepActions(namespace).Create(ctx, dryRunObj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}}); err != nil {
+ return handleDryRunCreateErr(err, obj.Name)
}
- return mutatedObj, nil
-
- case *v1beta1.StepAction:
- dryRunObj := obj.DeepCopy()
- dryRunObj.Name = dryRunObjName
- dryRunObj.Namespace = namespace // Make sure the namespace is the same as the StepAction
- mutatedObj, err := tekton.TektonV1beta1().StepActions(namespace).Create(ctx, dryRunObj, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
- if err != nil {
- return nil, handleDryRunCreateErr(err, obj.Name)
- }
- return mutatedObj, nil
-
default:
- return nil, fmt.Errorf("unsupported object GVK %s", obj.GetObjectKind().GroupVersionKind())
+ return fmt.Errorf("unsupported object GVK %s", obj.GetObjectKind().GroupVersionKind())
}
+ return nil
}
func handleDryRunCreateErr(err error, objectName string) error {
diff --git a/upstream/pkg/reconciler/apiserver/apiserver_test.go b/upstream/pkg/reconciler/apiserver/apiserver_test.go
index 06f91abd8c5..7233a145d27 100644
--- a/upstream/pkg/reconciler/apiserver/apiserver_test.go
+++ b/upstream/pkg/reconciler/apiserver/apiserver_test.go
@@ -12,7 +12,6 @@ import (
"github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake"
"github.com/tektoncd/pipeline/pkg/reconciler/apiserver"
apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/validation/field"
@@ -24,47 +23,33 @@ func TestDryRunCreate_Valid_DifferentGVKs(t *testing.T) {
name string
obj runtime.Object
wantErr bool
- wantObj runtime.Object
}{{
- name: "v1 task",
- obj: &v1.Task{},
- wantObj: &v1.Task{},
+ name: "v1 task",
+ obj: &v1.Task{},
}, {
- name: "v1beta1 task",
- obj: &v1beta1.Task{},
- wantObj: &v1beta1.Task{},
- }, {
- name: "v1 pipeline",
- obj: &v1.Pipeline{},
- wantObj: &v1.Pipeline{},
+ name: "v1beta1 task",
+ obj: &v1beta1.Task{},
}, {
- name: "v1beta1 pipeline",
- obj: &v1beta1.Pipeline{},
- wantObj: &v1beta1.Pipeline{},
+ name: "v1 pipeline",
+ obj: &v1.Pipeline{},
}, {
- name: "v1alpha1 stepaction",
- obj: &v1alpha1.StepAction{},
- wantObj: &v1alpha1.StepAction{},
+ name: "v1beta1 pipeline",
+ obj: &v1beta1.Pipeline{},
}, {
- name: "v1beta1 stepaction",
- obj: &v1beta1.StepAction{},
- wantObj: &v1beta1.StepAction{},
+ name: "v1alpha1 stepaction",
+ obj: &v1alpha1.StepAction{},
}, {
name: "unsupported gvk",
obj: &v1beta1.ClusterTask{},
wantErr: true,
- wantObj: nil,
}}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
tektonclient := fake.NewSimpleClientset()
- mutatedObj, err := apiserver.DryRunValidate(context.Background(), "default", tc.obj, tektonclient)
+ err := apiserver.DryRunValidate(context.Background(), "default", tc.obj, tektonclient)
if (err != nil) != tc.wantErr {
t.Errorf("wantErr was %t but got err %v", tc.wantErr, err)
}
- if d := cmp.Diff(tc.wantObj, mutatedObj, cmpopts.IgnoreFields(metav1.ObjectMeta{}, "Name", "Namespace")); d != "" {
- t.Errorf("wrong object: %s", d)
- }
})
}
}
@@ -94,10 +79,6 @@ func TestDryRunCreate_Invalid_DifferentGVKs(t *testing.T) {
name: "v1alpha1 stepaction",
obj: &v1alpha1.StepAction{},
wantErr: apiserver.ErrReferencedObjectValidationFailed,
- }, {
- name: "v1beta1 stepaction",
- obj: &v1beta1.StepAction{},
- wantErr: apiserver.ErrReferencedObjectValidationFailed,
}, {
name: "unsupported gvk",
obj: &v1beta1.ClusterTask{},
@@ -115,7 +96,7 @@ func TestDryRunCreate_Invalid_DifferentGVKs(t *testing.T) {
tektonclient.PrependReactor("create", "stepactions", func(action ktesting.Action) (bool, runtime.Object, error) {
return true, nil, apierrors.NewBadRequest("bad request")
})
- _, err := apiserver.DryRunValidate(context.Background(), "default", tc.obj, tektonclient)
+ err := apiserver.DryRunValidate(context.Background(), "default", tc.obj, tektonclient)
if d := cmp.Diff(tc.wantErr, err, cmpopts.EquateErrors()); d != "" {
t.Errorf("wrong error: %s", d)
}
@@ -162,7 +143,7 @@ func TestDryRunCreate_DifferentErrTypes(t *testing.T) {
tektonclient.PrependReactor("create", "tasks", func(action ktesting.Action) (bool, runtime.Object, error) {
return true, nil, tc.webhookErr
})
- _, err := apiserver.DryRunValidate(context.Background(), "default", &v1.Task{}, tektonclient)
+ err := apiserver.DryRunValidate(context.Background(), "default", &v1.Task{}, tektonclient)
if d := cmp.Diff(tc.wantErr, err, cmpopts.EquateErrors()); d != "" {
t.Errorf("wrong error: %s", d)
}
diff --git a/upstream/pkg/reconciler/events/cloudevent/cloud_event_controller_test.go b/upstream/pkg/reconciler/events/cloudevent/cloud_event_controller_test.go
index 907e032601b..f44e0a7c96c 100644
--- a/upstream/pkg/reconciler/events/cloudevent/cloud_event_controller_test.go
+++ b/upstream/pkg/reconciler/events/cloudevent/cloud_event_controller_test.go
@@ -106,7 +106,7 @@ func TestSendCloudEventWithRetries(t *testing.T) {
ceClient.CheckCloudEventsUnordered(t, tc.name, tc.wantCEvents)
recorder := controller.GetEventRecorder(ctx).(*record.FakeRecorder)
if err := k8sevent.CheckEventsOrdered(t, recorder.Events, tc.name, tc.wantEvents); err != nil {
- t.Fatal(err.Error())
+ t.Fatalf(err.Error())
}
})
}
@@ -223,7 +223,7 @@ func TestEmitCloudEvents(t *testing.T) {
recorder := controller.GetEventRecorder(ctx).(*record.FakeRecorder)
cloudevent.EmitCloudEvents(ctx, object)
if err := k8sevent.CheckEventsOrdered(t, recorder.Events, tc.name, tc.wantEvents); err != nil {
- t.Fatal(err.Error())
+ t.Fatalf(err.Error())
}
fakeClient.CheckCloudEventsUnordered(t, tc.name, tc.wantCloudEvents)
}
diff --git a/upstream/pkg/reconciler/events/cloudevent/cloudeventsfakeclient.go b/upstream/pkg/reconciler/events/cloudevent/cloudeventsfakeclient.go
index 4eabf274194..92692943918 100644
--- a/upstream/pkg/reconciler/events/cloudevent/cloudeventsfakeclient.go
+++ b/upstream/pkg/reconciler/events/cloudevent/cloudeventsfakeclient.go
@@ -111,7 +111,7 @@ func (c *FakeClient) CheckCloudEventsUnordered(t *testing.T, testName string, wa
// extra events are prevented in FakeClient's Send function.
// fewer events are detected because we collect all events from channel and compare with wantEvents
- for range channelEvents {
+ for eventCount := 0; eventCount < channelEvents; eventCount++ {
event := <-c.events
if len(expected) == 0 {
t.Errorf("extra event received: %q", event)
diff --git a/upstream/pkg/reconciler/events/event_test.go b/upstream/pkg/reconciler/events/event_test.go
index ca0135a4ae4..5e790d347cd 100644
--- a/upstream/pkg/reconciler/events/event_test.go
+++ b/upstream/pkg/reconciler/events/event_test.go
@@ -109,7 +109,7 @@ func TestEmit(t *testing.T) {
recorder := controller.GetEventRecorder(ctx).(*record.FakeRecorder)
events.Emit(ctx, nil, after, object)
if err := k8sevent.CheckEventsOrdered(t, recorder.Events, tc.name, tc.wantEvents); err != nil {
- t.Fatal(err.Error())
+ t.Fatalf(err.Error())
}
fakeClient.CheckCloudEventsUnordered(t, tc.name, tc.wantCloudEvents)
}
diff --git a/upstream/pkg/reconciler/events/k8sevent/event_test.go b/upstream/pkg/reconciler/events/k8sevent/event_test.go
index db4f4fd2016..612c4177a31 100644
--- a/upstream/pkg/reconciler/events/k8sevent/event_test.go
+++ b/upstream/pkg/reconciler/events/k8sevent/event_test.go
@@ -180,7 +180,7 @@ func TestEmitK8sEventsOnConditions(t *testing.T) {
k8sevents.EmitK8sEvents(ctx, ts.before, ts.after, tr)
err := k8sevents.CheckEventsOrdered(t, recorder.Events, ts.name, ts.wantEvents)
if err != nil {
- t.Error(err.Error())
+ t.Errorf(err.Error())
}
}
}
@@ -237,7 +237,7 @@ func TestEmitK8sEvents(t *testing.T) {
recorder := controller.GetEventRecorder(ctx).(*record.FakeRecorder)
k8sevents.EmitK8sEvents(ctx, nil, after, object)
if err := k8sevents.CheckEventsOrdered(t, recorder.Events, tc.name, tc.wantEvents); err != nil {
- t.Fatal(err.Error())
+ t.Fatalf(err.Error())
}
}
}
@@ -263,7 +263,7 @@ func TestEmitError(t *testing.T) {
k8sevents.EmitError(fr, ts.err, tr)
err := k8sevents.CheckEventsOrdered(t, fr.Events, ts.name, ts.wantEvents)
if err != nil {
- t.Error(err.Error())
+ t.Errorf(err.Error())
}
}
}
diff --git a/upstream/pkg/reconciler/events/k8sevent/events.go b/upstream/pkg/reconciler/events/k8sevent/events.go
index 048308de881..3b35370cea2 100644
--- a/upstream/pkg/reconciler/events/k8sevent/events.go
+++ b/upstream/pkg/reconciler/events/k8sevent/events.go
@@ -46,7 +46,7 @@ func eventsFromChannel(c chan string, wantEvents []string) error {
// on the channel forever if fewer than expected events are received
timer := time.After(wait.ForeverTestTimeout)
foundEvents := []string{}
- for ii := range wantEvents {
+ for ii := 0; ii < len(wantEvents); ii++ {
// We loop over all the events that we expect. Once they are all received
// we exit the loop. If we never receive enough events, the timeout takes us
// out of the loop.
diff --git a/upstream/pkg/reconciler/pipeline/dag/dag_test.go b/upstream/pkg/reconciler/pipeline/dag/dag_test.go
index bf85b7d4f59..056f5b73a39 100644
--- a/upstream/pkg/reconciler/pipeline/dag/dag_test.go
+++ b/upstream/pkg/reconciler/pipeline/dag/dag_test.go
@@ -219,8 +219,7 @@ func TestBuild_JoinMultipleRoots(t *testing.T) {
"c": nodeC,
"x": nodeX,
"y": nodeY,
- "z": nodeZ,
- },
+ "z": nodeZ},
}
p := &v1.Pipeline{
ObjectMeta: metav1.ObjectMeta{Name: "pipeline"},
@@ -545,76 +544,75 @@ func TestBuild_InvalidDAG(t *testing.T) {
name string
spec v1.PipelineSpec
err string
- }{
- {
- // a
- // |
- // a ("a" uses result of "a" as params)
- name: "self-link-result",
- spec: v1.PipelineSpec{Tasks: []v1.PipelineTask{selfLinkResult}},
- err: "cycle detected",
- }, {
- // a
- // |
- // a ("a" runAfter "a")
- name: "self-link-after",
- spec: v1.PipelineSpec{Tasks: []v1.PipelineTask{selfLinkAfter}},
- err: "cycle detected",
- }, {
- // a (also "a" depends on resource from "z")
- // |
- // x ("x" depends on resource from "a")
- // |
- // z ("z" depends on resource from "x")
- name: "cycle-from",
- spec: v1.PipelineSpec{Tasks: []v1.PipelineTask{xDependsOnA, zDependsOnX, aDependsOnZ}},
- err: "cycle detected",
- }, {
- // a (also "a" runAfter "z")
- // |
- // x ("x" runAfter "a")
- // |
- // z ("z" runAfter "x")
- name: "cycle-runAfter",
- spec: v1.PipelineSpec{Tasks: []v1.PipelineTask{xAfterA, zAfterX, aAfterZ}},
- err: "cycle detected",
- }, {
- // a (also "a" depends on resource from "z")
- // |
- // x ("x" depends on resource from "a")
- // |
- // z ("z" runAfter "x")
- name: "cycle-both",
- spec: v1.PipelineSpec{Tasks: []v1.PipelineTask{xDependsOnA, zAfterX, aDependsOnZ}},
- err: "cycle detected",
- }, {
- // This test make sure we detect a cyclic branch in a DAG with multiple branches.
- // The following DAG is having a cyclic branch with an additional dependency (a runAfter e)
- // a
- // / \
- // b c
- // \ /
- // d
- // / \
- // e f
- // |
- // g
- name: "multiple-branches-with-one-cyclic-branch",
- spec: v1.PipelineSpec{Tasks: []v1.PipelineTask{aRunsAfterE, bDependsOnA, cRunsAfterA, dDependsOnBAndC, eRunsAfterD, fRunsAfterD, gDependsOnF}},
- err: "cycle detected",
- }, {
- name: "duplicate-tasks",
- spec: v1.PipelineSpec{Tasks: []v1.PipelineTask{a, a}},
- err: "duplicate pipeline task",
- }, {
- name: "invalid-task-result",
- spec: v1.PipelineSpec{Tasks: []v1.PipelineTask{invalidTaskResult}},
- err: "wasn't present in Pipeline",
- }, {
- name: "invalid-task-name-after",
- spec: v1.PipelineSpec{Tasks: []v1.PipelineTask{invalidTaskAfter}},
- err: "wasn't present in Pipeline",
- },
+ }{{
+ // a
+ // |
+ // a ("a" uses result of "a" as params)
+ name: "self-link-result",
+ spec: v1.PipelineSpec{Tasks: []v1.PipelineTask{selfLinkResult}},
+ err: "cycle detected",
+ }, {
+ // a
+ // |
+ // a ("a" runAfter "a")
+ name: "self-link-after",
+ spec: v1.PipelineSpec{Tasks: []v1.PipelineTask{selfLinkAfter}},
+ err: "cycle detected",
+ }, {
+ // a (also "a" depends on resource from "z")
+ // |
+ // x ("x" depends on resource from "a")
+ // |
+ // z ("z" depends on resource from "x")
+ name: "cycle-from",
+ spec: v1.PipelineSpec{Tasks: []v1.PipelineTask{xDependsOnA, zDependsOnX, aDependsOnZ}},
+ err: "cycle detected",
+ }, {
+ // a (also "a" runAfter "z")
+ // |
+ // x ("x" runAfter "a")
+ // |
+ // z ("z" runAfter "x")
+ name: "cycle-runAfter",
+ spec: v1.PipelineSpec{Tasks: []v1.PipelineTask{xAfterA, zAfterX, aAfterZ}},
+ err: "cycle detected",
+ }, {
+ // a (also "a" depends on resource from "z")
+ // |
+ // x ("x" depends on resource from "a")
+ // |
+ // z ("z" runAfter "x")
+ name: "cycle-both",
+ spec: v1.PipelineSpec{Tasks: []v1.PipelineTask{xDependsOnA, zAfterX, aDependsOnZ}},
+ err: "cycle detected",
+ }, {
+ // This test make sure we detect a cyclic branch in a DAG with multiple branches.
+ // The following DAG is having a cyclic branch with an additional dependency (a runAfter e)
+ // a
+ // / \
+ // b c
+ // \ /
+ // d
+ // / \
+ // e f
+ // |
+ // g
+ name: "multiple-branches-with-one-cyclic-branch",
+ spec: v1.PipelineSpec{Tasks: []v1.PipelineTask{aRunsAfterE, bDependsOnA, cRunsAfterA, dDependsOnBAndC, eRunsAfterD, fRunsAfterD, gDependsOnF}},
+ err: "cycle detected",
+ }, {
+ name: "duplicate-tasks",
+ spec: v1.PipelineSpec{Tasks: []v1.PipelineTask{a, a}},
+ err: "duplicate pipeline task",
+ }, {
+ name: "invalid-task-result",
+ spec: v1.PipelineSpec{Tasks: []v1.PipelineTask{invalidTaskResult}},
+ err: "wasn't present in Pipeline",
+ }, {
+ name: "invalid-task-name-after",
+ spec: v1.PipelineSpec{Tasks: []v1.PipelineTask{invalidTaskAfter}},
+ err: "wasn't present in Pipeline",
+ },
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
@@ -638,7 +636,7 @@ func TestBuildGraphWithHundredsOfTasks_Success(t *testing.T) {
// ..
// b04 - 000 - 001 - ... - 100
nBranches, nTasks := 5, 100
- for branchIdx := range nBranches {
+ for branchIdx := 0; branchIdx < nBranches; branchIdx++ {
var taskDeps []string
firstTaskName := fmt.Sprintf("b%02d", branchIdx)
firstTask := v1.PipelineTask{
@@ -648,7 +646,7 @@ func TestBuildGraphWithHundredsOfTasks_Success(t *testing.T) {
}
tasks = append(tasks, firstTask)
taskDeps = append(taskDeps, firstTaskName)
- for taskIdx := range nTasks {
+ for taskIdx := 0; taskIdx < nTasks; taskIdx++ {
taskName := fmt.Sprintf("%s-%03d", firstTaskName, taskIdx)
task := v1.PipelineTask{
Name: taskName,
@@ -670,7 +668,7 @@ func TestBuildGraphWithHundredsOfTasks_InvalidDAG(t *testing.T) {
var tasks []v1.PipelineTask
// branches with circular interdependencies
nBranches, nTasks := 5, 100
- for branchIdx := range nBranches {
+ for branchIdx := 0; branchIdx < nBranches; branchIdx++ {
depBranchIdx := branchIdx + 1
if depBranchIdx == nBranches {
depBranchIdx = 0
@@ -684,7 +682,7 @@ func TestBuildGraphWithHundredsOfTasks_InvalidDAG(t *testing.T) {
}
tasks = append(tasks, firstTask)
taskDeps = append(taskDeps, firstTaskName)
- for taskIdx := range nTasks {
+ for taskIdx := 0; taskIdx < nTasks; taskIdx++ {
taskName := fmt.Sprintf("%s-%03d", firstTaskName, taskIdx)
task := v1.PipelineTask{
Name: taskName,
@@ -801,38 +799,37 @@ func TestFindCyclesInDependencies(t *testing.T) {
name string
deps map[string][]string
err string
- }{
- {
- name: "valid-empty-deps",
- deps: map[string][]string{
- "a": {},
- "b": {"c", "d"},
- "c": {},
- "d": {},
- },
- }, {
- name: "self-link",
- deps: map[string][]string{
- "a": {"a"},
- },
- err: `task "a" depends on "a"`,
- }, {
- name: "interdependent-tasks",
- deps: map[string][]string{
- "a": {"b"},
- "b": {"a"},
- },
- err: `task "a" depends on "b"`,
- }, {
- name: "multiple-cycles",
- deps: map[string][]string{
- "a": {"b", "c"},
- "b": {"a"},
- "c": {"d"},
- "d": {"a", "b"},
- },
- err: `task "a" depends on "b", "c"`,
+ }{{
+ name: "valid-empty-deps",
+ deps: map[string][]string{
+ "a": {},
+ "b": {"c", "d"},
+ "c": {},
+ "d": {},
+ },
+ }, {
+ name: "self-link",
+ deps: map[string][]string{
+ "a": {"a"},
+ },
+ err: `task "a" depends on "a"`,
+ }, {
+ name: "interdependent-tasks",
+ deps: map[string][]string{
+ "a": {"b"},
+ "b": {"a"},
+ },
+ err: `task "a" depends on "b"`,
+ }, {
+ name: "multiple-cycles",
+ deps: map[string][]string{
+ "a": {"b", "c"},
+ "b": {"a"},
+ "c": {"d"},
+ "d": {"a", "b"},
},
+ err: `task "a" depends on "b", "c"`,
+ },
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
diff --git a/upstream/pkg/reconciler/pipelinerun/affinity_assistant.go b/upstream/pkg/reconciler/pipelinerun/affinity_assistant.go
index 94aab0951ab..87ff43fe3b8 100644
--- a/upstream/pkg/reconciler/pipelinerun/affinity_assistant.go
+++ b/upstream/pkg/reconciler/pipelinerun/affinity_assistant.go
@@ -29,7 +29,6 @@ import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/internal/affinityassistant"
aa "github.com/tektoncd/pipeline/pkg/internal/affinityassistant"
- pipelinePod "github.com/tektoncd/pipeline/pkg/pod"
"github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
"github.com/tektoncd/pipeline/pkg/workspace"
appsv1 "k8s.io/api/apps/v1"
@@ -140,12 +139,7 @@ func (c *Reconciler) createOrUpdateAffinityAssistant(ctx context.Context, affini
if err != nil {
return []error{err}
}
- affinityAssistantContainerConfig := aa.ContainerConfig{
- Image: c.Images.NopImage,
- SetSecurityContext: cfg.FeatureFlags.SetSecurityContext,
- }
-
- affinityAssistantStatefulSet := affinityAssistantStatefulSet(aaBehavior, affinityAssistantName, pr, claimTemplates, claimNames, affinityAssistantContainerConfig, cfg.Defaults.DefaultAAPodTemplate)
+ affinityAssistantStatefulSet := affinityAssistantStatefulSet(aaBehavior, affinityAssistantName, pr, claimTemplates, claimNames, c.Images.NopImage, cfg.Defaults.DefaultAAPodTemplate)
_, err = c.KubeClientSet.AppsV1().StatefulSets(pr.Namespace).Create(ctx, affinityAssistantStatefulSet, metav1.CreateOptions{})
if err != nil {
errs = append(errs, fmt.Errorf("failed to create StatefulSet %s: %w", affinityAssistantName, err))
@@ -287,7 +281,7 @@ func getStatefulSetLabels(pr *v1.PipelineRun, affinityAssistantName string) map[
// with the given AffinityAssistantTemplate applied to the StatefulSet PodTemplateSpec.
// The VolumeClaimTemplates and Volume of StatefulSet reference the PipelineRun WorkspaceBinding VolumeClaimTempalte and the PVCs respectively.
// The PVs created by the StatefulSet are scheduled to the same availability zone which avoids PV scheduling conflict.
-func affinityAssistantStatefulSet(aaBehavior aa.AffinityAssistantBehavior, name string, pr *v1.PipelineRun, claimTemplates []corev1.PersistentVolumeClaim, claimNames []string, containerConfig aa.ContainerConfig, defaultAATpl *pod.AffinityAssistantTemplate) *appsv1.StatefulSet {
+func affinityAssistantStatefulSet(aaBehavior aa.AffinityAssistantBehavior, name string, pr *v1.PipelineRun, claimTemplates []corev1.PersistentVolumeClaim, claimNames []string, affinityAssistantImage string, defaultAATpl *pod.AffinityAssistantTemplate) *appsv1.StatefulSet {
// We want a singleton pod
replicas := int32(1)
@@ -302,23 +296,9 @@ func affinityAssistantStatefulSet(aaBehavior aa.AffinityAssistantBehavior, name
mounts = append(mounts, corev1.VolumeMount{Name: claimTemplate.Name, MountPath: claimTemplate.Name})
}
- securityContext := &corev1.SecurityContext{}
- if containerConfig.SetSecurityContext {
- securityContext = pipelinePod.LinuxSecurityContext
-
- if tpl.NodeSelector[pipelinePod.OsSelectorLabel] == "windows" {
- securityContext = pipelinePod.WindowsSecurityContext
- }
- }
-
- var priorityClassName string
- if tpl.PriorityClassName != nil {
- priorityClassName = *tpl.PriorityClassName
- }
-
containers := []corev1.Container{{
Name: "affinity-assistant",
- Image: containerConfig.Image,
+ Image: affinityAssistantImage,
Args: []string{"tekton_run_indefinitely"},
// Set requests == limits to get QoS class _Guaranteed_.
@@ -334,8 +314,7 @@ func affinityAssistantStatefulSet(aaBehavior aa.AffinityAssistantBehavior, name
"memory": resource.MustParse("100Mi"),
},
},
- SecurityContext: securityContext,
- VolumeMounts: mounts,
+ VolumeMounts: mounts,
}}
var volumes []corev1.Volume
@@ -380,11 +359,9 @@ func affinityAssistantStatefulSet(aaBehavior aa.AffinityAssistantBehavior, name
Spec: corev1.PodSpec{
Containers: containers,
- Tolerations: tpl.Tolerations,
- NodeSelector: tpl.NodeSelector,
- ImagePullSecrets: tpl.ImagePullSecrets,
- SecurityContext: tpl.SecurityContext,
- PriorityClassName: priorityClassName,
+ Tolerations: tpl.Tolerations,
+ NodeSelector: tpl.NodeSelector,
+ ImagePullSecrets: tpl.ImagePullSecrets,
Affinity: getAssistantAffinityMergedWithPodTemplateAffinity(pr, aaBehavior),
Volumes: volumes,
diff --git a/upstream/pkg/reconciler/pipelinerun/affinity_assistant_test.go b/upstream/pkg/reconciler/pipelinerun/affinity_assistant_test.go
index 35c22df73de..d20a18526c0 100644
--- a/upstream/pkg/reconciler/pipelinerun/affinity_assistant_test.go
+++ b/upstream/pkg/reconciler/pipelinerun/affinity_assistant_test.go
@@ -22,8 +22,6 @@ import (
"fmt"
"testing"
- "knative.dev/pkg/ptr"
-
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/tektoncd/pipeline/pkg/apis/config"
@@ -32,7 +30,6 @@ import (
"github.com/tektoncd/pipeline/pkg/apis/pipeline/pod"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
aa "github.com/tektoncd/pipeline/pkg/internal/affinityassistant"
- pipelinePod "github.com/tektoncd/pipeline/pkg/pod"
"github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
"github.com/tektoncd/pipeline/pkg/workspace"
"github.com/tektoncd/pipeline/test/diff"
@@ -52,21 +49,11 @@ import (
_ "knative.dev/pkg/system/testing" // Setup system.Namespace()
)
-var (
- podSpecFilter cmp.Option = cmpopts.IgnoreFields(corev1.PodSpec{}, "Affinity")
- podTemplateSpecFilter cmp.Option = cmpopts.IgnoreFields(corev1.PodTemplateSpec{}, "ObjectMeta")
- podContainerFilter cmp.Option = cmpopts.IgnoreFields(corev1.Container{}, "Resources", "Args", "VolumeMounts")
+var podSpecFilter cmp.Option = cmpopts.IgnoreFields(corev1.PodSpec{}, "Containers", "Affinity")
+var podTemplateSpecFilter cmp.Option = cmpopts.IgnoreFields(corev1.PodTemplateSpec{}, "ObjectMeta")
- containerConfigWithoutSecurityContext = aa.ContainerConfig{
- Image: "nginx",
- SetSecurityContext: false,
- }
-)
-
-var (
- workspacePVCName = "test-workspace-pvc"
- workspaceVolumeClaimTemplateName = "test-workspace-vct"
-)
+var workspacePVCName = "test-workspace-pvc"
+var workspaceVolumeClaimTemplateName = "test-workspace-vct"
var testPRWithPVC = &v1.PipelineRun{
TypeMeta: metav1.TypeMeta{Kind: "PipelineRun"},
@@ -82,7 +69,6 @@ var testPRWithPVC = &v1.PipelineRun{
}},
},
}
-
var testPRWithVolumeClaimTemplate = &v1.PipelineRun{
TypeMeta: metav1.TypeMeta{Kind: "PipelineRun"},
ObjectMeta: metav1.ObjectMeta{
@@ -95,27 +81,23 @@ var testPRWithVolumeClaimTemplate = &v1.PipelineRun{
}},
},
}
-
var testPRWithVolumeClaimTemplateAndPVC = &v1.PipelineRun{
TypeMeta: metav1.TypeMeta{Kind: "PipelineRun"},
ObjectMeta: metav1.ObjectMeta{
Name: "pipelinerun-with-volumeClaimTemplate-and-pvc",
},
Spec: v1.PipelineRunSpec{
- Workspaces: []v1.WorkspaceBinding{
- {
- Name: workspaceVolumeClaimTemplateName,
- VolumeClaimTemplate: &corev1.PersistentVolumeClaim{},
- }, {
- Name: workspacePVCName,
- PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
- ClaimName: "myclaim",
- },
- },
+ Workspaces: []v1.WorkspaceBinding{{
+ Name: workspaceVolumeClaimTemplateName,
+ VolumeClaimTemplate: &corev1.PersistentVolumeClaim{},
+ }, {
+ Name: workspacePVCName,
+ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: "myclaim",
+ }},
},
},
}
-
var testPRWithEmptyDir = &v1.PipelineRun{
ObjectMeta: metav1.ObjectMeta{Name: "pipelinerun-with-emptyDir"},
Spec: v1.PipelineRunSpec{
@@ -126,31 +108,14 @@ var testPRWithEmptyDir = &v1.PipelineRun{
},
}
-var testPRWithWindowsOs = &v1.PipelineRun{
- ObjectMeta: metav1.ObjectMeta{Name: "pipelinerun-with-windows"},
- Spec: v1.PipelineRunSpec{
- TaskRunTemplate: v1.PipelineTaskRunTemplate{
- PodTemplate: &pod.PodTemplate{
- NodeSelector: map[string]string{pipelinePod.OsSelectorLabel: "windows"},
- },
- },
- Workspaces: []v1.WorkspaceBinding{{
- Name: "EmptyDir Workspace",
- EmptyDir: &corev1.EmptyDirVolumeSource{},
- }},
- },
-}
-
// TestCreateOrUpdateAffinityAssistantsAndPVCsPerPipelineRun tests to create and delete Affinity Assistants and PVCs
// per pipelinerun for a given PipelineRun
func TestCreateOrUpdateAffinityAssistantsAndPVCsPerPipelineRun(t *testing.T) {
replicas := int32(1)
-
tests := []struct {
name string
pr *v1.PipelineRun
expectStatefulSetSpec *appsv1.StatefulSetSpec
- featureFlags map[string]string
}{{
name: "PersistentVolumeClaim Workspace type",
pr: testPRWithPVC,
@@ -165,10 +130,6 @@ func TestCreateOrUpdateAffinityAssistantsAndPVCsPerPipelineRun(t *testing.T) {
},
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
- Containers: []corev1.Container{{
- Name: "affinity-assistant",
- SecurityContext: &corev1.SecurityContext{},
- }},
Volumes: []corev1.Volume{{
Name: "workspace-0",
VolumeSource: corev1.VolumeSource{
@@ -190,14 +151,6 @@ func TestCreateOrUpdateAffinityAssistantsAndPVCsPerPipelineRun(t *testing.T) {
workspace.LabelComponent: workspace.ComponentNameAffinityAssistant,
},
},
- Template: corev1.PodTemplateSpec{
- Spec: corev1.PodSpec{
- Containers: []corev1.Container{{
- Name: "affinity-assistant",
- SecurityContext: &corev1.SecurityContext{},
- }},
- },
- },
VolumeClaimTemplates: []corev1.PersistentVolumeClaim{{
ObjectMeta: metav1.ObjectMeta{Name: "pvc-b9eea16dce"},
}},
@@ -219,10 +172,6 @@ func TestCreateOrUpdateAffinityAssistantsAndPVCsPerPipelineRun(t *testing.T) {
}},
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
- Containers: []corev1.Container{{
- Name: "affinity-assistant",
- SecurityContext: &corev1.SecurityContext{},
- }},
Volumes: []corev1.Volume{{
Name: "workspace-0",
VolumeSource: corev1.VolumeSource{
@@ -244,79 +193,17 @@ func TestCreateOrUpdateAffinityAssistantsAndPVCsPerPipelineRun(t *testing.T) {
workspace.LabelComponent: workspace.ComponentNameAffinityAssistant,
},
},
- Template: corev1.PodTemplateSpec{
- Spec: corev1.PodSpec{
- Containers: []corev1.Container{{
- Name: "affinity-assistant",
- SecurityContext: &corev1.SecurityContext{},
- }},
- },
- },
- },
- }, {
- name: "securityContext feature enabled and os is Windows",
- pr: testPRWithWindowsOs,
- featureFlags: map[string]string{
- "set-security-context": "true",
- },
- expectStatefulSetSpec: &appsv1.StatefulSetSpec{
- Replicas: &replicas,
- Selector: &metav1.LabelSelector{
- MatchLabels: map[string]string{
- pipeline.PipelineRunLabelKey: testPRWithWindowsOs.Name,
- workspace.LabelInstance: "affinity-assistant-01cecfbdec",
- workspace.LabelComponent: workspace.ComponentNameAffinityAssistant,
- },
- },
- Template: corev1.PodTemplateSpec{
- Spec: corev1.PodSpec{
- NodeSelector: map[string]string{pipelinePod.OsSelectorLabel: "windows"},
- Containers: []corev1.Container{{
- Name: "affinity-assistant",
- SecurityContext: pipelinePod.WindowsSecurityContext,
- }},
- },
- },
- },
- }, {
- name: "securityContext feature enabled and os is Linux",
- pr: testPRWithEmptyDir,
- featureFlags: map[string]string{
- "set-security-context": "true",
- },
- expectStatefulSetSpec: &appsv1.StatefulSetSpec{
- Replicas: &replicas,
- Selector: &metav1.LabelSelector{
- MatchLabels: map[string]string{
- pipeline.PipelineRunLabelKey: testPRWithEmptyDir.Name,
- workspace.LabelInstance: "affinity-assistant-c655a0c8a2",
- workspace.LabelComponent: workspace.ComponentNameAffinityAssistant,
- },
- },
- Template: corev1.PodTemplateSpec{
- Spec: corev1.PodSpec{
- Containers: []corev1.Container{{
- Name: "affinity-assistant",
- SecurityContext: pipelinePod.LinuxSecurityContext,
- }},
- },
- },
},
}}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
- featureFlags := map[string]string{
+ configMap := map[string]string{
"disable-affinity-assistant": "true",
"coschedule": "pipelineruns",
}
-
- for k, v := range tc.featureFlags {
- featureFlags[k] = v
- }
-
kubeClientSet := fakek8s.NewSimpleClientset()
- ctx := cfgtesting.SetFeatureFlags(context.Background(), t, featureFlags)
+ ctx := cfgtesting.SetFeatureFlags(context.Background(), t, configMap)
c := Reconciler{
KubeClientSet: kubeClientSet,
pvcHandler: volumeclaim.NewPVCHandler(kubeClientSet, zap.NewExample().Sugar()),
@@ -368,10 +255,6 @@ func TestCreateOrUpdateAffinityAssistantsAndPVCsPerWorkspaceOrDisabled(t *testin
},
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
- Containers: []corev1.Container{{
- Name: "affinity-assistant",
- SecurityContext: &corev1.SecurityContext{},
- }},
Volumes: []corev1.Volume{{
Name: "workspace-0",
VolumeSource: corev1.VolumeSource{
@@ -397,10 +280,6 @@ func TestCreateOrUpdateAffinityAssistantsAndPVCsPerWorkspaceOrDisabled(t *testin
},
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
- Containers: []corev1.Container{{
- Name: "affinity-assistant",
- SecurityContext: &corev1.SecurityContext{},
- }},
Volumes: []corev1.Volume{{
Name: "workspace-0",
VolumeSource: corev1.VolumeSource{
@@ -431,10 +310,6 @@ func TestCreateOrUpdateAffinityAssistantsAndPVCsPerWorkspaceOrDisabled(t *testin
},
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
- Containers: []corev1.Container{{
- Name: "affinity-assistant",
- SecurityContext: &corev1.SecurityContext{},
- }},
Volumes: []corev1.Volume{{
Name: "workspace-0",
VolumeSource: corev1.VolumeSource{
@@ -442,8 +317,7 @@ func TestCreateOrUpdateAffinityAssistantsAndPVCsPerWorkspaceOrDisabled(t *testin
},
}},
},
- },
- }, {
+ }}, {
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
@@ -454,10 +328,6 @@ func TestCreateOrUpdateAffinityAssistantsAndPVCsPerWorkspaceOrDisabled(t *testin
},
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
- Containers: []corev1.Container{{
- Name: "affinity-assistant",
- SecurityContext: &corev1.SecurityContext{},
- }},
Volumes: []corev1.Volume{{
Name: "workspace-0",
VolumeSource: corev1.VolumeSource{
@@ -475,6 +345,7 @@ func TestCreateOrUpdateAffinityAssistantsAndPVCsPerWorkspaceOrDisabled(t *testin
}}
for _, tc := range tests {
+ tc := tc
t.Run(tc.name, func(t *testing.T) {
ctx := context.Background()
kubeClientSet := fakek8s.NewSimpleClientset()
@@ -595,7 +466,7 @@ func TestCreateOrUpdateAffinityAssistantsAndPVCs_Failure(t *testing.T) {
}
// TestCreateOrUpdateAffinityAssistantWhenNodeIsCordoned tests an existing Affinity Assistant can identify the node failure and
-// can migrate the affinity assistant pod to a healthy node so that the existing pipelineRun runs to compleition
+// can migrate the affinity assistant pod to a healthy node so that the existing pipelineRun runs to competition
func TestCreateOrUpdateAffinityAssistantWhenNodeIsCordoned(t *testing.T) {
expectedAffinityAssistantName := GetAffinityAssistantName(workspacePVCName, testPRWithPVC.Name)
@@ -736,32 +607,24 @@ func TestPipelineRunPodTemplatesArePropagatedToAffinityAssistant(t *testing.T) {
ImagePullSecrets: []corev1.LocalObjectReference{{
Name: "reg-creds",
}},
- SecurityContext: &corev1.PodSecurityContext{
- RunAsNonRoot: ptr.Bool(true),
- SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
- },
},
},
},
}
- stsWithOverridenTemplateFields := affinityAssistantStatefulSet(aa.AffinityAssistantPerWorkspace, "test-assistant", prWithCustomPodTemplate, []corev1.PersistentVolumeClaim{}, []string{}, containerConfigWithoutSecurityContext, nil)
+ stsWithTolerationsAndNodeSelector := affinityAssistantStatefulSet(aa.AffinityAssistantPerWorkspace, "test-assistant", prWithCustomPodTemplate, []corev1.PersistentVolumeClaim{}, []string{}, "nginx", nil)
- if len(stsWithOverridenTemplateFields.Spec.Template.Spec.Tolerations) != 1 {
+ if len(stsWithTolerationsAndNodeSelector.Spec.Template.Spec.Tolerations) != 1 {
t.Errorf("expected Tolerations in the StatefulSet")
}
- if len(stsWithOverridenTemplateFields.Spec.Template.Spec.NodeSelector) != 1 {
+ if len(stsWithTolerationsAndNodeSelector.Spec.Template.Spec.NodeSelector) != 1 {
t.Errorf("expected a NodeSelector in the StatefulSet")
}
- if len(stsWithOverridenTemplateFields.Spec.Template.Spec.ImagePullSecrets) != 1 {
+ if len(stsWithTolerationsAndNodeSelector.Spec.Template.Spec.ImagePullSecrets) != 1 {
t.Errorf("expected ImagePullSecrets in the StatefulSet")
}
-
- if stsWithOverridenTemplateFields.Spec.Template.Spec.SecurityContext == nil {
- t.Errorf("expected a SecurityContext in the StatefulSet")
- }
}
func TestDefaultPodTemplatesArePropagatedToAffinityAssistant(t *testing.T) {
@@ -770,15 +633,7 @@ func TestDefaultPodTemplatesArePropagatedToAffinityAssistant(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "pipelinerun-with-custom-podtemplate",
},
- Spec: v1.PipelineRunSpec{
- TaskRunTemplate: v1.PipelineTaskRunTemplate{
- PodTemplate: &pod.PodTemplate{
- HostNetwork: true,
- },
- },
- },
}
- priorityClassName := "test-priority"
defaultTpl := &pod.AffinityAssistantTemplate{
Tolerations: []corev1.Toleration{{
@@ -793,34 +648,21 @@ func TestDefaultPodTemplatesArePropagatedToAffinityAssistant(t *testing.T) {
ImagePullSecrets: []corev1.LocalObjectReference{{
Name: "reg-creds",
}},
- SecurityContext: &corev1.PodSecurityContext{
- RunAsNonRoot: ptr.Bool(true),
- SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
- },
- PriorityClassName: &priorityClassName,
}
- stsWithOverridenTemplateFields := affinityAssistantStatefulSet(aa.AffinityAssistantPerWorkspace, "test-assistant", prWithCustomPodTemplate, []corev1.PersistentVolumeClaim{}, []string{}, containerConfigWithoutSecurityContext, defaultTpl)
+ stsWithTolerationsAndNodeSelector := affinityAssistantStatefulSet(aa.AffinityAssistantPerWorkspace, "test-assistant", prWithCustomPodTemplate, []corev1.PersistentVolumeClaim{}, []string{}, "nginx", defaultTpl)
- if len(stsWithOverridenTemplateFields.Spec.Template.Spec.Tolerations) != 1 {
+ if len(stsWithTolerationsAndNodeSelector.Spec.Template.Spec.Tolerations) != 1 {
t.Errorf("expected Tolerations in the StatefulSet")
}
- if len(stsWithOverridenTemplateFields.Spec.Template.Spec.NodeSelector) != 1 {
+ if len(stsWithTolerationsAndNodeSelector.Spec.Template.Spec.NodeSelector) != 1 {
t.Errorf("expected a NodeSelector in the StatefulSet")
}
- if len(stsWithOverridenTemplateFields.Spec.Template.Spec.ImagePullSecrets) != 1 {
+ if len(stsWithTolerationsAndNodeSelector.Spec.Template.Spec.ImagePullSecrets) != 1 {
t.Errorf("expected ImagePullSecrets in the StatefulSet")
}
-
- if stsWithOverridenTemplateFields.Spec.Template.Spec.SecurityContext == nil {
- t.Errorf("expected SecurityContext in the StatefulSet")
- }
-
- if stsWithOverridenTemplateFields.Spec.Template.Spec.PriorityClassName == "" {
- t.Errorf("expected PriorityClassName in the StatefulSet")
- }
}
func TestMergedPodTemplatesArePropagatedToAffinityAssistant(t *testing.T) {
@@ -842,9 +684,7 @@ func TestMergedPodTemplatesArePropagatedToAffinityAssistant(t *testing.T) {
{Name: "reg-creds"},
{Name: "alt-creds"},
},
- SecurityContext: &corev1.PodSecurityContext{RunAsNonRoot: ptr.Bool(true)},
- },
- },
+ }},
},
}
@@ -855,28 +695,21 @@ func TestMergedPodTemplatesArePropagatedToAffinityAssistant(t *testing.T) {
ImagePullSecrets: []corev1.LocalObjectReference{{
Name: "reg-creds",
}},
- SecurityContext: &corev1.PodSecurityContext{
- RunAsNonRoot: ptr.Bool(false),
- },
}
- stsWithOverridenTemplateFields := affinityAssistantStatefulSet(aa.AffinityAssistantPerWorkspace, "test-assistant", prWithCustomPodTemplate, []corev1.PersistentVolumeClaim{}, []string{}, containerConfigWithoutSecurityContext, defaultTpl)
+ stsWithTolerationsAndNodeSelector := affinityAssistantStatefulSet(aa.AffinityAssistantPerWorkspace, "test-assistant", prWithCustomPodTemplate, []corev1.PersistentVolumeClaim{}, []string{}, "nginx", defaultTpl)
- if len(stsWithOverridenTemplateFields.Spec.Template.Spec.Tolerations) != 1 {
+ if len(stsWithTolerationsAndNodeSelector.Spec.Template.Spec.Tolerations) != 1 {
t.Errorf("expected Tolerations from spec in the StatefulSet")
}
- if len(stsWithOverridenTemplateFields.Spec.Template.Spec.NodeSelector) != 1 {
+ if len(stsWithTolerationsAndNodeSelector.Spec.Template.Spec.NodeSelector) != 1 {
t.Errorf("expected NodeSelector from defaults in the StatefulSet")
}
- if len(stsWithOverridenTemplateFields.Spec.Template.Spec.ImagePullSecrets) != 2 {
+ if len(stsWithTolerationsAndNodeSelector.Spec.Template.Spec.ImagePullSecrets) != 2 {
t.Errorf("expected ImagePullSecrets from spec to overwrite default in the StatefulSet")
}
-
- if stsWithOverridenTemplateFields.Spec.Template.Spec.SecurityContext.RunAsNonRoot == ptr.Bool(true) {
- t.Errorf("expected SecurityContext from spec to overwrite default in the StatefulSet")
- }
}
func TestOnlySelectPodTemplateFieldsArePropagatedToAffinityAssistant(t *testing.T) {
@@ -898,18 +731,17 @@ func TestOnlySelectPodTemplateFieldsArePropagatedToAffinityAssistant(t *testing.
IP: "1.2.3.4",
Hostnames: []string{"localhost"},
}},
- },
- },
+ }},
},
}
- stsWithOverridenTemplateFields := affinityAssistantStatefulSet(aa.AffinityAssistantPerWorkspace, "test-assistant", prWithCustomPodTemplate, []corev1.PersistentVolumeClaim{}, []string{}, containerConfigWithoutSecurityContext, nil)
+ stsWithTolerationsAndNodeSelector := affinityAssistantStatefulSet(aa.AffinityAssistantPerWorkspace, "test-assistant", prWithCustomPodTemplate, []corev1.PersistentVolumeClaim{}, []string{}, "nginx", nil)
- if len(stsWithOverridenTemplateFields.Spec.Template.Spec.Tolerations) != 1 {
+ if len(stsWithTolerationsAndNodeSelector.Spec.Template.Spec.Tolerations) != 1 {
t.Errorf("expected Tolerations from spec in the StatefulSet")
}
- if len(stsWithOverridenTemplateFields.Spec.Template.Spec.HostAliases) != 0 {
+ if len(stsWithTolerationsAndNodeSelector.Spec.Template.Spec.HostAliases) != 0 {
t.Errorf("expected HostAliases to not be passed from pod template")
}
}
@@ -923,7 +755,7 @@ func TestThatTheAffinityAssistantIsWithoutNodeSelectorAndTolerations(t *testing.
Spec: v1.PipelineRunSpec{},
}
- stsWithoutTolerationsAndNodeSelector := affinityAssistantStatefulSet(aa.AffinityAssistantPerWorkspace, "test-assistant", prWithoutCustomPodTemplate, []corev1.PersistentVolumeClaim{}, []string{}, containerConfigWithoutSecurityContext, nil)
+ stsWithoutTolerationsAndNodeSelector := affinityAssistantStatefulSet(aa.AffinityAssistantPerWorkspace, "test-assistant", prWithoutCustomPodTemplate, []corev1.PersistentVolumeClaim{}, []string{}, "nginx", nil)
if len(stsWithoutTolerationsAndNodeSelector.Spec.Template.Spec.Tolerations) != 0 {
t.Errorf("unexpected Tolerations in the StatefulSet")
@@ -932,10 +764,6 @@ func TestThatTheAffinityAssistantIsWithoutNodeSelectorAndTolerations(t *testing.
if len(stsWithoutTolerationsAndNodeSelector.Spec.Template.Spec.NodeSelector) != 0 {
t.Errorf("unexpected NodeSelector in the StatefulSet")
}
-
- if stsWithoutTolerationsAndNodeSelector.Spec.Template.Spec.SecurityContext != nil {
- t.Errorf("unexpected SecurityContext in the StatefulSet")
- }
}
// TestThatAffinityAssistantNameIsNoLongerThan53 tests that the Affinity Assistant Name
@@ -1405,7 +1233,7 @@ func validateStatefulSetSpec(t *testing.T, ctx context.Context, c Reconciler, ex
if err != nil {
t.Fatalf("unexpected error when retrieving StatefulSet: %v", err)
}
- if d := cmp.Diff(expectStatefulSetSpec, &aa.Spec, podSpecFilter, podTemplateSpecFilter, podContainerFilter); d != "" {
+ if d := cmp.Diff(expectStatefulSetSpec, &aa.Spec, podSpecFilter, podTemplateSpecFilter); d != "" {
t.Errorf("StatefulSetSpec diff: %s", diff.PrintWantGot(d))
}
} else if !apierrors.IsNotFound(err) {
diff --git a/upstream/pkg/reconciler/pipelinerun/cancel.go b/upstream/pkg/reconciler/pipelinerun/cancel.go
index c198569da87..9b7dc48c440 100644
--- a/upstream/pkg/reconciler/pipelinerun/cancel.go
+++ b/upstream/pkg/reconciler/pipelinerun/cancel.go
@@ -24,7 +24,6 @@ import (
"strings"
"time"
- pipelineErrors "github.com/tektoncd/pipeline/pkg/apis/pipeline/errors"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
@@ -89,10 +88,6 @@ func cancelTaskRun(ctx context.Context, taskRunName string, namespace string, cl
// still be able to cancel the PipelineRun
return nil
}
- if pipelineErrors.IsImmutableTaskRunSpecError(err) {
- // The TaskRun may have completed and the spec field is immutable, we should ignore this error.
- return nil
- }
return err
}
diff --git a/upstream/pkg/reconciler/pipelinerun/cancel_test.go b/upstream/pkg/reconciler/pipelinerun/cancel_test.go
index 0d1baa7e25d..72ab2e5ce04 100644
--- a/upstream/pkg/reconciler/pipelinerun/cancel_test.go
+++ b/upstream/pkg/reconciler/pipelinerun/cancel_test.go
@@ -295,6 +295,7 @@ func TestCancelPipelineRun(t *testing.T) {
wantErr: true,
}}
for _, tc := range testCases {
+ tc := tc
t.Run(tc.name, func(t *testing.T) {
d := test.Data{
PipelineRuns: []*v1.PipelineRun{tc.pipelineRun},
@@ -366,38 +367,37 @@ func TestGetChildObjectsFromPRStatusForTaskNames(t *testing.T) {
expectedRunNames []string
expectedCustomRunNames []string
hasError bool
- }{
- {
- name: "beta custom tasks",
- prStatus: v1.PipelineRunStatus{PipelineRunStatusFields: v1.PipelineRunStatusFields{
- ChildReferences: []v1.ChildStatusReference{{
- TypeMeta: runtime.TypeMeta{
- APIVersion: v1beta1.SchemeGroupVersion.String(),
- Kind: customRun,
- },
- Name: "r1",
- PipelineTaskName: "run-1",
- }},
+ }{{
+ name: "beta custom tasks",
+ prStatus: v1.PipelineRunStatus{PipelineRunStatusFields: v1.PipelineRunStatusFields{
+ ChildReferences: []v1.ChildStatusReference{{
+ TypeMeta: runtime.TypeMeta{
+ APIVersion: v1beta1.SchemeGroupVersion.String(),
+ Kind: customRun,
+ },
+ Name: "r1",
+ PipelineTaskName: "run-1",
}},
- expectedCustomRunNames: []string{"r1"},
- hasError: false,
- }, {
- name: "unknown kind",
- prStatus: v1.PipelineRunStatus{PipelineRunStatusFields: v1.PipelineRunStatusFields{
- ChildReferences: []v1.ChildStatusReference{{
- TypeMeta: runtime.TypeMeta{
- APIVersion: "v1",
- Kind: "UnknownKind",
- },
- Name: "u1",
- PipelineTaskName: "unknown-1",
- }},
+ }},
+ expectedCustomRunNames: []string{"r1"},
+ hasError: false,
+ }, {
+ name: "unknown kind",
+ prStatus: v1.PipelineRunStatus{PipelineRunStatusFields: v1.PipelineRunStatusFields{
+ ChildReferences: []v1.ChildStatusReference{{
+ TypeMeta: runtime.TypeMeta{
+ APIVersion: "v1",
+ Kind: "UnknownKind",
+ },
+ Name: "u1",
+ PipelineTaskName: "unknown-1",
}},
- expectedTRNames: nil,
- expectedRunNames: nil,
- expectedCustomRunNames: nil,
- hasError: true,
- },
+ }},
+ expectedTRNames: nil,
+ expectedRunNames: nil,
+ expectedCustomRunNames: nil,
+ hasError: true,
+ },
}
for _, tc := range testCases {
diff --git a/upstream/pkg/reconciler/pipelinerun/controller.go b/upstream/pkg/reconciler/pipelinerun/controller.go
index d47ef8d7760..5df3f698548 100644
--- a/upstream/pkg/reconciler/pipelinerun/controller.go
+++ b/upstream/pkg/reconciler/pipelinerun/controller.go
@@ -33,7 +33,7 @@ import (
"github.com/tektoncd/pipeline/pkg/pipelinerunmetrics"
cloudeventclient "github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent"
"github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
- resolution "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
+ resolution "github.com/tektoncd/pipeline/pkg/resolution/resource"
"github.com/tektoncd/pipeline/pkg/tracing"
"k8s.io/client-go/tools/cache"
"k8s.io/utils/clock"
@@ -62,12 +62,8 @@ func NewController(opts *pipeline.Options, clock clock.PassiveClock) func(contex
verificationpolicyInformer := verificationpolicyinformer.Get(ctx)
secretinformer := secretinformer.Get(ctx)
tracerProvider := tracing.New(TracerProviderName, logger.Named("tracing"))
- pipelinerunmetricsRecorder := pipelinerunmetrics.Get(ctx)
//nolint:contextcheck // OnStore methods does not support context as a parameter
- configStore := config.NewStore(logger.Named("config-store"),
- pipelinerunmetrics.OnStore(logger, pipelinerunmetricsRecorder),
- tracerProvider.OnStore(secretinformer.Lister()),
- )
+ configStore := config.NewStore(logger.Named("config-store"), pipelinerunmetrics.MetricsOnStore(logger), tracerProvider.OnStore(secretinformer.Lister()))
configStore.WatchConfigs(cmw)
c := &Reconciler{
@@ -80,7 +76,7 @@ func NewController(opts *pipeline.Options, clock clock.PassiveClock) func(contex
customRunLister: customRunInformer.Lister(),
verificationPolicyLister: verificationpolicyInformer.Lister(),
cloudEventClient: cloudeventclient.Get(ctx),
- metrics: pipelinerunmetricsRecorder,
+ metrics: pipelinerunmetrics.Get(ctx),
pvcHandler: volumeclaim.NewPVCHandler(kubeclientset, logger),
resolutionRequester: resolution.NewCRDRequester(resolutionclient.Get(ctx), resolutionInformer.Lister()),
tracerProvider: tracerProvider,
diff --git a/upstream/pkg/reconciler/pipelinerun/pipelinerun.go b/upstream/pkg/reconciler/pipelinerun/pipelinerun.go
index 8a67c183a7f..c2f1b2bf353 100644
--- a/upstream/pkg/reconciler/pipelinerun/pipelinerun.go
+++ b/upstream/pkg/reconciler/pipelinerun/pipelinerun.go
@@ -52,8 +52,7 @@ import (
tresources "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources"
"github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
"github.com/tektoncd/pipeline/pkg/remote"
- resolution "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
- resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
+ resolution "github.com/tektoncd/pipeline/pkg/resolution/resource"
"github.com/tektoncd/pipeline/pkg/substitution"
"github.com/tektoncd/pipeline/pkg/trustedresources"
"github.com/tektoncd/pipeline/pkg/workspace"
@@ -289,8 +288,7 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, pr *v1.PipelineRun) pkgr
if taskTimeout.Duration == config.NoTimeoutDuration {
waitTime = time.Duration(config.FromContextOrDefaults(ctx).Defaults.DefaultTimeoutMinutes) * time.Minute
}
- } else if pr.Status.FinallyStartTime != nil && pr.FinallyTimeout() != nil &&
- pr.FinallyTimeout().Duration != config.NoTimeoutDuration {
+ } else if pr.Status.FinallyStartTime != nil && pr.FinallyTimeout() != nil {
finallyWaitTime := pr.FinallyTimeout().Duration - c.Clock.Since(pr.Status.FinallyStartTime.Time)
if finallyWaitTime < waitTime {
waitTime = finallyWaitTime
@@ -340,8 +338,7 @@ func (c *Reconciler) resolvePipelineState(
tasks []v1.PipelineTask,
pipelineMeta *metav1.ObjectMeta,
pr *v1.PipelineRun,
- pst resources.PipelineRunState,
-) (resources.PipelineRunState, error) {
+ pst resources.PipelineRunState) (resources.PipelineRunState, error) {
ctx, span := c.tracerProvider.Tracer(TracerName).Start(ctx, "resolvePipelineState")
defer span.End()
// Resolve each task individually because they each could have a different reference context (remote or local).
@@ -376,7 +373,7 @@ func (c *Reconciler) resolvePipelineState(
pst,
)
if err != nil {
- if resolutioncommon.IsErrTransient(err) {
+ if tresources.IsErrTransient(err) {
return nil, err
}
if errors.Is(err, remote.ErrRequestInProgress) {
@@ -490,8 +487,8 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1.PipelineRun, getPipel
if err := resources.ValidateRequiredParametersProvided(&pipelineSpec.Params, &pr.Spec.Params); err != nil {
// This Run has failed, so we need to mark it as failed and stop reconciling it
pr.Status.MarkFailed(v1.PipelineRunReasonParameterMissing.String(),
- "PipelineRun %s/%s is missing some parameters required by Pipeline %s/%s: %s",
- pr.Namespace, pr.Name, pr.Namespace, pipelineMeta.Name, err)
+ "PipelineRun %s parameters is missing some parameters required by Pipeline %s's parameters: %s",
+ pr.Namespace, pr.Name, err)
return controller.NewPermanentError(err)
}
@@ -903,13 +900,6 @@ func (c *Reconciler) runNextSchedulableTask(ctx context.Context, pr *v1.Pipeline
// propagate previous task results
resources.PropagateResults(rpt, pipelineRunFacts.State)
- // propagate previous task artifacts
- err = resources.PropagateArtifacts(rpt, pipelineRunFacts.State)
- if err != nil {
- logger.Errorf("Failed to propagate artifacts due to error: %v", err)
- return controller.NewPermanentError(err)
- }
-
// Validate parameter types in matrix after apply substitutions from Task Results
if rpt.PipelineTask.IsMatrixed() {
if err := resources.ValidateParameterTypesInMatrix(pipelineRunFacts.State); err != nil {
@@ -1326,11 +1316,6 @@ func propagatePipelineNameLabelToPipelineRun(pr *v1.PipelineRun) error {
if pr.ObjectMeta.Labels == nil {
pr.ObjectMeta.Labels = make(map[string]string)
}
-
- if _, ok := pr.ObjectMeta.Labels[pipeline.PipelineLabelKey]; ok {
- return nil
- }
-
switch {
case pr.Spec.PipelineRef != nil && pr.Spec.PipelineRef.Name != "":
pr.ObjectMeta.Labels[pipeline.PipelineLabelKey] = pr.Spec.PipelineRef.Name
@@ -1338,20 +1323,6 @@ func propagatePipelineNameLabelToPipelineRun(pr *v1.PipelineRun) error {
pr.ObjectMeta.Labels[pipeline.PipelineLabelKey] = pr.Name
case pr.Spec.PipelineRef != nil && pr.Spec.PipelineRef.Resolver != "":
pr.ObjectMeta.Labels[pipeline.PipelineLabelKey] = pr.Name
-
- // https://tekton.dev/docs/pipelines/cluster-resolver/#pipeline-resolution
- var kind, name string
- for _, param := range pr.Spec.PipelineRef.Params {
- if param.Name == "kind" {
- kind = param.Value.StringVal
- }
- if param.Name == "name" {
- name = param.Value.StringVal
- }
- }
- if kind == "pipeline" {
- pr.ObjectMeta.Labels[pipeline.PipelineLabelKey] = name
- }
default:
return fmt.Errorf("pipelineRun %s not providing PipelineRef or PipelineSpec", pr.Name)
}
@@ -1367,7 +1338,6 @@ func getTaskrunLabels(pr *v1.PipelineRun, pipelineTaskName string, includePipeli
}
}
labels[pipeline.PipelineRunLabelKey] = pr.Name
- labels[pipeline.PipelineRunUIDLabelKey] = string(pr.UID)
if pipelineTaskName != "" {
labels[pipeline.PipelineTaskLabelKey] = pipelineTaskName
}
@@ -1464,9 +1434,7 @@ func storePipelineSpecAndMergeMeta(ctx context.Context, pr *v1.PipelineRun, ps *
// Propagate labels from Pipeline to PipelineRun. PipelineRun labels take precedences over Pipeline.
pr.ObjectMeta.Labels = kmap.Union(meta.Labels, pr.ObjectMeta.Labels)
- if len(meta.Name) > 0 {
- pr.ObjectMeta.Labels[pipeline.PipelineLabelKey] = meta.Name
- }
+ pr.ObjectMeta.Labels[pipeline.PipelineLabelKey] = meta.Name
// Propagate annotations from Pipeline to PipelineRun. PipelineRun annotations take precedences over Pipeline.
pr.ObjectMeta.Annotations = kmap.Union(kmap.ExcludeKeys(meta.Annotations, tknreconciler.KubectlLastAppliedAnnotationKey), pr.ObjectMeta.Annotations)
@@ -1575,8 +1543,6 @@ func filterCustomRunsForPipelineRunStatus(logger *zap.SugaredLogger, pr *v1.Pipe
gvks = append(gvks, v1beta1.SchemeGroupVersion.WithKind(customRun))
}
- // NAMES are names
-
return names, taskLabels, gvks, statuses
}
diff --git a/upstream/pkg/reconciler/pipelinerun/pipelinerun_test.go b/upstream/pkg/reconciler/pipelinerun/pipelinerun_test.go
index 780249b6a40..e71ee79a823 100644
--- a/upstream/pkg/reconciler/pipelinerun/pipelinerun_test.go
+++ b/upstream/pkg/reconciler/pipelinerun/pipelinerun_test.go
@@ -226,7 +226,7 @@ func getTaskRuns(ctx context.Context, t *testing.T, clients test.Clients, namesp
outputs := make(map[string]*v1.TaskRun)
for _, item := range taskRuns.Items {
tr := item
- outputs[item.Name] = &tr
+ outputs[tr.Name] = &tr
}
return outputs
@@ -244,7 +244,6 @@ func TestReconcile(t *testing.T) {
metadata:
name: test-pipeline-run-success
namespace: foo
- uid: bar
spec:
params:
- name: bar
@@ -398,8 +397,6 @@ spec:
name: unit-test-task
kind: Task
`)
- expectedTaskRun.Labels["tekton.dev/pipelineRunUID"] = "bar"
- expectedTaskRun.OwnerReferences[0].UID = "bar"
// ignore IgnoreUnexported ignore both after and before steps fields
if d := cmp.Diff(expectedTaskRun, actual, ignoreTypeMeta, ignoreResourceVersion); d != "" {
t.Errorf("expected to see TaskRun %v created. Diff %s", expectedTaskRun, diff.PrintWantGot(d))
@@ -430,7 +427,6 @@ func TestReconcile_V1Beta1CustomTask(t *testing.T) {
simpleCustomTaskPRYAML := `metadata:
name: test-pipelinerun
namespace: namespace
- uid: bar
spec:
pipelineSpec:
tasks:
@@ -450,7 +446,6 @@ spec:
tekton.dev/pipeline: test-pipelinerun
tekton.dev/pipelineRun: test-pipelinerun
tekton.dev/pipelineTask: custom-task
- tekton.dev/pipelineRunUID: bar
name: test-pipelinerun-custom-task
namespace: namespace
ownerReferences:
@@ -459,7 +454,6 @@ spec:
controller: true
kind: PipelineRun
name: test-pipelinerun
- uid: bar
spec:
params:
- name: param1
@@ -962,26 +956,10 @@ spec:
"Warning Failed [User error] PipelineRun foo/embedded-pipeline-mismatching-param-type parameters have mismatching types with Pipeline foo/embedded-pipeline-mismatching-param-type's parameters: parameters have inconsistent types : [some-param]",
},
}, {
- name: "invalid-pipeline-run-missing-params-with-ref-shd-stop-reconciling",
- pipelineRun: parse.MustParseV1PipelineRun(t, `
-metadata:
- name: pipelinerun-missing-params-1
- namespace: foo
-spec:
- pipelineRef:
- name: a-pipeline-with-array-params
-`),
- reason: v1.PipelineRunReasonParameterMissing.String(),
- permanentError: true,
- wantEvents: []string{
- "Normal Started",
- "Warning Failed [User error] PipelineRun foo/pipelinerun-missing-params-1 is missing some parameters required by Pipeline foo/a-pipeline-with-array-params: pipelineRun missing parameters: [some-param]",
- },
- }, {
- name: "invalid-pipeline-run-missing-params-with-spec-shd-stop-reconciling",
+ name: "invalid-pipeline-run-missing-params-shd-stop-reconciling",
pipelineRun: parse.MustParseV1PipelineRun(t, fmt.Sprintf(`
metadata:
- name: pipelinerun-missing-params-2
+ name: pipelinerun-missing-params
namespace: foo
spec:
pipelineSpec:
@@ -997,7 +975,7 @@ spec:
permanentError: true,
wantEvents: []string{
"Normal Started",
- "Warning Failed [User error] PipelineRun foo/pipelinerun-missing-params-2 is missing some parameters required by Pipeline foo/pipelinerun-missing-params-2: pipelineRun missing parameters: [some-param]",
+ "Warning Failed [User error] PipelineRun foo parameters is missing some parameters required by Pipeline pipelinerun-missing-params's parameters: pipelineRun missing parameters: [some-param]",
},
}, {
name: "invalid-pipeline-with-invalid-dag-graph",
@@ -2512,7 +2490,24 @@ spec:
}
func TestReconcileWithTimeoutDisabled(t *testing.T) {
- ps := []*v1.Pipeline{parse.MustParseV1Pipeline(t, `
+ testCases := []struct {
+ name string
+ timeout time.Duration
+ }{
+ {
+ name: "pipeline timeout is 24h",
+ timeout: 24 * time.Hour,
+ },
+ {
+ name: "pipeline timeout is way longer than 24h",
+ timeout: 360 * time.Hour,
+ },
+ }
+
+ for _, tc := range testCases {
+ startTime := time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC).Add(-3 * tc.timeout)
+ t.Run(tc.name, func(t *testing.T) {
+ ps := []*v1.Pipeline{parse.MustParseV1Pipeline(t, `
metadata:
name: test-pipeline
namespace: foo
@@ -2524,22 +2519,8 @@ spec:
- name: hello-world-2
taskRef:
name: hello-world
-`), parse.MustParseV1Pipeline(t, `
-metadata:
- name: test-pipeline-with-finally
- namespace: foo
-spec:
- tasks:
- - name: hello-world-1
- taskRef:
- name: hello-world
- finally:
- - name: hello-world-2
- taskRef:
- name: hello-world
`)}
-
- prs := []*v1.PipelineRun{parse.MustParseV1PipelineRun(t, `
+ prs := []*v1.PipelineRun{parse.MustParseV1PipelineRun(t, `
metadata:
name: test-pipeline-run-with-timeout-disabled
namespace: foo
@@ -2552,108 +2533,32 @@ spec:
pipeline: 0h0m0s
status:
startTime: "2021-12-30T00:00:00Z"
-`), parse.MustParseV1PipelineRun(t, `
-metadata:
- name: test-pipeline-run-with-timeout-disabled
- namespace: foo
-spec:
- pipelineRef:
- name: test-pipeline-with-finally
- taskRunTemplate:
- serviceAccountName: test-sa
- timeouts:
- pipeline: 96h0m0s
- tasks: 96h0m0s
-status:
- startTime: "2021-12-30T00:00:00Z"
- finallyStartTime: "2021-12-30T23:44:59Z"
`)}
- ts := []*v1.Task{simpleHelloWorldTask}
+ ts := []*v1.Task{simpleHelloWorldTask}
- trs := []*v1.TaskRun{mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("test-pipeline-run-with-timeout-hello-world-1", "foo", "test-pipeline-run-with-timeout-disabled",
- "test-pipeline", "hello-world-1", false), `
-spec:
- serviceAccountName: test-sa
- taskRef:
- name: hello-world
- kind: Task
-`), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("test-pipeline-run-with-timeout-with-finally-hello-world-1", "foo", "test-pipeline-run-with-timeout-disabled",
- "test-pipeline-with-finally", "hello-world-1", false), `
-spec:
- startTime: "2021-12-30T00:00:00Z"
- serviceAccountName: test-sa
- taskRef:
- name: hello-world
- kind: Task
- conditions:
- - lastTransitionTime: null
- status: "True"
- type: Succeeded
-`), mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("test-pipeline-run-with-timeout-with-finally-hello-world-2", "foo", "test-pipeline-run-with-timeout-disabled",
- "test-pipeline-with-finally", "hello-world-2", false), `
+ trs := []*v1.TaskRun{mustParseTaskRunWithObjectMeta(t, taskRunObjectMeta("test-pipeline-run-with-timeout-hello-world-1", "foo", "test-pipeline-run-with-timeout-disabled",
+ "test-pipeline", "hello-world-1", false), `
spec:
serviceAccountName: test-sa
taskRef:
name: hello-world
kind: Task
`)}
-
- testCases := []struct {
- name string
- timeout time.Duration
- trs []*v1.TaskRun
- ts []*v1.Task
- ps []*v1.Pipeline
- prs []*v1.PipelineRun
- }{
- {
- name: "pipeline timeout is 24h",
- timeout: 24 * time.Hour,
- trs: []*v1.TaskRun{trs[0]},
- ts: []*v1.Task{ts[0]},
- prs: []*v1.PipelineRun{prs[0]},
- ps: []*v1.Pipeline{ps[0]},
- },
- {
- name: "pipeline timeout is way longer than 24h",
- timeout: 360 * time.Hour,
- trs: []*v1.TaskRun{trs[0]},
- ts: []*v1.Task{ts[0]},
- prs: []*v1.PipelineRun{prs[0]},
- ps: []*v1.Pipeline{ps[0]},
- },
- {
- name: "pipeline timeout is 24h, and the final task timeout is 0s",
- timeout: 24 * time.Hour,
- trs: []*v1.TaskRun{trs[1], trs[2]},
- ts: []*v1.Task{ts[0]},
- prs: []*v1.PipelineRun{prs[1]},
- ps: []*v1.Pipeline{ps[1]},
- },
- }
-
- for _, tc := range testCases {
- startTime := time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC).Add(-3 * tc.timeout)
- notAdjustedCreationTimestamp := time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC).Add(tc.timeout)
- t.Run(tc.name, func(t *testing.T) {
start := metav1.NewTime(startTime)
- tc.prs[0].Status.StartTime = &start
- for i := range tc.trs {
- tc.trs[i].CreationTimestamp = metav1.Time{Time: notAdjustedCreationTimestamp}
- }
+ prs[0].Status.StartTime = &start
d := test.Data{
- PipelineRuns: tc.prs,
- Pipelines: tc.ps,
- Tasks: tc.ts,
- TaskRuns: tc.trs,
+ PipelineRuns: prs,
+ Pipelines: ps,
+ Tasks: ts,
+ TaskRuns: trs,
}
prt := newPipelineRunTest(t, d)
defer prt.Cancel()
c := prt.TestAssets.Controller
clients := prt.TestAssets.Clients
- reconcileError := c.Reconciler.Reconcile(prt.TestAssets.Ctx, fmt.Sprintf("%s/%s", "foo", tc.prs[0].Name))
+ reconcileError := c.Reconciler.Reconcile(prt.TestAssets.Ctx, "foo/test-pipeline-run-with-timeout-disabled")
if reconcileError == nil {
t.Errorf("expected error, but got nil")
}
@@ -2663,7 +2568,7 @@ spec:
t.Errorf("Expected a positive requeue duration but got %s", requeueDuration.String())
}
prt.Test.Logf("Getting reconciled run")
- reconciledRun, err := clients.Pipeline.TektonV1().PipelineRuns("foo").Get(prt.TestAssets.Ctx, tc.prs[0].Name, metav1.GetOptions{})
+ reconciledRun, err := clients.Pipeline.TektonV1().PipelineRuns("foo").Get(prt.TestAssets.Ctx, "test-pipeline-run-with-timeout-disabled", metav1.GetOptions{})
if err != nil {
prt.Test.Errorf("Somehow had error getting reconciled run out of fake client: %s", err)
}
@@ -2786,7 +2691,7 @@ spec:
prt.Test.Logf("Getting events")
// Check generated events match what's expected
if err := k8sevent.CheckEventsOrdered(prt.Test, prt.TestAssets.Recorder.Events, "test-pipeline-run-with-timeout", wantEvents); err != nil {
- prt.Test.Error(err.Error())
+ prt.Test.Errorf(err.Error())
}
// The PipelineRun should be timed out.
@@ -3569,7 +3474,7 @@ spec:
}
err = k8sevent.CheckEventsOrdered(t, testAssets.Recorder.Events, prName, wantEvents)
if err != nil {
- t.Error(err.Error())
+ t.Errorf(err.Error())
}
// Turn off failing reactor and retry reconciliation
@@ -3687,7 +3592,7 @@ spec:
}
err = k8sevent.CheckEventsOrdered(t, testAssets.Recorder.Events, prName, wantEvents)
if err != nil {
- t.Error(err.Error())
+ t.Errorf(err.Error())
}
// Turn off failing reactor and retry reconciliation
@@ -3784,6 +3689,7 @@ metadata:
PipelineRunAnnotation: PipelineRunValue
labels:
PipelineRunLabel: PipelineRunValue
+ tekton.dev/pipeline: WillNotBeUsed
name: test-pipeline-run-with-labels
namespace: foo
spec:
@@ -6914,7 +6820,7 @@ metadata:
t.Errorf("storePipelineSpec() error = %v", err)
}
if d := cmp.Diff(tc.wantPipelineRun, pr); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
// mock second reconcile
@@ -6922,7 +6828,7 @@ metadata:
t.Errorf("storePipelineSpec() error = %v", err)
}
if d := cmp.Diff(tc.wantPipelineRun, pr); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
})
}
@@ -6946,10 +6852,10 @@ func Test_storePipelineSpec_metadata(t *testing.T) {
t.Errorf("storePipelineSpecAndMergeMeta error = %v", err)
}
if d := cmp.Diff(wantedlabels, pr.ObjectMeta.Labels); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
if d := cmp.Diff(wantedannotations, pr.ObjectMeta.Annotations); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
}
@@ -8339,73 +8245,6 @@ spec:
}
}
-func TestReconciler_ReconcileKind_PipelineRunLabels(t *testing.T) {
- names.TestingSeed()
-
- pipelineName := "p-pipelinetask"
- pipelineRunName := "pr-pipelinetask"
-
- ps := []*v1.Pipeline{parse.MustParseV1Pipeline(t, `
-metadata:
- name: p-pipelinetask
- namespace: foo
-spec:
- tasks:
- - name: task1
- taskRef:
- name: mytask
-`)}
-
- prs := []*v1.PipelineRun{parse.MustParseV1PipelineRun(t, `
-metadata:
- name: pr-pipelinetask
- namespace: foo
-spec:
- pipelineRef:
- params:
- - name: kind
- value: pipeline
- - name: name
- value: p-pipelinetask
- - name: namespace
- value: foo
- resolver: cluster
-`)}
-
- ts := []*v1.Task{{ObjectMeta: baseObjectMeta("mytask", "foo")}}
-
- trs := []*v1.TaskRun{mustParseTaskRunWithObjectMeta(t,
- taskRunObjectMeta(pipelineRunName+"-task1-xxyy", "foo", pipelineRunName, pipelineName, "task1", false),
- `
-spec:
- serviceAccountName: test-sa
- taskRef:
- name: mytask
-status:
- conditions:
- - reason: "done"
- status: "True"
- type: Succeeded
-`)}
-
- d := test.Data{
- PipelineRuns: prs,
- Pipelines: ps,
- Tasks: ts,
- TaskRuns: trs,
- }
- prt := newPipelineRunTest(t, d)
- defer prt.Cancel()
-
- actualPipelineRun, _ := prt.reconcileRun("foo", pipelineRunName, []string{}, false)
- if actualPipelineRun.Labels == nil {
- t.Fatalf("Pelinerun should have labels")
- }
- if v, ok := actualPipelineRun.Labels[pipeline.PipelineLabelKey]; !ok || v != pipelineName {
- t.Fatalf("The expected name of the pipeline is %s, but the actual name is %s", pipelineName, v)
- }
-}
-
// newPipelineRunTest returns PipelineRunTest with a new PipelineRun controller created with specified state through data
// This PipelineRunTest can be reused for multiple PipelineRuns by calling reconcileRun for each pipelineRun
func newPipelineRunTest(t *testing.T, data test.Data) *PipelineRunTest {
@@ -8449,7 +8288,7 @@ func (prt PipelineRunTest) reconcileRun(namespace, pipelineRunName string, wantE
// Check generated events match what's expected
if len(wantEvents) > 0 {
if err := k8sevent.CheckEventsOrdered(prt.Test, prt.TestAssets.Recorder.Events, pipelineRunName, wantEvents); err != nil {
- prt.Test.Error(err.Error())
+ prt.Test.Errorf(err.Error())
}
}
@@ -8646,132 +8485,6 @@ spec:
}
}
-func TestReconcile_RemotePipeline_PipelineNameLabel(t *testing.T) {
- names.TestingSeed()
-
- namespace := "foo"
- prName := "test-pipeline-run-success"
- trName := "test-pipeline-run-success-unit-test-1"
-
- prs := []*v1.PipelineRun{parse.MustParseV1PipelineRun(t, `
-metadata:
- name: test-pipeline-run-success
- namespace: foo
-spec:
- pipelineRef:
- resolver: bar
- taskRunTemplate:
- serviceAccountName: test-sa
- timeout: 1h0m0s
-`)}
- ps := parse.MustParseV1Pipeline(t, `
-metadata:
- name: test-pipeline
- namespace: foo
-spec:
- tasks:
- - name: unit-test-1
- taskRef:
- resolver: bar
-`)
- notNamePipeline := parse.MustParseV1Pipeline(t, `
-metadata:
- namespace: foo
-spec:
- tasks:
- - name: unit-test-1
- taskRef:
- resolver: bar
-`)
-
- remoteTask := parse.MustParseV1Task(t, `
-metadata:
- name: unit-test-task
- namespace: foo
-`)
-
- pipelineBytes, err := yaml.Marshal(ps)
- if err != nil {
- t.Fatal("fail to marshal pipeline", err)
- }
- notNamePipelineBytes, err := yaml.Marshal(notNamePipeline)
- if err != nil {
- t.Fatal("fail to marshal pipeline", err)
- }
-
- taskBytes, err := yaml.Marshal(remoteTask)
- if err != nil {
- t.Fatal("fail to marshal task", err)
- }
-
- pipelineReq := getResolvedResolutionRequest(t, "bar", pipelineBytes, "foo", prName)
- notNamePipelineReq := getResolvedResolutionRequest(t, "bar", notNamePipelineBytes, "foo", prName)
- taskReq := getResolvedResolutionRequest(t, "bar", taskBytes, "foo", trName)
-
- tcs := []struct {
- name string
- wantPipelineName string
- pipelineReq resolutionv1beta1.ResolutionRequest
- taskReq resolutionv1beta1.ResolutionRequest
- }{{
- name: "remote pipeline contains name",
- // Use the name from the remote pipeline
- wantPipelineName: ps.Name,
- pipelineReq: pipelineReq,
- taskReq: taskReq,
- }, {
- name: "remote pipeline without name",
- wantPipelineName: prs[0].Name,
- pipelineReq: notNamePipelineReq,
- taskReq: taskReq,
- }}
-
- for _, tc := range tcs {
- // Unlike the tests above, we do *not* locally define our pipeline or unit-test task.
- d := test.Data{
- PipelineRuns: prs,
- ServiceAccounts: []*corev1.ServiceAccount{{
- ObjectMeta: metav1.ObjectMeta{Name: prs[0].Spec.TaskRunTemplate.ServiceAccountName, Namespace: namespace},
- }},
- ConfigMaps: []*corev1.ConfigMap{
- {
- ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.Namespace()},
- Data: map[string]string{
- "enable-api-fields": "beta",
- },
- },
- },
- ResolutionRequests: []*resolutionv1beta1.ResolutionRequest{&tc.taskReq, &tc.pipelineReq},
- }
-
- prt := newPipelineRunTest(t, d)
- defer prt.Cancel()
-
- wantEvents := []string{
- "Normal Started",
- "Normal Running Tasks Completed: 0",
- }
- reconciledRun, _ := prt.reconcileRun(namespace, prName, wantEvents, false)
- if len(reconciledRun.Labels) == 0 {
- t.Errorf("the pipeline label in pr is not set")
- }
- pName := reconciledRun.Labels[pipeline.PipelineLabelKey]
- if reconciledRun.Labels[pipeline.PipelineLabelKey] != tc.wantPipelineName {
- t.Errorf("want pipeline name %s, but got %s", tc.wantPipelineName, pName)
- }
-
- // Verify the pipeline name label after the second `reconcile`, to prevent it from being overwritten again.
- reconciledRun, _ = prt.reconcileRun(namespace, prName, wantEvents, false)
- if len(reconciledRun.Labels) == 0 {
- t.Errorf("the pipeline label in pr is not set")
- }
- pName = reconciledRun.Labels[pipeline.PipelineLabelKey]
- if reconciledRun.Labels[pipeline.PipelineLabelKey] != tc.wantPipelineName {
- t.Errorf("want pipeline name %s, but got %s", tc.wantPipelineName, pName)
- }
- }
-}
-
// TestReconcile_OptionalWorkspacesOmitted checks that an optional workspace declared by
// a Task and a Pipeline can be omitted by a PipelineRun and the run will still start
// successfully without an error.
@@ -9340,13 +9053,11 @@ func taskRunObjectMeta(trName, ns, prName, pipelineName, pipelineTaskName string
APIVersion: "tekton.dev/v1",
Controller: &trueb,
BlockOwnerDeletion: &trueb,
- UID: "",
}},
Labels: map[string]string{
- pipeline.PipelineLabelKey: pipelineName,
- pipeline.PipelineRunLabelKey: prName,
- pipeline.PipelineTaskLabelKey: pipelineTaskName,
- pipeline.PipelineRunUIDLabelKey: "",
+ pipeline.PipelineLabelKey: pipelineName,
+ pipeline.PipelineRunLabelKey: prName,
+ pipeline.PipelineTaskLabelKey: pipelineTaskName,
},
Annotations: map[string]string{},
}
@@ -12172,7 +11883,7 @@ spec:
type: array
steps:
- name: produce-a-list-of-platforms
- image: docker.io/library/bash:5.2.26
+ image: bash:latest
script: |
#!/usr/bin/env bash
echo -n "[\"linux\",\"mac\",\"windows\"]" | tee $(results.platforms.path)
@@ -12657,7 +12368,7 @@ spec:
type: array
steps:
- name: produce-a-list-of-platforms
- image: docker.io/library/bash:5.2.26
+ image: bash:latest
script: |
#!/usr/bin/env bash
echo -n "[\"linux\",\"mac\",\"windows\"]" | tee $(results.platforms.path)
@@ -16887,7 +16598,7 @@ spec:
// the ResolutionRequest's name is generated by resolverName, namespace and runName.
func getResolvedResolutionRequest(t *testing.T, resolverName string, resourceBytes []byte, namespace string, runName string) resolutionv1beta1.ResolutionRequest {
t.Helper()
- name, err := remoteresource.GenerateDeterministicNameFromSpec(resolverName, namespace+"/"+runName, &resolutionv1beta1.ResolutionRequestSpec{})
+ name, err := remoteresource.GenerateDeterministicName(resolverName, namespace+"/"+runName, nil)
if err != nil {
t.Errorf("error generating name for %s/%s/%s: %v", resolverName, namespace, runName, err)
}
@@ -17683,7 +17394,7 @@ func Test_runNextSchedulableTask(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: "task2",
ResourceVersion: "00002",
- Labels: map[string]string{"tekton.dev/pipelineRun": "", "tekton.dev/pipelineTask": "task2", "tekton.dev/pipelineRunUID": ""},
+ Labels: map[string]string{"tekton.dev/pipelineRun": "", "tekton.dev/pipelineTask": "task2"},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "tekton.dev/v1",
diff --git a/upstream/pkg/reconciler/pipelinerun/resources/apply.go b/upstream/pkg/reconciler/pipelinerun/resources/apply.go
index 6c635357adf..3aeb0accebd 100644
--- a/upstream/pkg/reconciler/pipelinerun/resources/apply.go
+++ b/upstream/pkg/reconciler/pipelinerun/resources/apply.go
@@ -18,7 +18,6 @@ package resources
import (
"context"
- "encoding/json"
"fmt"
"strconv"
"strings"
@@ -40,11 +39,13 @@ const (
objectIndividualVariablePattern = "params.%s.%s"
)
-var paramPatterns = []string{
- "params.%s",
- "params[%q]",
- "params['%s']",
-}
+var (
+ paramPatterns = []string{
+ "params.%s",
+ "params[%q]",
+ "params['%s']",
+ }
+)
// ApplyParameters applies the params from a PipelineRun.Params to a PipelineSpec.
func ApplyParameters(ctx context.Context, p *v1.PipelineSpec, pr *v1.PipelineRun) *v1.PipelineSpec {
@@ -62,7 +63,7 @@ func ApplyParameters(ctx context.Context, p *v1.PipelineSpec, pr *v1.PipelineRun
switch p.Default.Type {
case v1.ParamTypeArray:
for _, pattern := range paramPatterns {
- for i := range len(p.Default.ArrayVal) {
+ for i := 0; i < len(p.Default.ArrayVal); i++ {
stringReplacements[fmt.Sprintf(pattern+"[%d]", p.Name, i)] = p.Default.ArrayVal[i]
}
arrayReplacements[fmt.Sprintf(pattern, p.Name)] = p.Default.ArrayVal
@@ -110,7 +111,7 @@ func paramsFromPipelineRun(ctx context.Context, pr *v1.PipelineRun) (map[string]
switch p.Value.Type {
case v1.ParamTypeArray:
for _, pattern := range paramPatterns {
- for i := range len(p.Value.ArrayVal) {
+ for i := 0; i < len(p.Value.ArrayVal); i++ {
stringReplacements[fmt.Sprintf(pattern+"[%d]", p.Name, i)] = p.Value.ArrayVal[i]
}
arrayReplacements[fmt.Sprintf(pattern, p.Name)] = p.Value.ArrayVal
@@ -248,11 +249,8 @@ func ApplyTaskResults(targets PipelineRunState, resolvedResultRefs ResolvedResul
}
}
pipelineTask.When = pipelineTask.When.ReplaceVariables(stringReplacements, arrayReplacements)
- if pipelineTask.TaskRef != nil {
- if pipelineTask.TaskRef.Params != nil {
- pipelineTask.TaskRef.Params = pipelineTask.TaskRef.Params.ReplaceVariables(stringReplacements, arrayReplacements, objectReplacements)
- }
- pipelineTask.TaskRef.Name = substitution.ApplyReplacements(pipelineTask.TaskRef.Name, stringReplacements)
+ if pipelineTask.TaskRef != nil && pipelineTask.TaskRef.Params != nil {
+ pipelineTask.TaskRef.Params = pipelineTask.TaskRef.Params.ReplaceVariables(stringReplacements, arrayReplacements, objectReplacements)
}
pipelineTask.DisplayName = substitution.ApplyReplacements(pipelineTask.DisplayName, stringReplacements)
for i, workspace := range pipelineTask.Workspaces {
@@ -270,11 +268,8 @@ func ApplyPipelineTaskStateContext(state PipelineRunState, replacements map[stri
pipelineTask := resolvedPipelineRunTask.PipelineTask.DeepCopy()
pipelineTask.Params = pipelineTask.Params.ReplaceVariables(replacements, nil, nil)
pipelineTask.When = pipelineTask.When.ReplaceVariables(replacements, nil)
- if pipelineTask.TaskRef != nil {
- if pipelineTask.TaskRef.Params != nil {
- pipelineTask.TaskRef.Params = pipelineTask.TaskRef.Params.ReplaceVariables(replacements, nil, nil)
- }
- pipelineTask.TaskRef.Name = substitution.ApplyReplacements(pipelineTask.TaskRef.Name, replacements)
+ if pipelineTask.TaskRef != nil && pipelineTask.TaskRef.Params != nil {
+ pipelineTask.TaskRef.Params = pipelineTask.TaskRef.Params.ReplaceVariables(replacements, nil, nil)
}
pipelineTask.DisplayName = substitution.ApplyReplacements(pipelineTask.DisplayName, replacements)
resolvedPipelineRunTask.PipelineTask = pipelineTask
@@ -316,11 +311,8 @@ func ApplyReplacements(p *v1.PipelineSpec, replacements map[string]string, array
p.Tasks[i].Workspaces[j].SubPath = substitution.ApplyReplacements(p.Tasks[i].Workspaces[j].SubPath, replacements)
}
p.Tasks[i].When = p.Tasks[i].When.ReplaceVariables(replacements, arrayReplacements)
- if p.Tasks[i].TaskRef != nil {
- if p.Tasks[i].TaskRef.Params != nil {
- p.Tasks[i].TaskRef.Params = p.Tasks[i].TaskRef.Params.ReplaceVariables(replacements, arrayReplacements, objectReplacements)
- }
- p.Tasks[i].TaskRef.Name = substitution.ApplyReplacements(p.Tasks[i].TaskRef.Name, replacements)
+ if p.Tasks[i].TaskRef != nil && p.Tasks[i].TaskRef.Params != nil {
+ p.Tasks[i].TaskRef.Params = p.Tasks[i].TaskRef.Params.ReplaceVariables(replacements, arrayReplacements, objectReplacements)
}
p.Tasks[i] = propagateParams(p.Tasks[i], replacements, arrayReplacements, objectReplacements)
}
@@ -339,11 +331,8 @@ func ApplyReplacements(p *v1.PipelineSpec, replacements map[string]string, array
p.Finally[i].Workspaces[j].SubPath = substitution.ApplyReplacements(p.Finally[i].Workspaces[j].SubPath, replacements)
}
p.Finally[i].When = p.Finally[i].When.ReplaceVariables(replacements, arrayReplacements)
- if p.Finally[i].TaskRef != nil {
- if p.Finally[i].TaskRef.Params != nil {
- p.Finally[i].TaskRef.Params = p.Finally[i].TaskRef.Params.ReplaceVariables(replacements, arrayReplacements, objectReplacements)
- }
- p.Finally[i].TaskRef.Name = substitution.ApplyReplacements(p.Finally[i].TaskRef.Name, replacements)
+ if p.Finally[i].TaskRef != nil && p.Finally[i].TaskRef.Params != nil {
+ p.Finally[i].TaskRef.Params = p.Finally[i].TaskRef.Params.ReplaceVariables(replacements, arrayReplacements, objectReplacements)
}
p.Finally[i] = propagateParams(p.Finally[i], replacements, arrayReplacements, objectReplacements)
}
@@ -443,40 +432,6 @@ func PropagateResults(rpt *ResolvedPipelineTask, runStates PipelineRunState) {
rpt.ResolvedTask.TaskSpec = resources.ApplyReplacements(rpt.ResolvedTask.TaskSpec, stringReplacements, arrayReplacements, map[string]map[string]string{})
}
-// PropagateArtifacts propagates artifact values from previous task runs into the TaskSpec of the current task.
-func PropagateArtifacts(rpt *ResolvedPipelineTask, runStates PipelineRunState) error {
- if rpt.ResolvedTask == nil || rpt.ResolvedTask.TaskSpec == nil {
- return nil
- }
- stringReplacements := map[string]string{}
- for taskName, artifacts := range runStates.GetTaskRunsArtifacts() {
- if artifacts != nil {
- for i, input := range artifacts.Inputs {
- ib, err := json.Marshal(input.Values)
- if err != nil {
- return err
- }
- stringReplacements[fmt.Sprintf("tasks.%s.inputs.%s", taskName, input.Name)] = string(ib)
- if i == 0 {
- stringReplacements[fmt.Sprintf("tasks.%s.inputs", taskName)] = string(ib)
- }
- }
- for i, output := range artifacts.Outputs {
- ob, err := json.Marshal(output.Values)
- if err != nil {
- return err
- }
- stringReplacements[fmt.Sprintf("tasks.%s.outputs.%s", taskName, output.Name)] = string(ob)
- if i == 0 {
- stringReplacements[fmt.Sprintf("tasks.%s.outputs", taskName)] = string(ob)
- }
- }
- }
- }
- rpt.ResolvedTask.TaskSpec = resources.ApplyReplacements(rpt.ResolvedTask.TaskSpec, stringReplacements, map[string][]string{}, map[string]map[string]string{})
- return nil
-}
-
// ApplyTaskResultsToPipelineResults applies the results of completed TasksRuns and Runs to a Pipeline's
// list of PipelineResults, returning the computed set of PipelineRunResults. References to
// non-existent TaskResults or failed TaskRuns or Runs result in a PipelineResult being considered invalid
@@ -487,8 +442,7 @@ func ApplyTaskResultsToPipelineResults(
results []v1.PipelineResult,
taskRunResults map[string][]v1.TaskRunResult,
customTaskResults map[string][]v1beta1.CustomRunResult,
- taskstatus map[string]string,
-) ([]v1.PipelineRunResult, error) {
+ taskstatus map[string]string) ([]v1.PipelineRunResult, error) {
var runResults []v1.PipelineRunResult
var invalidPipelineResults []string
diff --git a/upstream/pkg/reconciler/pipelinerun/resources/apply_test.go b/upstream/pkg/reconciler/pipelinerun/resources/apply_test.go
index 50222c8ef1d..e7d40c95d2e 100644
--- a/upstream/pkg/reconciler/pipelinerun/resources/apply_test.go
+++ b/upstream/pkg/reconciler/pipelinerun/resources/apply_test.go
@@ -1772,6 +1772,7 @@ func TestApplyParameters(t *testing.T) {
},
},
} {
+ tt := tt // capture range variable
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
ctx := context.Background()
@@ -2081,6 +2082,7 @@ func TestApplyParameters_ArrayIndexing(t *testing.T) {
},
},
} {
+ tt := tt // capture range variable
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
run := &v1.PipelineRun{
@@ -2333,6 +2335,7 @@ func TestApplyReplacementsMatrix(t *testing.T) {
},
},
} {
+ tt := tt // capture range variable
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
run := &v1.PipelineRun{
@@ -3387,19 +3390,19 @@ func TestContext(t *testing.T) {
}
got := resources.ApplyContexts(&orig.Spec, orig.Name, tc.pr)
if d := cmp.Diff(tc.expected, got.Tasks[0].Params[0]); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
if d := cmp.Diff(tc.expected, got.Finally[0].Params[0]); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
if d := cmp.Diff(tc.expected, got.Tasks[0].Matrix.Params[0]); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
if d := cmp.Diff(tc.expectedDisplayName, got.Tasks[0].DisplayName); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
if d := cmp.Diff(tc.expectedDisplayName, got.Finally[0].DisplayName); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
@@ -3651,7 +3654,7 @@ func TestApplyPipelineTaskContexts(t *testing.T) {
t.Run(tc.description, func(t *testing.T) {
got := resources.ApplyPipelineTaskContexts(&tc.pt, tc.prstatus, tc.facts)
if d := cmp.Diff(&tc.want, got); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
@@ -3821,7 +3824,7 @@ func TestApplyFinallyResultsToPipelineResults(t *testing.T) {
t.Run(tc.description, func(t *testing.T) {
received, _ := resources.ApplyTaskResultsToPipelineResults(context.Background(), tc.results, tc.taskResults, tc.runResults, nil /* skippedTasks */)
if d := cmp.Diff(tc.expected, received); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
@@ -4157,7 +4160,7 @@ func TestApplyTaskResultsToPipelineResults_Success(t *testing.T) {
t.Errorf("Got unecpected error:%v", err)
}
if d := cmp.Diff(tc.expectedResults, received); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
@@ -4379,7 +4382,7 @@ func TestApplyTaskResultsToPipelineResults_Error(t *testing.T) {
}
if d := cmp.Diff(tc.expectedResults, received); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
@@ -4750,163 +4753,6 @@ func TestPropagateResults(t *testing.T) {
}
}
-func TestPropagateArtifacts(t *testing.T) {
- for _, tt := range []struct {
- name string
- resolvedTask *resources.ResolvedPipelineTask
- runStates resources.PipelineRunState
- expectedResolvedTask *resources.ResolvedPipelineTask
- wantErr bool
- }{
- {
- name: "not propagate artifact when resolved task is nil",
- resolvedTask: &resources.ResolvedPipelineTask{
- ResolvedTask: nil,
- },
- runStates: resources.PipelineRunState{},
- expectedResolvedTask: &resources.ResolvedPipelineTask{
- ResolvedTask: nil,
- },
- },
- {
- name: "not propagate artifact when taskSpec is nil",
- resolvedTask: &resources.ResolvedPipelineTask{
- ResolvedTask: &taskresources.ResolvedTask{
- TaskSpec: nil,
- },
- },
- runStates: resources.PipelineRunState{},
- expectedResolvedTask: &resources.ResolvedPipelineTask{
- ResolvedTask: &taskresources.ResolvedTask{
- TaskSpec: nil,
- },
- },
- },
- {
- name: "propagate artifacts inputs",
- resolvedTask: &resources.ResolvedPipelineTask{
- ResolvedTask: &taskresources.ResolvedTask{
- TaskSpec: &v1.TaskSpec{
- Steps: []v1.Step{
- {
- Name: "get-artifacts-inputs-from-pt1",
- Command: []string{"$(tasks.pt1.inputs.source)"},
- Args: []string{"$(tasks.pt1.inputs.source)"},
- },
- },
- },
- },
- },
- runStates: resources.PipelineRunState{
- {
- PipelineTask: &v1.PipelineTask{
- Name: "pt1",
- },
- TaskRuns: []*v1.TaskRun{
- {
- Status: v1.TaskRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{
- {
- Type: apis.ConditionSucceeded,
- Status: corev1.ConditionTrue,
- },
- },
- },
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Artifacts: &v1.Artifacts{
- Inputs: []v1.Artifact{{Name: "source", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: nil,
- },
- },
- },
- },
- },
- },
- },
- expectedResolvedTask: &resources.ResolvedPipelineTask{
- ResolvedTask: &taskresources.ResolvedTask{
- TaskSpec: &v1.TaskSpec{
- Steps: []v1.Step{
- {
- Name: "get-artifacts-inputs-from-pt1",
- Command: []string{`[{"digest":{"sha256":"b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"},"uri":"pkg:example.github.com/inputs"}]`},
- Args: []string{`[{"digest":{"sha256":"b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"},"uri":"pkg:example.github.com/inputs"}]`},
- },
- },
- },
- },
- },
- },
- {
- name: "propagate artifacts outputs",
- resolvedTask: &resources.ResolvedPipelineTask{
- ResolvedTask: &taskresources.ResolvedTask{
- TaskSpec: &v1.TaskSpec{
- Steps: []v1.Step{
- {
- Name: "get-artifacts-outputs-from-pt1",
- Command: []string{"$(tasks.pt1.outputs.image)"},
- Args: []string{"$(tasks.pt1.outputs.image)"},
- },
- },
- },
- },
- },
- runStates: resources.PipelineRunState{
- {
- PipelineTask: &v1.PipelineTask{
- Name: "pt1",
- },
- TaskRuns: []*v1.TaskRun{
- {
- Status: v1.TaskRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{
- {
- Type: apis.ConditionSucceeded,
- Status: corev1.ConditionTrue,
- },
- },
- },
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Artifacts: &v1.Artifacts{
- Inputs: []v1.Artifact{{Name: "source", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: []v1.Artifact{{Name: "image", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}},
- },
- },
- },
- },
- },
- },
- },
- expectedResolvedTask: &resources.ResolvedPipelineTask{
- ResolvedTask: &taskresources.ResolvedTask{
- TaskSpec: &v1.TaskSpec{
- Steps: []v1.Step{
- {
- Name: "get-artifacts-outputs-from-pt1",
- Command: []string{`[{"digest":{"sha1":"95588b8f34c31eb7d62c92aaa4e6506639b06ef2"},"uri":"pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}]`},
- Args: []string{`[{"digest":{"sha1":"95588b8f34c31eb7d62c92aaa4e6506639b06ef2"},"uri":"pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}]`},
- },
- },
- },
- },
- },
- },
- } {
- t.Run(tt.name, func(t *testing.T) {
- err := resources.PropagateArtifacts(tt.resolvedTask, tt.runStates)
- if tt.wantErr != (err != nil) {
- t.Fatalf("Failed to check err want %t, got %v", tt.wantErr, err)
- }
- if d := cmp.Diff(tt.expectedResolvedTask, tt.resolvedTask); d != "" {
- t.Fatalf("TestPropagateArtifacts() %s", diff.PrintWantGot(d))
- }
- })
- }
-}
-
func TestApplyParametersToWorkspaceBindings(t *testing.T) {
testCases := []struct {
name string
diff --git a/upstream/pkg/reconciler/pipelinerun/resources/pipelineref.go b/upstream/pkg/reconciler/pipelinerun/resources/pipelineref.go
index d295d0a3bc7..c6aab4a51b2 100644
--- a/upstream/pkg/reconciler/pipelinerun/resources/pipelineref.go
+++ b/upstream/pkg/reconciler/pipelinerun/resources/pipelineref.go
@@ -24,14 +24,12 @@ import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
- resolutionV1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
"github.com/tektoncd/pipeline/pkg/reconciler/apiserver"
rprp "github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/pipelinespec"
"github.com/tektoncd/pipeline/pkg/remote"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/remote/resolution"
- remoteresource "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
- "github.com/tektoncd/pipeline/pkg/substitution"
+ "github.com/tektoncd/pipeline/pkg/remote/resolution"
+ remoteresource "github.com/tektoncd/pipeline/pkg/resolution/resource"
"github.com/tektoncd/pipeline/pkg/trustedresources"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -71,20 +69,8 @@ func GetPipelineFunc(ctx context.Context, k8s kubernetes.Interface, tekton clien
stringReplacements[k] = v
}
replacedParams := pr.Params.ReplaceVariables(stringReplacements, arrayReplacements, objectReplacements)
- var url string
- // The name is url-like so its not a local reference.
- if err := v1.RefNameLikeUrl(pr.Name); err == nil {
- // apply variable replacements in the name.
- pr.Name = substitution.ApplyReplacements(pr.Name, stringReplacements)
- url = pr.Name
- }
- resolverPayload := remoteresource.ResolverPayload{
- ResolutionSpec: &resolutionV1beta1.ResolutionRequestSpec{
- Params: replacedParams,
- URL: url,
- },
- }
- resolver := resolution.NewResolver(requester, pipelineRun, string(pr.Resolver), resolverPayload)
+
+ resolver := resolution.NewResolver(requester, pipelineRun, string(pr.Resolver), "", "", replacedParams)
return resolvePipeline(ctx, resolver, name, namespace, k8s, tekton, verificationPolicies)
}
default:
@@ -157,24 +143,20 @@ func readRuntimeObjectAsPipeline(ctx context.Context, namespace string, obj runt
// Verify the Pipeline once we fetch from the remote resolution, mutating, validation and conversion of the pipeline should happen after the verification, since signatures are based on the remote pipeline contents
vr := trustedresources.VerifyResource(ctx, obj, k8s, refSource, verificationPolicies)
// Issue a dry-run request to create the remote Pipeline, so that it can undergo validation from validating admission webhooks
- // and mutation from mutating admission webhooks without actually creating the Pipeline on the cluster
- o, err := apiserver.DryRunValidate(ctx, namespace, obj, tekton)
- if err != nil {
+ // without actually creating the Pipeline on the cluster.
+ if err := apiserver.DryRunValidate(ctx, namespace, obj, tekton); err != nil {
return nil, nil, err
}
- if mutatedPipeline, ok := o.(*v1beta1.Pipeline); ok {
- mutatedPipeline.ObjectMeta = obj.ObjectMeta
- p := &v1.Pipeline{
- TypeMeta: metav1.TypeMeta{
- Kind: "Pipeline",
- APIVersion: "tekton.dev/v1",
- },
- }
- if err := mutatedPipeline.ConvertTo(ctx, p); err != nil {
- return nil, nil, fmt.Errorf("failed to convert v1beta1 obj %s into v1 Pipeline", mutatedPipeline.GetObjectKind().GroupVersionKind().String())
- }
- return p, &vr, nil
+ p := &v1.Pipeline{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Pipeline",
+ APIVersion: "tekton.dev/v1",
+ },
+ }
+ if err := obj.ConvertTo(ctx, p); err != nil {
+ return nil, nil, fmt.Errorf("failed to convert v1beta1 obj %s into v1 Pipeline", obj.GetObjectKind().GroupVersionKind().String())
}
+ return p, &vr, nil
case *v1.Pipeline:
// Cleanup object from things we don't care about
// FIXME: extract this in a function
@@ -183,14 +165,12 @@ func readRuntimeObjectAsPipeline(ctx context.Context, namespace string, obj runt
// Avoid forgetting to add it in the future when there is a v2 version, causing similar problems.
obj.SetDefaults(ctx)
vr := trustedresources.VerifyResource(ctx, obj, k8s, refSource, verificationPolicies)
- o, err := apiserver.DryRunValidate(ctx, namespace, obj, tekton)
- if err != nil {
+ // Issue a dry-run request to create the remote Pipeline, so that it can undergo validation from validating admission webhooks
+ // without actually creating the Pipeline on the cluster
+ if err := apiserver.DryRunValidate(ctx, namespace, obj, tekton); err != nil {
return nil, nil, err
}
- if mutatedPipeline, ok := o.(*v1.Pipeline); ok {
- mutatedPipeline.ObjectMeta = obj.ObjectMeta
- return mutatedPipeline, &vr, nil
- }
+ return obj, &vr, nil
}
return nil, nil, errors.New("resource is not a pipeline")
}
diff --git a/upstream/pkg/reconciler/pipelinerun/resources/pipelineref_test.go b/upstream/pkg/reconciler/pipelinerun/resources/pipelineref_test.go
index e10fbcd9134..9b173bc49dc 100644
--- a/upstream/pkg/reconciler/pipelinerun/resources/pipelineref_test.go
+++ b/upstream/pkg/reconciler/pipelinerun/resources/pipelineref_test.go
@@ -34,18 +34,15 @@ import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
- resolutionV1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
"github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake"
clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake"
"github.com/tektoncd/pipeline/pkg/reconciler/apiserver"
"github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/resources"
ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
"github.com/tektoncd/pipeline/pkg/trustedresources"
"github.com/tektoncd/pipeline/test"
"github.com/tektoncd/pipeline/test/diff"
"github.com/tektoncd/pipeline/test/parse"
- resolution "github.com/tektoncd/pipeline/test/remoteresolution"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -347,8 +344,8 @@ func TestGetPipelineFunc_RemoteResolution(t *testing.T) {
}}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
- resolved := resolution.NewResolvedResource([]byte(tc.pipelineYAML), nil /* annotations */, sampleRefSource.DeepCopy(), nil /* data error */)
- requester := resolution.NewRequester(resolved, nil, resource.ResolverPayload{})
+ resolved := test.NewResolvedResource([]byte(tc.pipelineYAML), nil /* annotations */, sampleRefSource.DeepCopy(), nil /* data error */)
+ requester := test.NewRequester(resolved, nil)
fn := resources.GetPipelineFunc(ctx, nil, clients, requester, &v1.PipelineRun{
ObjectMeta: metav1.ObjectMeta{Namespace: "default"},
Spec: v1.PipelineRunSpec{
@@ -402,8 +399,8 @@ func TestGetPipelineFunc_RemoteResolution_ValidationFailure(t *testing.T) {
}}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
- resolved := resolution.NewResolvedResource([]byte(tc.pipelineYAML), nil /* annotations */, sampleRefSource.DeepCopy(), nil /* data error */)
- requester := resolution.NewRequester(resolved, nil, resource.ResolverPayload{})
+ resolved := test.NewResolvedResource([]byte(tc.pipelineYAML), nil /* annotations */, sampleRefSource.DeepCopy(), nil /* data error */)
+ requester := test.NewRequester(resolved, nil)
fn := resources.GetPipelineFunc(ctx, nil, clients, requester, &v1.PipelineRun{
ObjectMeta: metav1.ObjectMeta{Namespace: "default"},
Spec: v1.PipelineRunSpec{
@@ -438,7 +435,6 @@ func TestGetPipelineFunc_RemoteResolution_ReplacedParams(t *testing.T) {
ctx = config.ToContext(ctx, cfg)
pipeline := parse.MustParseV1PipelineAndSetDefaults(t, pipelineYAMLString)
pipelineRef := &v1.PipelineRef{
- Name: "https://foo/bar",
ResolverRef: v1.ResolverRef{
Resolver: "git",
Params: []v1.Param{{
@@ -456,21 +452,16 @@ func TestGetPipelineFunc_RemoteResolution_ReplacedParams(t *testing.T) {
pipelineYAMLString,
}, "\n")
- resolved := resolution.NewResolvedResource([]byte(pipelineYAML), nil, sampleRefSource.DeepCopy(), nil)
- requester := &resolution.Requester{
+ resolved := test.NewResolvedResource([]byte(pipelineYAML), nil, sampleRefSource.DeepCopy(), nil)
+ requester := &test.Requester{
ResolvedResource: resolved,
- ResolverPayload: resource.ResolverPayload{
- ResolutionSpec: &resolutionV1beta1.ResolutionRequestSpec{
- Params: v1.Params{{
- Name: "foo",
- Value: *v1.NewStructuredValues("bar"),
- }, {
- Name: "bar",
- Value: *v1.NewStructuredValues("test-pipeline"),
- }},
- URL: "https://foo/bar",
- },
- },
+ Params: v1.Params{{
+ Name: "foo",
+ Value: *v1.NewStructuredValues("bar"),
+ }, {
+ Name: "bar",
+ Value: *v1.NewStructuredValues("test-pipeline"),
+ }},
}
fn := resources.GetPipelineFunc(ctx, nil, clients, requester, &v1.PipelineRun{
ObjectMeta: metav1.ObjectMeta{
@@ -547,8 +538,8 @@ func TestGetPipelineFunc_RemoteResolutionInvalidData(t *testing.T) {
ctx = config.ToContext(ctx, cfg)
pipelineRef := &v1.PipelineRef{ResolverRef: v1.ResolverRef{Resolver: "git"}}
resolvesTo := []byte("INVALID YAML")
- res := resolution.NewResolvedResource(resolvesTo, nil, nil, nil)
- requester := resolution.NewRequester(res, nil, resource.ResolverPayload{})
+ resource := test.NewResolvedResource(resolvesTo, nil, nil, nil)
+ requester := test.NewRequester(resource, nil)
fn := resources.GetPipelineFunc(ctx, nil, clients, requester, &v1.PipelineRun{
ObjectMeta: metav1.ObjectMeta{Namespace: "default"},
Spec: v1.PipelineRunSpec{
@@ -563,6 +554,7 @@ func TestGetPipelineFunc_RemoteResolutionInvalidData(t *testing.T) {
}
}
+//nolint:musttag
func TestGetPipelineFunc_V1beta1Pipeline_VerifyNoError(t *testing.T) {
ctx := context.Background()
signer, _, k8sclient, vps := test.SetupVerificationPolicies(t)
@@ -585,8 +577,8 @@ func TestGetPipelineFunc_V1beta1Pipeline_VerifyNoError(t *testing.T) {
},
EntryPoint: "foo/bar",
}
- resolvedUnmatched := resolution.NewResolvedResource(unsignedPipelineBytes, nil, noMatchPolicyRefSource, nil)
- requesterUnmatched := resolution.NewRequester(resolvedUnmatched, nil, resource.ResolverPayload{})
+ resolvedUnmatched := test.NewResolvedResource(unsignedPipelineBytes, nil, noMatchPolicyRefSource, nil)
+ requesterUnmatched := test.NewRequester(resolvedUnmatched, nil)
signedPipeline, err := test.GetSignedV1beta1Pipeline(unsignedPipeline, signer, "signed")
if err != nil {
@@ -608,8 +600,8 @@ func TestGetPipelineFunc_V1beta1Pipeline_VerifyNoError(t *testing.T) {
},
EntryPoint: "foo/bar",
}
- resolvedMatched := resolution.NewResolvedResource(signedPipelineBytes, nil, matchPolicyRefSource, nil)
- requesterMatched := resolution.NewRequester(resolvedMatched, nil, resource.ResolverPayload{})
+ resolvedMatched := test.NewResolvedResource(signedPipelineBytes, nil, matchPolicyRefSource, nil)
+ requesterMatched := test.NewRequester(resolvedMatched, nil)
pipelineRef := &v1.PipelineRef{
Name: signedPipeline.Name,
@@ -655,12 +647,12 @@ func TestGetPipelineFunc_V1beta1Pipeline_VerifyNoError(t *testing.T) {
warnPolicyRefSource := &v1.RefSource{
URI: " warnVP",
}
- resolvedUnsignedMatched := resolution.NewResolvedResource(unsignedPipelineBytes, nil, warnPolicyRefSource, nil)
- requesterUnsignedMatched := resolution.NewRequester(resolvedUnsignedMatched, nil, resource.ResolverPayload{})
+ resolvedUnsignedMatched := test.NewResolvedResource(unsignedPipelineBytes, nil, warnPolicyRefSource, nil)
+ requesterUnsignedMatched := test.NewRequester(resolvedUnsignedMatched, nil)
testcases := []struct {
name string
- requester *resolution.Requester
+ requester *test.Requester
verificationNoMatchPolicy string
pipelinerun v1.PipelineRun
policies []*v1alpha1.VerificationPolicy
@@ -741,7 +733,7 @@ func TestGetPipelineFunc_V1beta1Pipeline_VerifyNoError(t *testing.T) {
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
- ctx := test.SetupTrustedResourceConfig(context.Background(), tc.verificationNoMatchPolicy)
+ ctx = test.SetupTrustedResourceConfig(ctx, tc.verificationNoMatchPolicy)
fn := resources.GetPipelineFunc(ctx, k8sclient, tektonclient, tc.requester, &tc.pipelinerun, tc.policies)
gotResolvedPipeline, gotSource, gotVerificationResult, err := fn(ctx, pipelineRef.Name)
@@ -767,7 +759,9 @@ func TestGetPipelineFunc_V1beta1Pipeline_VerifyNoError(t *testing.T) {
}
}
+//nolint:musttag
func TestGetPipelineFunc_V1beta1Pipeline_VerifyError(t *testing.T) {
+ ctx := context.Background()
tektonclient := fake.NewSimpleClientset()
signer, _, k8sclient, vps := test.SetupVerificationPolicies(t)
@@ -784,8 +778,8 @@ func TestGetPipelineFunc_V1beta1Pipeline_VerifyError(t *testing.T) {
EntryPoint: "foo/bar",
}
- resolvedUnsigned := resolution.NewResolvedResource(unsignedPipelineBytes, nil, matchPolicyRefSource, nil)
- requesterUnsigned := resolution.NewRequester(resolvedUnsigned, nil, resource.ResolverPayload{})
+ resolvedUnsigned := test.NewResolvedResource(unsignedPipelineBytes, nil, matchPolicyRefSource, nil)
+ requesterUnsigned := test.NewRequester(resolvedUnsigned, nil)
signedPipeline, err := test.GetSignedV1beta1Pipeline(unsignedPipeline, signer, "signed")
if err != nil {
@@ -803,8 +797,8 @@ func TestGetPipelineFunc_V1beta1Pipeline_VerifyError(t *testing.T) {
},
EntryPoint: "foo/bar",
}
- resolvedUnmatched := resolution.NewResolvedResource(signedPipelineBytes, nil, noMatchPolicyRefSource, nil)
- requesterUnmatched := resolution.NewRequester(resolvedUnmatched, nil, resource.ResolverPayload{})
+ resolvedUnmatched := test.NewResolvedResource(signedPipelineBytes, nil, noMatchPolicyRefSource, nil)
+ requesterUnmatched := test.NewRequester(resolvedUnmatched, nil)
modifiedPipeline := signedPipeline.DeepCopy()
modifiedPipeline.Annotations["random"] = "attack"
@@ -812,14 +806,14 @@ func TestGetPipelineFunc_V1beta1Pipeline_VerifyError(t *testing.T) {
if err != nil {
t.Fatal("fail to marshal pipeline", err)
}
- resolvedModified := resolution.NewResolvedResource(modifiedPipelineBytes, nil, matchPolicyRefSource, nil)
- requesterModified := resolution.NewRequester(resolvedModified, nil, resource.ResolverPayload{})
+ resolvedModified := test.NewResolvedResource(modifiedPipelineBytes, nil, matchPolicyRefSource, nil)
+ requesterModified := test.NewRequester(resolvedModified, nil)
pipelineRef := &v1.PipelineRef{ResolverRef: v1.ResolverRef{Resolver: "git"}}
testcases := []struct {
name string
- requester *resolution.Requester
+ requester *test.Requester
verificationNoMatchPolicy string
expectedVerificationResult *trustedresources.VerificationResult
}{
@@ -862,7 +856,7 @@ func TestGetPipelineFunc_V1beta1Pipeline_VerifyError(t *testing.T) {
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
- ctx := test.SetupTrustedResourceConfig(context.Background(), tc.verificationNoMatchPolicy)
+ ctx = test.SetupTrustedResourceConfig(ctx, tc.verificationNoMatchPolicy)
pr := &v1.PipelineRun{
ObjectMeta: metav1.ObjectMeta{Namespace: "trusted-resources"},
Spec: v1.PipelineRunSpec{
@@ -885,6 +879,7 @@ func TestGetPipelineFunc_V1beta1Pipeline_VerifyError(t *testing.T) {
}
}
+//nolint:musttag
func TestGetPipelineFunc_V1Pipeline_VerifyNoError(t *testing.T) {
ctx := context.Background()
signer, _, k8sclient, vps := test.SetupVerificationPolicies(t)
@@ -911,8 +906,8 @@ func TestGetPipelineFunc_V1Pipeline_VerifyNoError(t *testing.T) {
},
EntryPoint: "foo/bar",
}
- resolvedUnmatched := resolution.NewResolvedResource(unsignedPipelineBytes, nil, noMatchPolicyRefSource, nil)
- requesterUnmatched := resolution.NewRequester(resolvedUnmatched, nil, resource.ResolverPayload{})
+ resolvedUnmatched := test.NewResolvedResource(unsignedPipelineBytes, nil, noMatchPolicyRefSource, nil)
+ requesterUnmatched := test.NewRequester(resolvedUnmatched, nil)
signedPipeline, err := getSignedV1Pipeline(unsignedV1Pipeline, signer, "signed")
if err != nil {
@@ -940,8 +935,8 @@ func TestGetPipelineFunc_V1Pipeline_VerifyNoError(t *testing.T) {
},
EntryPoint: "foo/bar",
}
- resolvedMatched := resolution.NewResolvedResource(signedPipelineBytes, nil, matchPolicyRefSource, nil)
- requesterMatched := resolution.NewRequester(resolvedMatched, nil, resource.ResolverPayload{})
+ resolvedMatched := test.NewResolvedResource(signedPipelineBytes, nil, matchPolicyRefSource, nil)
+ requesterMatched := test.NewRequester(resolvedMatched, nil)
pipelineRef := &v1.PipelineRef{
Name: signedPipeline.Name,
@@ -985,12 +980,12 @@ func TestGetPipelineFunc_V1Pipeline_VerifyNoError(t *testing.T) {
warnPolicyRefSource := &v1.RefSource{
URI: " warnVP",
}
- resolvedUnsignedMatched := resolution.NewResolvedResource(unsignedPipelineBytes, nil, warnPolicyRefSource, nil)
- requesterUnsignedMatched := resolution.NewRequester(resolvedUnsignedMatched, nil, resource.ResolverPayload{})
+ resolvedUnsignedMatched := test.NewResolvedResource(unsignedPipelineBytes, nil, warnPolicyRefSource, nil)
+ requesterUnsignedMatched := test.NewRequester(resolvedUnsignedMatched, nil)
testcases := []struct {
name string
- requester *resolution.Requester
+ requester *test.Requester
verificationNoMatchPolicy string
pipelinerun v1.PipelineRun
policies []*v1alpha1.VerificationPolicy
@@ -1071,7 +1066,7 @@ func TestGetPipelineFunc_V1Pipeline_VerifyNoError(t *testing.T) {
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
- ctx := test.SetupTrustedResourceConfig(context.Background(), tc.verificationNoMatchPolicy)
+ ctx = test.SetupTrustedResourceConfig(ctx, tc.verificationNoMatchPolicy)
fn := resources.GetPipelineFunc(ctx, k8sclient, tektonclient, tc.requester, &tc.pipelinerun, tc.policies)
gotResolvedPipeline, gotSource, gotVerificationResult, err := fn(ctx, pipelineRef.Name)
@@ -1097,7 +1092,9 @@ func TestGetPipelineFunc_V1Pipeline_VerifyNoError(t *testing.T) {
}
}
+//nolint:musttag
func TestGetPipelineFunc_V1Pipeline_VerifyError(t *testing.T) {
+ ctx := context.Background()
tektonclient := fake.NewSimpleClientset()
signer, _, k8sclient, vps := test.SetupVerificationPolicies(t)
@@ -1113,8 +1110,8 @@ func TestGetPipelineFunc_V1Pipeline_VerifyError(t *testing.T) {
EntryPoint: "foo/bar",
}
- resolvedUnsigned := resolution.NewResolvedResource(unsignedPipelineBytes, nil, matchPolicyRefSource, nil)
- requesterUnsigned := resolution.NewRequester(resolvedUnsigned, nil, resource.ResolverPayload{})
+ resolvedUnsigned := test.NewResolvedResource(unsignedPipelineBytes, nil, matchPolicyRefSource, nil)
+ requesterUnsigned := test.NewRequester(resolvedUnsigned, nil)
signedPipeline, err := getSignedV1Pipeline(unsignedV1Pipeline, signer, "signed")
if err != nil {
@@ -1132,8 +1129,8 @@ func TestGetPipelineFunc_V1Pipeline_VerifyError(t *testing.T) {
},
EntryPoint: "foo/bar",
}
- resolvedUnmatched := resolution.NewResolvedResource(signedPipelineBytes, nil, noMatchPolicyRefSource, nil)
- requesterUnmatched := resolution.NewRequester(resolvedUnmatched, nil, resource.ResolverPayload{})
+ resolvedUnmatched := test.NewResolvedResource(signedPipelineBytes, nil, noMatchPolicyRefSource, nil)
+ requesterUnmatched := test.NewRequester(resolvedUnmatched, nil)
modifiedPipeline := signedPipeline.DeepCopy()
modifiedPipeline.Annotations["random"] = "attack"
@@ -1141,14 +1138,14 @@ func TestGetPipelineFunc_V1Pipeline_VerifyError(t *testing.T) {
if err != nil {
t.Fatal("fail to marshal pipeline", err)
}
- resolvedModified := resolution.NewResolvedResource(modifiedPipelineBytes, nil, matchPolicyRefSource, nil)
- requesterModified := resolution.NewRequester(resolvedModified, nil, resource.ResolverPayload{})
+ resolvedModified := test.NewResolvedResource(modifiedPipelineBytes, nil, matchPolicyRefSource, nil)
+ requesterModified := test.NewRequester(resolvedModified, nil)
pipelineRef := &v1.PipelineRef{ResolverRef: v1.ResolverRef{Resolver: "git"}}
testcases := []struct {
name string
- requester *resolution.Requester
+ requester *test.Requester
verificationNoMatchPolicy string
expectedVerificationResult *trustedresources.VerificationResult
}{
@@ -1191,7 +1188,7 @@ func TestGetPipelineFunc_V1Pipeline_VerifyError(t *testing.T) {
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
- ctx := test.SetupTrustedResourceConfig(context.Background(), tc.verificationNoMatchPolicy)
+ ctx = test.SetupTrustedResourceConfig(ctx, tc.verificationNoMatchPolicy)
pr := &v1.PipelineRun{
ObjectMeta: metav1.ObjectMeta{Namespace: "trusted-resources"},
Spec: v1.PipelineRunSpec{
@@ -1212,7 +1209,9 @@ func TestGetPipelineFunc_V1Pipeline_VerifyError(t *testing.T) {
}
}
+//nolint:musttag
func TestGetPipelineFunc_GetFuncError(t *testing.T) {
+ ctx := context.Background()
tektonclient := fake.NewSimpleClientset()
_, k8sclient, vps := test.SetupMatchAllVerificationPolicies(t, "trusted-resources")
@@ -1222,8 +1221,8 @@ func TestGetPipelineFunc_GetFuncError(t *testing.T) {
t.Fatal("fail to marshal pipeline", err)
}
- resolvedUnsigned := resolution.NewResolvedResource(unsignedPipelineBytes, nil, sampleRefSource.DeepCopy(), nil)
- requesterUnsigned := resolution.NewRequester(resolvedUnsigned, nil, resource.ResolverPayload{})
+ resolvedUnsigned := test.NewResolvedResource(unsignedPipelineBytes, nil, sampleRefSource.DeepCopy(), nil)
+ requesterUnsigned := test.NewRequester(resolvedUnsigned, nil)
resolvedUnsigned.DataErr = errors.New("resolution error")
prResolutionError := &v1.PipelineRun{
@@ -1243,7 +1242,7 @@ func TestGetPipelineFunc_GetFuncError(t *testing.T) {
testcases := []struct {
name string
- requester *resolution.Requester
+ requester *test.Requester
pipelinerun v1.PipelineRun
expectedErr error
}{
@@ -1256,7 +1255,6 @@ func TestGetPipelineFunc_GetFuncError(t *testing.T) {
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
- ctx := context.Background()
store := config.NewStore(logging.FromContext(ctx).Named("config-store"))
featureflags := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -1334,7 +1332,7 @@ spec:
- "bar"
steps:
- name: step1
- image: docker.io/library/ubuntu
+ image: ubuntu
script: |
echo "hello world!"
`
@@ -1362,7 +1360,7 @@ spec:
taskSpec:
steps:
- name: step1
- image: docker.io/library/ubuntu
+ image: ubuntu
script: |
echo "hello world!"
`
diff --git a/upstream/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go b/upstream/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go
index a0f8180f770..7f980d1005a 100644
--- a/upstream/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go
+++ b/upstream/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go
@@ -31,7 +31,6 @@ import (
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources"
"github.com/tektoncd/pipeline/pkg/remote"
- "github.com/tektoncd/pipeline/pkg/resolution/resource"
"github.com/tektoncd/pipeline/pkg/substitution"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"knative.dev/pkg/apis"
@@ -138,37 +137,6 @@ func (t ResolvedPipelineTask) IsCustomTask() bool {
return t.CustomTask
}
-// getReason returns the latest reason if the run has completed successfully
-// If the PipelineTask has a Matrix, getReason returns the failure reason for any failure
-// otherwise, it returns an empty string
-func (t ResolvedPipelineTask) getReason() string {
- if t.IsCustomTask() {
- if len(t.CustomRuns) == 0 {
- return ""
- }
- for _, run := range t.CustomRuns {
- if !run.IsSuccessful() && len(run.Status.Conditions) >= 1 {
- return run.Status.Conditions[0].Reason
- }
- }
- if len(t.CustomRuns) >= 1 && len(t.CustomRuns[0].Status.Conditions) >= 1 {
- return t.CustomRuns[0].Status.Conditions[0].Reason
- }
- }
- if len(t.TaskRuns) == 0 {
- return ""
- }
- for _, taskRun := range t.TaskRuns {
- if !taskRun.IsSuccessful() && len(taskRun.Status.Conditions) >= 1 {
- return taskRun.Status.Conditions[0].Reason
- }
- }
- if len(t.TaskRuns) >= 1 && len(t.TaskRuns[0].Status.Conditions) >= 1 {
- return t.TaskRuns[0].Status.Conditions[0].Reason
- }
- return ""
-}
-
// isSuccessful returns true only if the run has completed successfully
// If the PipelineTask has a Matrix, isSuccessful returns true if all runs have completed successfully
func (t ResolvedPipelineTask) isSuccessful() bool {
@@ -687,14 +655,8 @@ func resolveTask(
case errors.Is(err, remote.ErrRequestInProgress):
return rt, err
case err != nil:
- // some of the resolvers obtain the name from the parameters instead of from the TaskRef.Name field,
- // so we account for both locations when constructing the error
- name := pipelineTask.TaskRef.Name
- if len(strings.TrimSpace(name)) == 0 {
- name = resource.GenerateErrorLogString(string(pipelineTask.TaskRef.Resolver), pipelineTask.TaskRef.Params)
- }
return rt, &TaskNotFoundError{
- Name: name,
+ Name: pipelineTask.TaskRef.Name,
Msg: err.Error(),
}
default:
@@ -754,7 +716,7 @@ func getNewRunNames(ptName, prName string, numberOfRuns int) []string {
return append(taskRunNames, taskRunName)
}
// For a matrix we append i to then end of the fanned out TaskRuns "matrixed-pr-taskrun-0"
- for i := range numberOfRuns {
+ for i := 0; i < numberOfRuns; i++ {
taskRunName := kmeta.ChildName(prName, fmt.Sprintf("-%s-%d", ptName, i))
// check if the taskRun name ends with a matrix instance count
if !strings.HasSuffix(taskRunName, fmt.Sprintf("-%d", i)) {
@@ -886,10 +848,6 @@ func createResultsCacheMatrixedTaskRuns(rpt *ResolvedPipelineTask) (resultsCache
// ValidateParamEnumSubset finds the referenced pipeline-level params in the resolved pipelineTask.
// It then validates if the referenced pipeline-level param enums are subsets of the resolved pipelineTask-level param enums
func ValidateParamEnumSubset(pipelineTaskParams []v1.Param, pipelineParamSpecs []v1.ParamSpec, rt *resources.ResolvedTask) error {
- // When the matrix Task has no TaskRun, the rt will be nil, we should skip the validation.
- if rt == nil {
- return nil
- }
for _, p := range pipelineTaskParams {
// calculate referenced param enums
res, present, errString := substitution.ExtractVariablesFromString(p.Value.StringVal, "params")
diff --git a/upstream/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go b/upstream/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go
index 9f93efe5456..fe6deee7c78 100644
--- a/upstream/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go
+++ b/upstream/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go
@@ -282,21 +282,18 @@ func makeCustomRunStarted(run v1beta1.CustomRun) *v1beta1.CustomRun {
func makeSucceeded(tr v1.TaskRun) *v1.TaskRun {
newTr := newTaskRun(tr)
newTr.Status.Conditions[0].Status = corev1.ConditionTrue
- newTr.Status.Conditions[0].Reason = "Succeeded"
return newTr
}
func makeCustomRunSucceeded(run v1beta1.CustomRun) *v1beta1.CustomRun {
newRun := newCustomRun(run)
newRun.Status.Conditions[0].Status = corev1.ConditionTrue
- newRun.Status.Conditions[0].Reason = "Succeeded"
return newRun
}
func makeFailed(tr v1.TaskRun) *v1.TaskRun {
newTr := newTaskRun(tr)
newTr.Status.Conditions[0].Status = corev1.ConditionFalse
- newTr.Status.Conditions[0].Reason = "Failed"
return newTr
}
@@ -310,7 +307,6 @@ func makeToBeRetried(tr v1.TaskRun) *v1.TaskRun {
func makeCustomRunFailed(run v1beta1.CustomRun) *v1beta1.CustomRun {
newRun := newCustomRun(run)
newRun.Status.Conditions[0].Status = corev1.ConditionFalse
- newRun.Status.Conditions[0].Reason = "Failed"
return newRun
}
@@ -2494,18 +2490,6 @@ func TestResolvePipelineRun_TaskDoesntExist(t *testing.T) {
Value: *v1.NewStructuredValues("b", "a", "r"),
}},
},
- }, {
- Name: "mytask3",
- TaskRef: &v1.TaskRef{ResolverRef: v1.ResolverRef{Params: v1.Params{{Name: "name", Value: v1.ParamValue{Type: v1.ParamTypeString, StringVal: "foo"}}}}},
- Matrix: &v1.Matrix{
- Params: v1.Params{{
- Name: "foo",
- Value: *v1.NewStructuredValues("f", "o", "o"),
- }, {
- Name: "bar",
- Value: *v1.NewStructuredValues("b", "a", "r"),
- }},
- },
}}
// Return an error when the Task is retrieved, as if it didn't exist
@@ -2528,9 +2512,6 @@ func TestResolvePipelineRun_TaskDoesntExist(t *testing.T) {
t.Fatalf("Pipeline %s: want error, got nil", p.Name)
case errors.As(err, &tnf):
// expected error
- if len(tnf.Name) == 0 {
- t.Fatalf("Pipeline %s: TaskNotFoundError did not have name set: %s", p.Name, tnf.Error())
- }
default:
t.Fatalf("Pipeline %s: Want %T, got %s of type %T", p.Name, tnf, err, err)
}
@@ -2567,7 +2548,7 @@ func TestResolvePipelineRun_VerificationFailed(t *testing.T) {
for _, pt := range pts {
rt, _ := ResolvePipelineTask(context.Background(), pr, getTask, getTaskRun, nopGetCustomRun, pt, nil)
if d := cmp.Diff(verificationResult, rt.ResolvedTask.VerificationResult, cmpopts.EquateErrors()); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
}
}
@@ -3684,7 +3665,7 @@ func TestResolvePipelineRunTask_WithMatrix(t *testing.T) {
var taskRuns []*v1.TaskRun
var taskRunsNames []string
taskRunsMap := map[string]*v1.TaskRun{}
- for i := range 9 {
+ for i := 0; i < 9; i++ {
trName := fmt.Sprintf("%s-%s-%d", pipelineRunName, pipelineTaskName, i)
tr := &v1.TaskRun{
ObjectMeta: metav1.ObjectMeta{
@@ -3842,7 +3823,7 @@ func TestResolvePipelineRunTask_WithMatrixedCustomTask(t *testing.T) {
var runs []*v1beta1.CustomRun
var runNames []string
runsMap := map[string]*v1beta1.CustomRun{}
- for i := range 9 {
+ for i := 0; i < 9; i++ {
runName := fmt.Sprintf("%s-%s-%d", pipelineRunName, pipelineTaskName, i)
run := &v1beta1.CustomRun{
ObjectMeta: metav1.ObjectMeta{
@@ -4814,197 +4795,6 @@ func TestIsRunning(t *testing.T) {
}
}
-func TestGetReason(t *testing.T) {
- for _, tc := range []struct {
- name string
- rpt ResolvedPipelineTask
- want string
- }{
- {
- name: "taskrun created but the conditions were not initialized",
- rpt: ResolvedPipelineTask{
- PipelineTask: &v1.PipelineTask{Name: "task"},
- TaskRuns: []*v1.TaskRun{{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: "namespace",
- Name: "taskRun",
- },
- }},
- },
- },
- {
- name: "taskrun not started",
- rpt: ResolvedPipelineTask{
- PipelineTask: &v1.PipelineTask{Name: "task"},
- },
- },
- {
- name: "run not started",
- rpt: ResolvedPipelineTask{
- PipelineTask: &v1.PipelineTask{Name: "task"},
- CustomTask: true,
- },
- },
- {
- name: "taskrun running",
- rpt: ResolvedPipelineTask{
- PipelineTask: &v1.PipelineTask{Name: "task"},
- TaskRuns: []*v1.TaskRun{makeStarted(trs[0])},
- },
- },
- {
- name: "run running",
- rpt: ResolvedPipelineTask{
- PipelineTask: &v1.PipelineTask{Name: "task"},
- CustomTask: true,
- CustomRuns: []*v1beta1.CustomRun{makeCustomRunStarted(customRuns[0])},
- },
- },
- {
- name: "taskrun succeeded",
- rpt: ResolvedPipelineTask{
- PipelineTask: &v1.PipelineTask{Name: "task"},
- TaskRuns: []*v1.TaskRun{makeSucceeded(trs[0])},
- },
- want: "Succeeded",
- },
- {
- name: "run succeeded",
- rpt: ResolvedPipelineTask{
- PipelineTask: &v1.PipelineTask{Name: "task"},
- CustomTask: true,
- CustomRuns: []*v1beta1.CustomRun{makeCustomRunSucceeded(customRuns[0])},
- },
- want: "Succeeded",
- },
- {
- name: "taskrun failed",
- rpt: ResolvedPipelineTask{
- PipelineTask: &v1.PipelineTask{Name: "task"},
- TaskRuns: []*v1.TaskRun{makeFailed(trs[0])},
- },
- want: "Failed",
- },
- {
- name: "run failed",
- rpt: ResolvedPipelineTask{
- PipelineTask: &v1.PipelineTask{Name: "task"},
- CustomTask: true,
- CustomRuns: []*v1beta1.CustomRun{makeCustomRunFailed(customRuns[0])},
- },
- want: "Failed",
- },
- {
- name: "taskrun failed: retried",
- rpt: ResolvedPipelineTask{
- PipelineTask: &v1.PipelineTask{Name: "task", Retries: 1},
- TaskRuns: []*v1.TaskRun{withRetries(makeFailed(trs[0]))},
- },
- want: "Failed",
- },
- {
- name: "run failed: retried",
- rpt: ResolvedPipelineTask{
- PipelineTask: &v1.PipelineTask{Name: "task", Retries: 1},
- CustomTask: true,
- CustomRuns: []*v1beta1.CustomRun{withCustomRunRetries(makeCustomRunFailed(customRuns[0]))},
- },
- want: "Failed",
- },
- {
- name: "taskrun cancelled",
- rpt: ResolvedPipelineTask{
- PipelineTask: &v1.PipelineTask{Name: "task"},
- TaskRuns: []*v1.TaskRun{withCancelled(makeFailed(trs[0]))},
- },
- want: v1.TaskRunReasonCancelled.String(),
- },
- {
- name: "taskrun cancelled but not failed",
- rpt: ResolvedPipelineTask{
- PipelineTask: &v1.PipelineTask{Name: "task"},
- TaskRuns: []*v1.TaskRun{withCancelled(newTaskRun(trs[0]))},
- },
- want: v1.TaskRunReasonCancelled.String(),
- },
- {
- name: "run cancelled",
- rpt: ResolvedPipelineTask{
- PipelineTask: &v1.PipelineTask{Name: "task"},
- CustomRuns: []*v1beta1.CustomRun{withCustomRunCancelled(makeCustomRunFailed(customRuns[0]))},
- CustomTask: true,
- },
- want: "CustomRunCancelled",
- },
- {
- name: "run cancelled but not failed",
- rpt: ResolvedPipelineTask{
- PipelineTask: &v1.PipelineTask{Name: "task"},
- CustomRuns: []*v1beta1.CustomRun{withCustomRunCancelled(newCustomRun(customRuns[0]))},
- CustomTask: true,
- },
- want: "CustomRunCancelled",
- },
- {
- name: "matrixed taskruns succeeded",
- rpt: ResolvedPipelineTask{
- PipelineTask: matrixedPipelineTask,
- TaskRuns: []*v1.TaskRun{makeSucceeded(trs[0]), makeSucceeded(trs[1])},
- },
- want: "Succeeded",
- },
- {
- name: "matrixed runs succeeded",
- rpt: ResolvedPipelineTask{
- CustomTask: true,
- PipelineTask: matrixedPipelineTask,
- CustomRuns: []*v1beta1.CustomRun{makeCustomRunSucceeded(customRuns[0]), makeCustomRunSucceeded(customRuns[1])},
- },
- want: "Succeeded",
- },
- {
- name: "matrixed taskruns failed",
- rpt: ResolvedPipelineTask{
- PipelineTask: matrixedPipelineTask,
- TaskRuns: []*v1.TaskRun{makeFailed(trs[0]), makeFailed(trs[1])},
- },
- want: "Failed",
- },
- {
- name: "matrixed runs failed",
- rpt: ResolvedPipelineTask{
- CustomTask: true,
- PipelineTask: matrixedPipelineTask,
- CustomRuns: []*v1beta1.CustomRun{makeCustomRunFailed(customRuns[0]), makeCustomRunFailed(customRuns[1])},
- },
- want: "Failed",
- },
- {
- name: "matrixed taskruns cancelled",
- rpt: ResolvedPipelineTask{
- PipelineTask: matrixedPipelineTask,
- TaskRuns: []*v1.TaskRun{withCancelled(makeFailed(trs[0])), withCancelled(makeFailed(trs[1]))},
- },
- want: v1.TaskRunReasonCancelled.String(),
- },
- {
- name: "matrixed runs cancelled",
- rpt: ResolvedPipelineTask{
- CustomTask: true,
- PipelineTask: matrixedPipelineTask,
- CustomRuns: []*v1beta1.CustomRun{withCustomRunCancelled(makeCustomRunFailed(customRuns[0])), withCustomRunCancelled(makeCustomRunFailed(customRuns[1]))},
- },
- want: "CustomRunCancelled",
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- if got := tc.rpt.getReason(); got != tc.want {
- t.Errorf("expected getReason: %s but got %s", tc.want, got)
- }
- })
- }
-}
-
func TestCreateResultsCacheMatrixedTaskRuns(t *testing.T) {
for _, tc := range []struct {
name string
@@ -5446,29 +5236,6 @@ func TestValidateParamEnumSubset_Valid(t *testing.T) {
},
},
},
- }, {
- name: "rt is nil - pass",
- params: []v1.Param{
- {
- Name: "resolved-task-p1",
- Value: v1.ParamValue{
- StringVal: "$(params.p1) and $(params.p2)",
- },
- },
- },
- pipelinePs: []v1.ParamSpec{
- {
- Name: "p1",
- Type: v1.ParamTypeString,
- Enum: []string{"v1", "v2"},
- },
- {
- Name: "p2",
- Type: v1.ParamTypeString,
- Enum: []string{"v3", "v4"},
- },
- },
- rt: nil,
},
}
@@ -5553,7 +5320,6 @@ func TestValidateParamEnumSubset_Invalid(t *testing.T) {
},
},
},
- rt: &resources.ResolvedTask{},
wantErr: errors.New("unexpected error in ExtractVariablesFromString: Invalid referencing of parameters in \"$(params.p1.aaa.bbb)\"! Only two dot-separated components after the prefix \"params\" are allowed."),
}}
diff --git a/upstream/pkg/reconciler/pipelinerun/resources/pipelinerunstate.go b/upstream/pkg/reconciler/pipelinerun/resources/pipelinerunstate.go
index 0f1b82621cc..75364fef691 100644
--- a/upstream/pkg/reconciler/pipelinerun/resources/pipelinerunstate.go
+++ b/upstream/pkg/reconciler/pipelinerun/resources/pipelinerunstate.go
@@ -43,7 +43,6 @@ const (
PipelineTaskStatusPrefix = "tasks."
// PipelineTaskStatusSuffix is a suffix of the param representing execution state of pipelineTask
PipelineTaskStatusSuffix = ".status"
- PipelineTaskReasonSuffix = ".reason"
)
// PipelineRunState is a slice of ResolvedPipelineRunTasks the represents the current execution
@@ -182,30 +181,6 @@ func (state PipelineRunState) GetTaskRunsResults() map[string][]v1.TaskRunResult
return results
}
-// GetTaskRunsArtifacts returns a map of all successfully completed TaskRuns in the state, with the pipeline task name as
-// the key and the artifacts from the corresponding TaskRun as the value. It only includes tasks which have completed successfully.
-func (state PipelineRunState) GetTaskRunsArtifacts() map[string]*v1.Artifacts {
- results := make(map[string]*v1.Artifacts)
- for _, rpt := range state {
- if rpt.IsCustomTask() {
- continue
- }
- if !rpt.isSuccessful() {
- continue
- }
- if rpt.PipelineTask.IsMatrixed() {
- var ars v1.Artifacts
- for _, tr := range rpt.TaskRuns {
- ars.Merge(tr.Status.Artifacts)
- }
- results[rpt.PipelineTask.Name] = &ars
- } else {
- results[rpt.PipelineTask.Name] = rpt.TaskRuns[0].Status.Artifacts
- }
- }
- return results
-}
-
// ConvertResultsMapToTaskRunResults converts the map of results from Matrixed PipelineTasks to a list
// of TaskRunResults to standard the format
func ConvertResultsMapToTaskRunResults(resultsMap map[string][]string) []v1.TaskRunResult {
@@ -630,7 +605,6 @@ func (facts *PipelineRunFacts) GetPipelineTaskStatus() map[string]string {
s = PipelineTaskStateNone
}
tStatus[PipelineTaskStatusPrefix+t.PipelineTask.Name+PipelineTaskStatusSuffix] = s
- tStatus[PipelineTaskStatusPrefix+t.PipelineTask.Name+PipelineTaskReasonSuffix] = t.getReason()
}
}
// initialize aggregate status of all dag tasks to None
diff --git a/upstream/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go b/upstream/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go
index 1c9be5a6793..7c42fe300e9 100644
--- a/upstream/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go
+++ b/upstream/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go
@@ -2351,9 +2351,7 @@ func TestPipelineRunFacts_GetPipelineTaskStatus(t *testing.T) {
dagTasks: []v1.PipelineTask{pts[0], pts[1]},
expectedStatus: map[string]string{
PipelineTaskStatusPrefix + pts[0].Name + PipelineTaskStatusSuffix: PipelineTaskStateNone,
- PipelineTaskStatusPrefix + pts[0].Name + PipelineTaskReasonSuffix: "",
PipelineTaskStatusPrefix + pts[1].Name + PipelineTaskStatusSuffix: PipelineTaskStateNone,
- PipelineTaskStatusPrefix + pts[1].Name + PipelineTaskReasonSuffix: "",
v1.PipelineTasksAggregateStatus: PipelineTaskStateNone,
},
}, {
@@ -2362,9 +2360,7 @@ func TestPipelineRunFacts_GetPipelineTaskStatus(t *testing.T) {
dagTasks: []v1.PipelineTask{pts[0], pts[1]},
expectedStatus: map[string]string{
PipelineTaskStatusPrefix + pts[0].Name + PipelineTaskStatusSuffix: PipelineTaskStateNone,
- PipelineTaskStatusPrefix + pts[0].Name + PipelineTaskReasonSuffix: "",
PipelineTaskStatusPrefix + pts[1].Name + PipelineTaskStatusSuffix: PipelineTaskStateNone,
- PipelineTaskStatusPrefix + pts[1].Name + PipelineTaskReasonSuffix: "",
v1.PipelineTasksAggregateStatus: PipelineTaskStateNone,
},
}, {
@@ -2373,9 +2369,7 @@ func TestPipelineRunFacts_GetPipelineTaskStatus(t *testing.T) {
dagTasks: []v1.PipelineTask{pts[0], pts[1]},
expectedStatus: map[string]string{
PipelineTaskStatusPrefix + pts[0].Name + PipelineTaskStatusSuffix: v1.TaskRunReasonSuccessful.String(),
- PipelineTaskStatusPrefix + pts[0].Name + PipelineTaskReasonSuffix: "Succeeded",
PipelineTaskStatusPrefix + pts[1].Name + PipelineTaskStatusSuffix: PipelineTaskStateNone,
- PipelineTaskStatusPrefix + pts[1].Name + PipelineTaskReasonSuffix: "",
v1.PipelineTasksAggregateStatus: PipelineTaskStateNone,
},
}, {
@@ -2384,9 +2378,7 @@ func TestPipelineRunFacts_GetPipelineTaskStatus(t *testing.T) {
dagTasks: []v1.PipelineTask{pts[0], pts[1]},
expectedStatus: map[string]string{
PipelineTaskStatusPrefix + pts[0].Name + PipelineTaskStatusSuffix: v1.TaskRunReasonFailed.String(),
- PipelineTaskStatusPrefix + pts[0].Name + PipelineTaskReasonSuffix: "Failed",
PipelineTaskStatusPrefix + pts[1].Name + PipelineTaskStatusSuffix: PipelineTaskStateNone,
- PipelineTaskStatusPrefix + pts[1].Name + PipelineTaskReasonSuffix: "",
v1.PipelineTasksAggregateStatus: v1.PipelineRunReasonFailed.String(),
},
}, {
@@ -2395,9 +2387,7 @@ func TestPipelineRunFacts_GetPipelineTaskStatus(t *testing.T) {
dagTasks: []v1.PipelineTask{pts[0], pts[1]},
expectedStatus: map[string]string{
PipelineTaskStatusPrefix + pts[0].Name + PipelineTaskStatusSuffix: v1.TaskRunReasonSuccessful.String(),
- PipelineTaskStatusPrefix + pts[0].Name + PipelineTaskReasonSuffix: "Succeeded",
PipelineTaskStatusPrefix + pts[1].Name + PipelineTaskStatusSuffix: v1.TaskRunReasonSuccessful.String(),
- PipelineTaskStatusPrefix + pts[1].Name + PipelineTaskReasonSuffix: "Succeeded",
v1.PipelineTasksAggregateStatus: v1.PipelineRunReasonSuccessful.String(),
},
}, {
@@ -2413,7 +2403,6 @@ func TestPipelineRunFacts_GetPipelineTaskStatus(t *testing.T) {
dagTasks: []v1.PipelineTask{pts[9]},
expectedStatus: map[string]string{
PipelineTaskStatusPrefix + pts[9].Name + PipelineTaskStatusSuffix: PipelineTaskStateNone,
- PipelineTaskStatusPrefix + pts[9].Name + PipelineTaskReasonSuffix: "",
v1.PipelineTasksAggregateStatus: PipelineTaskStateNone,
},
}, {
@@ -2428,7 +2417,6 @@ func TestPipelineRunFacts_GetPipelineTaskStatus(t *testing.T) {
dagTasks: []v1.PipelineTask{pts[10]},
expectedStatus: map[string]string{
PipelineTaskStatusPrefix + pts[10].Name + PipelineTaskStatusSuffix: PipelineTaskStateNone,
- PipelineTaskStatusPrefix + pts[10].Name + PipelineTaskReasonSuffix: "",
v1.PipelineTasksAggregateStatus: v1.PipelineRunReasonCompleted.String(),
},
}, {
@@ -2449,9 +2437,7 @@ func TestPipelineRunFacts_GetPipelineTaskStatus(t *testing.T) {
dagTasks: []v1.PipelineTask{pts[0], pts[11]},
expectedStatus: map[string]string{
PipelineTaskStatusPrefix + pts[0].Name + PipelineTaskStatusSuffix: PipelineTaskStateNone,
- PipelineTaskStatusPrefix + pts[0].Name + PipelineTaskReasonSuffix: "",
PipelineTaskStatusPrefix + pts[11].Name + PipelineTaskStatusSuffix: PipelineTaskStateNone,
- PipelineTaskStatusPrefix + pts[11].Name + PipelineTaskReasonSuffix: "",
v1.PipelineTasksAggregateStatus: PipelineTaskStateNone,
},
}, {
@@ -2460,7 +2446,6 @@ func TestPipelineRunFacts_GetPipelineTaskStatus(t *testing.T) {
dagTasks: []v1.PipelineTask{pts[4]},
expectedStatus: map[string]string{
PipelineTaskStatusPrefix + pts[4].Name + PipelineTaskStatusSuffix: PipelineTaskStateNone,
- PipelineTaskStatusPrefix + pts[4].Name + PipelineTaskReasonSuffix: v1.TaskRunReasonCancelled.String(),
v1.PipelineTasksAggregateStatus: PipelineTaskStateNone,
},
}, {
@@ -2482,9 +2467,7 @@ func TestPipelineRunFacts_GetPipelineTaskStatus(t *testing.T) {
dagTasks: []v1.PipelineTask{pts[0], pts[10]},
expectedStatus: map[string]string{
PipelineTaskStatusPrefix + pts[0].Name + PipelineTaskStatusSuffix: v1.PipelineRunReasonFailed.String(),
- PipelineTaskStatusPrefix + pts[0].Name + PipelineTaskReasonSuffix: v1.PipelineRunReasonFailed.String(),
PipelineTaskStatusPrefix + pts[10].Name + PipelineTaskStatusSuffix: PipelineTaskStateNone,
- PipelineTaskStatusPrefix + pts[10].Name + PipelineTaskReasonSuffix: "",
v1.PipelineTasksAggregateStatus: v1.PipelineRunReasonFailed.String(),
},
}}
@@ -3163,300 +3146,6 @@ func TestPipelineRunState_GetResultsFuncs(t *testing.T) {
}
}
-func TestPipelineRunState_GetTaskRunsArtifacts(t *testing.T) {
- testCases := []struct {
- name string
- state PipelineRunState
- expectedArtifacts map[string]*v1.Artifacts
- }{
- {
- name: "successful-task-with-artifacts",
- state: PipelineRunState{{
- TaskRunNames: []string{"successful-task-with-artifacts"},
- PipelineTask: &v1.PipelineTask{
- Name: "successful-task-with-artifacts-1",
- },
- TaskRuns: []*v1.TaskRun{{
- Status: v1.TaskRunStatus{
- Status: duckv1.Status{Conditions: []apis.Condition{{
- Type: apis.ConditionSucceeded,
- Status: corev1.ConditionTrue,
- }}},
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Artifacts: &v1.Artifacts{
- Inputs: []v1.Artifact{{Name: "source", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: []v1.Artifact{{Name: "image", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}},
- },
- }},
- }},
- }},
- expectedArtifacts: map[string]*v1.Artifacts{"successful-task-with-artifacts-1": {
- Inputs: []v1.Artifact{{Name: "source", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: []v1.Artifact{{Name: "image", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}},
- }},
- },
- {
- name: "two-successful-tasks-with-artifacts",
- state: PipelineRunState{{
- TaskRunNames: []string{"first-successful-task-with-artifacts"},
- PipelineTask: &v1.PipelineTask{
- Name: "successful-task-with-artifacts-1",
- },
- TaskRuns: []*v1.TaskRun{{
- Status: v1.TaskRunStatus{
- Status: duckv1.Status{Conditions: []apis.Condition{{
- Type: apis.ConditionSucceeded,
- Status: corev1.ConditionTrue,
- }}},
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Artifacts: &v1.Artifacts{
- Inputs: []v1.Artifact{{Name: "source", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: []v1.Artifact{{Name: "image", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}},
- },
- }},
- }},
- }, {
- TaskRunNames: []string{"second-successful-task-with-artifacts"},
- PipelineTask: &v1.PipelineTask{
- Name: "successful-task-with-artifacts-2",
- },
- TaskRuns: []*v1.TaskRun{{
- Status: v1.TaskRunStatus{
- Status: duckv1.Status{Conditions: []apis.Condition{{
- Type: apis.ConditionSucceeded,
- Status: corev1.ConditionTrue,
- }}},
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Artifacts: &v1.Artifacts{
- Inputs: []v1.Artifact{{Name: "source2", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: []v1.Artifact{{Name: "image2", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}},
- },
- }},
- }},
- }},
- expectedArtifacts: map[string]*v1.Artifacts{"successful-task-with-artifacts-1": {
- Inputs: []v1.Artifact{{Name: "source", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: []v1.Artifact{{Name: "image", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}},
- }, "successful-task-with-artifacts-2": {
- Inputs: []v1.Artifact{{Name: "source2", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: []v1.Artifact{{Name: "image2", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}},
- }},
- },
- {
- name: "Skip retrieving artifacts from unsuccessful task",
- state: PipelineRunState{{
- TaskRunNames: []string{"unsuccessful-task-with-artifacts"},
- PipelineTask: &v1.PipelineTask{
- Name: "unsuccessful-task-with-artifacts-1",
- },
- TaskRuns: []*v1.TaskRun{{
- Status: v1.TaskRunStatus{
- Status: duckv1.Status{Conditions: []apis.Condition{{
- Type: apis.ConditionSucceeded,
- Status: corev1.ConditionFalse,
- }}},
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Artifacts: &v1.Artifacts{
- Inputs: []v1.Artifact{{Name: "source", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: []v1.Artifact{{Name: "image", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}},
- },
- }},
- }},
- }},
- expectedArtifacts: map[string]*v1.Artifacts{},
- },
- {
- name: "One successful task and one failed task, only retrieving artifacts from the successful one",
- state: PipelineRunState{
- {
- TaskRunNames: []string{"successful-task-with-artifacts"},
- PipelineTask: &v1.PipelineTask{
- Name: "successful-task-with-artifacts-1",
- },
- TaskRuns: []*v1.TaskRun{{
- Status: v1.TaskRunStatus{
- Status: duckv1.Status{Conditions: []apis.Condition{{
- Type: apis.ConditionSucceeded,
- Status: corev1.ConditionTrue,
- }}},
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Artifacts: &v1.Artifacts{
- Inputs: []v1.Artifact{{Name: "source", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: []v1.Artifact{{Name: "image", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}},
- },
- }},
- }},
- },
- {
- TaskRunNames: []string{"unsuccessful-task-with-artifacts"},
- PipelineTask: &v1.PipelineTask{
- Name: "unsuccessful-task-with-artifacts-1",
- },
- TaskRuns: []*v1.TaskRun{{
- Status: v1.TaskRunStatus{
- Status: duckv1.Status{Conditions: []apis.Condition{{
- Type: apis.ConditionSucceeded,
- Status: corev1.ConditionFalse,
- }}},
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Artifacts: &v1.Artifacts{
- Inputs: []v1.Artifact{{Name: "source0", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: []v1.Artifact{{Name: "image0", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}},
- },
- }},
- }},
- }},
- expectedArtifacts: map[string]*v1.Artifacts{"successful-task-with-artifacts-1": {
- Inputs: []v1.Artifact{{Name: "source", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: []v1.Artifact{{Name: "image", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}},
- }},
- },
- {
- name: "One standard successful taskRun with artifacts and one custom task, custom task has no effect",
- state: PipelineRunState{{
- CustomRunNames: []string{"successful-run-without-results"},
- CustomTask: true,
- PipelineTask: &v1.PipelineTask{
- Name: "successful-run-without-results-1",
- },
- CustomRuns: []*v1beta1.CustomRun{
- {
- Status: v1beta1.CustomRunStatus{
- Status: duckv1.Status{Conditions: []apis.Condition{{
- Type: apis.ConditionSucceeded,
- Status: corev1.ConditionTrue,
- }}},
- CustomRunStatusFields: v1beta1.CustomRunStatusFields{},
- },
- }},
- },
- {
- TaskRunNames: []string{"successful-task-with-artifacts"},
- PipelineTask: &v1.PipelineTask{
- Name: "successful-task-with-artifacts-1",
- },
- TaskRuns: []*v1.TaskRun{{
- Status: v1.TaskRunStatus{
- Status: duckv1.Status{Conditions: []apis.Condition{{
- Type: apis.ConditionSucceeded,
- Status: corev1.ConditionTrue,
- }}},
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Artifacts: &v1.Artifacts{
- Inputs: []v1.Artifact{{Name: "source", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: []v1.Artifact{{Name: "image", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}},
- },
- }},
- }},
- },
- },
- expectedArtifacts: map[string]*v1.Artifacts{"successful-task-with-artifacts-1": {
- Inputs: []v1.Artifact{{Name: "source", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: []v1.Artifact{{Name: "image", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}},
- }},
- },
- {
- name: "matrixed tasks",
- state: PipelineRunState{{
- TaskRunNames: []string{
- "matrixed-task-run-0",
- "matrixed-task-run-1",
- "matrixed-task-run-2",
- "matrixed-task-run-3",
- },
- PipelineTask: &v1.PipelineTask{
- Name: "matrixed-task-with-artifacts",
- TaskRef: &v1.TaskRef{
- Name: "task",
- Kind: "Task",
- APIVersion: "v1",
- },
- Matrix: &v1.Matrix{
- Params: v1.Params{{
- Name: "foobar",
- Value: v1.ParamValue{Type: v1.ParamTypeArray, ArrayVal: []string{"foo", "bar"}},
- }, {
- Name: "quxbaz",
- Value: v1.ParamValue{Type: v1.ParamTypeArray, ArrayVal: []string{"qux", "baz"}},
- }}},
- },
- TaskRuns: []*v1.TaskRun{{
- TypeMeta: metav1.TypeMeta{APIVersion: "tekton.dev/v1"},
- ObjectMeta: metav1.ObjectMeta{Name: "matrixed-task-run-0"},
- Status: v1.TaskRunStatus{
- Status: duckv1.Status{Conditions: []apis.Condition{{
- Type: apis.ConditionSucceeded,
- Status: corev1.ConditionTrue,
- Reason: v1.TaskRunReasonSuccessful.String(),
- }}},
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Artifacts: &v1.Artifacts{
- Inputs: []v1.Artifact{{Name: "source1", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: []v1.Artifact{{Name: "image1", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}},
- },
- }},
- }, {
- TypeMeta: metav1.TypeMeta{APIVersion: "tekton.dev/v1"},
- ObjectMeta: metav1.ObjectMeta{Name: "matrixed-task-run-1"},
- Status: v1.TaskRunStatus{
- Status: duckv1.Status{Conditions: []apis.Condition{{
- Type: apis.ConditionSucceeded,
- Status: corev1.ConditionTrue,
- Reason: v1.TaskRunReasonSuccessful.String(),
- }}},
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Artifacts: &v1.Artifacts{
- Inputs: []v1.Artifact{{Name: "source2", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: []v1.Artifact{{Name: "image2", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}},
- },
- }},
- }, {
- TypeMeta: metav1.TypeMeta{APIVersion: "tekton.dev/v1"},
- ObjectMeta: metav1.ObjectMeta{Name: "matrixed-task-run-2"},
- Status: v1.TaskRunStatus{
- Status: duckv1.Status{Conditions: []apis.Condition{{
- Type: apis.ConditionSucceeded,
- Status: corev1.ConditionTrue,
- Reason: v1.TaskRunReasonSuccessful.String(),
- }}},
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Artifacts: &v1.Artifacts{
- Inputs: []v1.Artifact{{Name: "source3", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: []v1.Artifact{{Name: "image3", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}},
- },
- }},
- }, {
- TypeMeta: metav1.TypeMeta{APIVersion: "tekton.dev/v1"},
- ObjectMeta: metav1.ObjectMeta{Name: "matrixed-task-run-3"},
- Status: v1.TaskRunStatus{
- Status: duckv1.Status{Conditions: []apis.Condition{{
- Type: apis.ConditionSucceeded,
- Status: corev1.ConditionTrue,
- Reason: v1.TaskRunReasonSuccessful.String(),
- }}},
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Artifacts: &v1.Artifacts{
- Inputs: []v1.Artifact{{Name: "source4", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: []v1.Artifact{{Name: "image4", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}},
- },
- }},
- }},
- }},
- expectedArtifacts: map[string]*v1.Artifacts{"matrixed-task-with-artifacts": {
- Inputs: []v1.Artifact{{Name: "source1", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}, {Name: "source2", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}, {Name: "source3", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}, {Name: "source4", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"}, Uri: "pkg:example.github.com/inputs"}}}},
- Outputs: []v1.Artifact{{Name: "image1", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}, {Name: "image2", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}, {Name: "image3", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}, {Name: "image4", Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2"}, Uri: "pkg:github/package-url/purl-spec@244fd47e07d1004f0aed9c"}}}},
- }},
- },
- }
-
- for _, tt := range testCases {
- got := tt.state.GetTaskRunsArtifacts()
- if d := cmp.Diff(tt.expectedArtifacts, got, cmpopts.SortSlices(func(a, b v1.Artifact) bool { return a.Name > b.Name })); d != "" {
- t.Errorf("GetTaskRunsArtifacts() did not produce expected artifacts for test %s: %s", tt.name, diff.PrintWantGot(d))
- }
- }
-}
-
func TestPipelineRunState_GetChildReferences(t *testing.T) {
testCases := []struct {
name string
diff --git a/upstream/pkg/reconciler/pipelinerun/resources/resultrefresolution.go b/upstream/pkg/reconciler/pipelinerun/resources/resultrefresolution.go
index e441b8eb0b9..790f5c6c105 100644
--- a/upstream/pkg/reconciler/pipelinerun/resources/resultrefresolution.go
+++ b/upstream/pkg/reconciler/pipelinerun/resources/resultrefresolution.go
@@ -17,7 +17,6 @@ limitations under the License.
package resources
import (
- "encoding/json"
"errors"
"fmt"
"sort"
@@ -27,9 +26,11 @@ import (
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
)
-// ErrInvalidTaskResultReference indicates that the reason for the failure status is that there
-// is an invalid task result reference
-var ErrInvalidTaskResultReference = pipelineErrors.WrapUserError(errors.New("Invalid task result reference"))
+var (
+ // ErrInvalidTaskResultReference indicates that the reason for the failure status is that there
+ // is an invalid task result reference
+ ErrInvalidTaskResultReference = pipelineErrors.WrapUserError(errors.New("Invalid task result reference"))
+)
// ResolvedResultRefs represents all of the ResolvedResultRef for a pipeline task
type ResolvedResultRefs []*ResolvedResultRef
@@ -163,28 +164,13 @@ func resolveCustomResultRef(customRuns []*v1beta1.CustomRun, resultRef *v1.Resul
return nil, err
}
return &ResolvedResultRef{
- Value: *paramValueFromCustomRunResult(runValue),
+ Value: *v1.NewStructuredValues(runValue),
FromTaskRun: "",
FromRun: runName,
ResultReference: *resultRef,
}, nil
}
-func paramValueFromCustomRunResult(result string) *v1.ParamValue {
- var arrayResult []string
- // for fan out array result, which is represented as string, we should make it to array type param value
- if err := json.Unmarshal([]byte(result), &arrayResult); err == nil && len(arrayResult) > 0 {
- if len(arrayResult) > 1 {
- return v1.NewStructuredValues(arrayResult[0], arrayResult[1:]...)
- }
- return &v1.ParamValue{
- Type: v1.ParamTypeArray,
- ArrayVal: []string{arrayResult[0]},
- }
- }
- return v1.NewStructuredValues(result)
-}
-
func resolveResultRef(taskRuns []*v1.TaskRun, resultRef *v1.ResultRef) (*ResolvedResultRef, error) {
taskRun := taskRuns[0]
taskRunName := taskRun.Name
@@ -209,7 +195,6 @@ func findRunResultForParam(customRun *v1beta1.CustomRun, reference *v1.ResultRef
err := fmt.Errorf("%w: Could not find result with name %s for task %s", ErrInvalidTaskResultReference, reference.Result, reference.PipelineTask)
return "", err
}
-
func findTaskResultForParam(taskRun *v1.TaskRun, reference *v1.ResultRef) (v1.ResultValue, error) {
results := taskRun.Status.TaskRunStatusFields.Results
for _, result := range results {
@@ -254,7 +239,7 @@ func (rs ResolvedResultRefs) getStringReplacements() map[string]string {
for _, r := range rs {
switch r.Value.Type {
case v1.ParamTypeArray:
- for i := range len(r.Value.ArrayVal) {
+ for i := 0; i < len(r.Value.ArrayVal); i++ {
for _, target := range r.getReplaceTargetfromArrayIndex(i) {
replacements[target] = r.Value.ArrayVal[i]
}
diff --git a/upstream/pkg/reconciler/pipelinerun/resources/resultrefresolution_test.go b/upstream/pkg/reconciler/pipelinerun/resources/resultrefresolution_test.go
index e11c80e18f9..1658445abcf 100644
--- a/upstream/pkg/reconciler/pipelinerun/resources/resultrefresolution_test.go
+++ b/upstream/pkg/reconciler/pipelinerun/resources/resultrefresolution_test.go
@@ -936,53 +936,3 @@ func TestValidateArrayResultsIndex(t *testing.T) {
})
}
}
-
-func TestParamValueFromCustomRunResult(t *testing.T) {
- type args struct {
- result string
- }
- tests := []struct {
- name string
- args args
- want *v1.ParamValue
- }{
- {
- name: "multiple array elements result",
- args: args{
- result: `["amd64", "arm64"]`,
- },
- want: &v1.ParamValue{
- Type: "array",
- ArrayVal: []string{"amd64", "arm64"},
- },
- },
- {
- name: "single array elements result",
- args: args{
- result: `[ "amd64" ]`,
- },
- want: &v1.ParamValue{
- Type: "array",
- ArrayVal: []string{"amd64"},
- },
- },
- {
- name: "simple string result",
- args: args{
- result: "amd64",
- },
- want: &v1.ParamValue{
- Type: "string",
- StringVal: "amd64",
- },
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got := paramValueFromCustomRunResult(tt.args.result)
- if d := cmp.Diff(tt.want, got); d != "" {
- t.Fatalf("paramValueFromCustomRunResult %s", diff.PrintWantGot(d))
- }
- })
- }
-}
diff --git a/upstream/pkg/reconciler/pipelinerun/resources/validate_params_test.go b/upstream/pkg/reconciler/pipelinerun/resources/validate_params_test.go
index 18fcd6241e9..9f32d1d6290 100644
--- a/upstream/pkg/reconciler/pipelinerun/resources/validate_params_test.go
+++ b/upstream/pkg/reconciler/pipelinerun/resources/validate_params_test.go
@@ -610,6 +610,7 @@ func TestValidateParamArrayIndex_valid(t *testing.T) {
params: v1.Params{{Name: "second-param", Value: *v1.NewStructuredValues("second-value", "second-value-again")}},
},
} {
+ tt := tt // capture range variable
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
err := resources.ValidateParamArrayIndex(&tt.original, tt.params)
@@ -836,6 +837,7 @@ func TestValidateParamArrayIndex_invalid(t *testing.T) {
expected: errors.New("non-existent param references:[$(params.first-param[2]) $(params.second-param[3])]"),
},
} {
+ tt := tt // capture range variable
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
err := resources.ValidateParamArrayIndex(&tt.original, tt.params)
diff --git a/upstream/pkg/reconciler/pipelinerun/timeout.go b/upstream/pkg/reconciler/pipelinerun/timeout.go
index 845ef3c22dd..8ae29a62d30 100644
--- a/upstream/pkg/reconciler/pipelinerun/timeout.go
+++ b/upstream/pkg/reconciler/pipelinerun/timeout.go
@@ -21,7 +21,6 @@ import (
"strings"
"time"
- pipelineErrors "github.com/tektoncd/pipeline/pkg/apis/pipeline/errors"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
@@ -126,10 +125,6 @@ func timeoutPipelineTasksForTaskNames(ctx context.Context, logger *zap.SugaredLo
logger.Infof("patching TaskRun %s for timeout", taskRunName)
if err := timeoutTaskRun(ctx, taskRunName, pr.Namespace, clientSet); err != nil {
- if pipelineErrors.IsImmutableTaskRunSpecError(err) {
- // The TaskRun may have completed and the spec field is immutable, we should ignore this error.
- continue
- }
errs = append(errs, fmt.Errorf("failed to patch TaskRun `%s` with timeout: %w", taskRunName, err).Error())
continue
}
diff --git a/upstream/pkg/reconciler/pipelinerun/timeout_test.go b/upstream/pkg/reconciler/pipelinerun/timeout_test.go
index 5e6955f7210..a369aeee268 100644
--- a/upstream/pkg/reconciler/pipelinerun/timeout_test.go
+++ b/upstream/pkg/reconciler/pipelinerun/timeout_test.go
@@ -245,6 +245,7 @@ func TestTimeoutPipelineRun(t *testing.T) {
wantErr: true,
}}
for _, tc := range testCases {
+ tc := tc
t.Run(tc.name, func(t *testing.T) {
d := test.Data{
PipelineRuns: []*v1.PipelineRun{tc.pipelineRun},
diff --git a/upstream/pkg/reconciler/pipelinerun/tracing.go b/upstream/pkg/reconciler/pipelinerun/tracing.go
index 7ab4d1755c5..8aedd34948e 100644
--- a/upstream/pkg/reconciler/pipelinerun/tracing.go
+++ b/upstream/pkg/reconciler/pipelinerun/tracing.go
@@ -45,7 +45,7 @@ func initTracing(ctx context.Context, tracerProvider trace.TracerProvider, pr *v
pro := otel.GetTextMapPropagator()
// SpanContext was created already
- if len(pr.Status.SpanContext) > 0 {
+ if pr.Status.SpanContext != nil && len(pr.Status.SpanContext) > 0 {
return pro.Extract(ctx, propagation.MapCarrier(pr.Status.SpanContext))
}
diff --git a/upstream/pkg/reconciler/taskrun/controller.go b/upstream/pkg/reconciler/taskrun/controller.go
index 84ab26185d2..451c78f9795 100644
--- a/upstream/pkg/reconciler/taskrun/controller.go
+++ b/upstream/pkg/reconciler/taskrun/controller.go
@@ -31,7 +31,7 @@ import (
"github.com/tektoncd/pipeline/pkg/pod"
cloudeventclient "github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent"
"github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
- resolution "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
+ resolution "github.com/tektoncd/pipeline/pkg/resolution/resource"
"github.com/tektoncd/pipeline/pkg/spire"
"github.com/tektoncd/pipeline/pkg/taskrunmetrics"
"github.com/tektoncd/pipeline/pkg/tracing"
@@ -65,13 +65,8 @@ func NewController(opts *pipeline.Options, clock clock.PassiveClock) func(contex
secretinformer := secretinformer.Get(ctx)
spireClient := spire.GetControllerAPIClient(ctx)
tracerProvider := tracing.New(TracerProviderName, logger.Named("tracing"))
- taskrunmetricsRecorder := taskrunmetrics.Get(ctx)
//nolint:contextcheck // OnStore methods does not support context as a parameter
- configStore := config.NewStore(logger.Named("config-store"),
- taskrunmetrics.OnStore(logger, taskrunmetricsRecorder),
- spire.OnStore(ctx, logger),
- tracerProvider.OnStore(secretinformer.Lister()),
- )
+ configStore := config.NewStore(logger.Named("config-store"), taskrunmetrics.MetricsOnStore(logger), spire.OnStore(ctx, logger), tracerProvider.OnStore(secretinformer.Lister()))
configStore.WatchConfigs(cmw)
entrypointCache, err := pod.NewEntrypointCache(kubeclientset)
@@ -89,7 +84,7 @@ func NewController(opts *pipeline.Options, clock clock.PassiveClock) func(contex
limitrangeLister: limitrangeInformer.Lister(),
verificationPolicyLister: verificationpolicyInformer.Lister(),
cloudEventClient: cloudeventclient.Get(ctx),
- metrics: taskrunmetricsRecorder,
+ metrics: taskrunmetrics.Get(ctx),
entrypointCache: entrypointCache,
podLister: podInformer.Lister(),
pvcHandler: volumeclaim.NewPVCHandler(kubeclientset, logger),
diff --git a/upstream/pkg/reconciler/taskrun/resources/apply.go b/upstream/pkg/reconciler/taskrun/resources/apply.go
index e6036faf832..b54a618dba1 100644
--- a/upstream/pkg/reconciler/taskrun/resources/apply.go
+++ b/upstream/pkg/reconciler/taskrun/resources/apply.go
@@ -21,11 +21,9 @@ import (
"fmt"
"path/filepath"
"regexp"
- "sort"
"strconv"
"strings"
- "github.com/tektoncd/pipeline/internal/artifactref"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/container"
@@ -51,14 +49,6 @@ var (
"inputs.params.%s",
}
- substitutionToParamNamePatterns = []string{
- `^params\.(\w+)$`,
- `^params\["([^"]+)"\]$`,
- `^params\['([^']+)'\]$`,
- // FIXME(vdemeester) Remove that with deprecating v1beta1
- `^inputs\.params\.(\w+)$`,
- }
-
paramIndexRegexPatterns = []string{
`\$\(params.%s\[([0-9]*)*\*?\]\)`,
`\$\(params\[%q\]\[([0-9]*)*\*?\]\)`,
@@ -67,7 +57,7 @@ var (
)
// applyStepActionParameters applies the params from the Task and the underlying Step to the referenced StepAction.
-func applyStepActionParameters(step *v1.Step, spec *v1.TaskSpec, tr *v1.TaskRun, stepParams v1.Params, defaults []v1.ParamSpec) (*v1.Step, error) {
+func applyStepActionParameters(step *v1.Step, spec *v1.TaskSpec, tr *v1.TaskRun, stepParams v1.Params, defaults []v1.ParamSpec) *v1.Step {
if stepParams != nil {
stringR, arrayR, objectR := getTaskParameters(spec, tr, spec.Params...)
stepParams = stepParams.ReplaceVariables(stringR, arrayR, objectR)
@@ -88,45 +78,8 @@ func applyStepActionParameters(step *v1.Step, spec *v1.TaskSpec, tr *v1.TaskRun,
for k, v := range stepResultReplacements {
stringReplacements[k] = v
}
-
- // Check if there are duplicate keys in the replacements
- // If the same key is present in both stringReplacements and arrayReplacements, it means
- // that the default value and the passed value have different types.
- err := checkForDuplicateKeys(stringReplacements, arrayReplacements)
- if err != nil {
- return nil, err
- }
-
container.ApplyStepReplacements(step, stringReplacements, arrayReplacements)
- return step, nil
-}
-
-// checkForDuplicateKeys checks if there are duplicate keys in the replacements
-func checkForDuplicateKeys(stringReplacements map[string]string, arrayReplacements map[string][]string) error {
- keys := make([]string, 0, len(stringReplacements))
- for k := range stringReplacements {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- for _, k := range keys {
- if _, ok := arrayReplacements[k]; ok {
- paramName := paramNameFromReplacementKey(k)
- return fmt.Errorf("invalid parameter substitution: %s. Please check the types of the default value and the passed value", paramName)
- }
- }
- return nil
-}
-
-// paramNameFromReplacementKey returns the param name from the replacement key in best effort
-func paramNameFromReplacementKey(key string) string {
- for _, regexPattern := range substitutionToParamNamePatterns {
- re := regexp.MustCompile(regexPattern)
- if matches := re.FindStringSubmatch(key); matches != nil {
- return matches[1]
- }
- }
- // If no match is found, return the key
- return key
+ return step
}
// findArrayIndexParamUsage finds the array index in a string using array param substitution
@@ -246,7 +199,7 @@ func replacementsFromDefaultParams(defaults v1.ParamSpecs) (map[string]string, m
switch p.Default.Type {
case v1.ParamTypeArray:
for _, pattern := range paramPatterns {
- for i := range len(p.Default.ArrayVal) {
+ for i := 0; i < len(p.Default.ArrayVal); i++ {
stringReplacements[fmt.Sprintf(pattern+"[%d]", p.Name, i)] = p.Default.ArrayVal[i]
}
arrayReplacements[fmt.Sprintf(pattern, p.Name)] = p.Default.ArrayVal
@@ -281,7 +234,7 @@ func replacementsFromParams(params v1.Params) (map[string]string, map[string][]s
switch p.Value.Type {
case v1.ParamTypeArray:
for _, pattern := range paramPatterns {
- for i := range len(p.Value.ArrayVal) {
+ for i := 0; i < len(p.Value.ArrayVal); i++ {
stringReplacements[fmt.Sprintf(pattern+"[%d]", p.Name, i)] = p.Value.ArrayVal[i]
}
arrayReplacements[fmt.Sprintf(pattern, p.Name)] = p.Value.ArrayVal
@@ -446,20 +399,19 @@ func getTaskResultReplacements(spec *v1.TaskSpec) map[string]string {
return stringReplacements
}
-// ApplyArtifacts replaces the occurrences of artifacts.path and step.artifacts.path with the absolute tekton internal path
+// ApplyArtifacts replaces the occurrences of step.artifacts.path with the absolute tekton internal path
func ApplyArtifacts(spec *v1.TaskSpec) *v1.TaskSpec {
for i := range spec.Steps {
- stringReplacements := getArtifactReplacements(spec.Steps[i], i)
+ stringReplacements := getStepArtifactReplacements(spec.Steps[i], i)
container.ApplyStepReplacements(&spec.Steps[i], stringReplacements, map[string][]string{})
}
return spec
}
-func getArtifactReplacements(step v1.Step, idx int) map[string]string {
+func getStepArtifactReplacements(step v1.Step, idx int) map[string]string {
stringReplacements := map[string]string{}
stepName := pod.StepName(step.Name, idx)
- stringReplacements[artifactref.StepArtifactPathPattern] = filepath.Join(pipeline.StepsDir, stepName, "artifacts", "provenance.json")
- stringReplacements[artifactref.TaskArtifactPathPattern] = filepath.Join(pipeline.ArtifactsDir, "provenance.json")
+ stringReplacements["step.artifacts.path"] = filepath.Join(pipeline.StepsDir, stepName, "artifacts", "provenance.json")
return stringReplacements
}
@@ -470,7 +422,8 @@ func ApplyStepExitCodePath(spec *v1.TaskSpec) *v1.TaskSpec {
stringReplacements := map[string]string{}
for i, step := range spec.Steps {
- stringReplacements[fmt.Sprintf("steps.%s.exitCode.path", pod.StepName(step.Name, i))] = filepath.Join(pipeline.StepsDir, pod.StepName(step.Name, i), "exitCode")
+ stringReplacements[fmt.Sprintf("steps.%s.exitCode.path", pod.StepName(step.Name, i))] =
+ filepath.Join(pipeline.StepsDir, pod.StepName(step.Name, i), "exitCode")
}
return ApplyReplacements(spec, stringReplacements, map[string][]string{}, map[string]map[string]string{})
}
diff --git a/upstream/pkg/reconciler/taskrun/resources/apply_test.go b/upstream/pkg/reconciler/taskrun/resources/apply_test.go
index 9b1d9f1be1b..9601df0b4bf 100644
--- a/upstream/pkg/reconciler/taskrun/resources/apply_test.go
+++ b/upstream/pkg/reconciler/taskrun/resources/apply_test.go
@@ -551,14 +551,12 @@ var (
arrayTaskRun0Elements = &v1.TaskRun{
Spec: v1.TaskRunSpec{
- Params: []v1.Param{
- {
- Name: "array-param",
- Value: v1.ParamValue{
- Type: v1.ParamTypeArray,
- ArrayVal: []string{},
- },
- },
+ Params: []v1.Param{{
+ Name: "array-param",
+ Value: v1.ParamValue{
+ Type: v1.ParamTypeArray,
+ ArrayVal: []string{},
+ }},
},
},
}
@@ -1103,15 +1101,13 @@ func TestApplyWorkspaces(t *testing.T) {
Name: "$(workspaces.myws.volume)",
},
},
- },
- }, {
+ }}, {
Name: "some-secret",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "$(workspaces.myws.volume)",
},
- },
- }, {
+ }}, {
Name: "some-pvc",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
@@ -1483,7 +1479,7 @@ func TestContext(t *testing.T) {
t.Run(tc.description, func(t *testing.T) {
got := resources.ApplyContexts(&tc.spec, tc.taskName, &tc.tr)
if d := cmp.Diff(&tc.want, got); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
@@ -1492,14 +1488,12 @@ func TestContext(t *testing.T) {
func TestTaskResults(t *testing.T) {
names.TestingSeed()
ts := &v1.TaskSpec{
- Results: []v1.TaskResult{
- {
- Name: "current.date.unix.timestamp",
- Description: "The current date in unix timestamp format",
- }, {
- Name: "current-date-human-readable",
- Description: "The current date in humand readable format",
- },
+ Results: []v1.TaskResult{{
+ Name: "current.date.unix.timestamp",
+ Description: "The current date in unix timestamp format",
+ }, {
+ Name: "current-date-human-readable",
+ Description: "The current date in human readable format"},
},
Steps: []v1.Step{{
Name: "print-date-unix-timestamp",
@@ -1636,7 +1630,7 @@ func TestApplyCredentialsPath(t *testing.T) {
t.Run(tc.description, func(t *testing.T) {
got := resources.ApplyCredentialsPath(&tc.spec, tc.path)
if d := cmp.Diff(&tc.want, got); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
@@ -1794,11 +1788,10 @@ func TestApplyParametersToWorkspaceBindings(t *testing.T) {
},
},
},
- Params: v1.Params{
- {Name: "secret-name", Value: v1.ParamValue{
- Type: v1.ParamTypeString,
- StringVal: "secret-value",
- }},
+ Params: v1.Params{{Name: "secret-name", Value: v1.ParamValue{
+ Type: v1.ParamTypeString,
+ StringVal: "secret-value",
+ }},
},
},
},
@@ -2028,15 +2021,14 @@ func TestApplyParametersToWorkspaceBindings(t *testing.T) {
func TestArtifacts(t *testing.T) {
ts := &v1.TaskSpec{
- Steps: []v1.Step{
- {
- Name: "name1",
- Image: "bash:latest",
- Args: []string{
- "$(step.artifacts.path)",
- },
- Script: "#!/usr/bin/env bash\n echo -n $(step.artifacts.path)",
+ Steps: []v1.Step{{
+ Name: "name1",
+ Image: "bash:latest",
+ Args: []string{
+ "$(step.artifacts.path)",
},
+ Script: "#!/usr/bin/env bash\n echo -n $(step.artifacts.path)",
+ },
},
}
diff --git a/upstream/pkg/reconciler/taskrun/resources/taskref.go b/upstream/pkg/reconciler/taskrun/resources/taskref.go
index 2a1e9d8ca9b..b918ec7b6b2 100644
--- a/upstream/pkg/reconciler/taskrun/resources/taskref.go
+++ b/upstream/pkg/reconciler/taskrun/resources/taskref.go
@@ -25,13 +25,11 @@ import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
- resolutionV1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
"github.com/tektoncd/pipeline/pkg/reconciler/apiserver"
"github.com/tektoncd/pipeline/pkg/remote"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/remote/resolution"
- remoteresource "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
- "github.com/tektoncd/pipeline/pkg/substitution"
+ "github.com/tektoncd/pipeline/pkg/remote/resolution"
+ remoteresource "github.com/tektoncd/pipeline/pkg/resolution/resource"
"github.com/tektoncd/pipeline/pkg/trustedresources"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -39,6 +37,12 @@ import (
"knative.dev/pkg/kmeta"
)
+// This error is defined in etcd at
+// https://github.com/etcd-io/etcd/blob/5b226e0abf4100253c94bb71f47d6815877ed5a2/server/etcdserver/errors.go#L30
+// TODO: If/when https://github.com/kubernetes/kubernetes/issues/106491 is addressed,
+// we should stop relying on a hardcoded string.
+var errEtcdLeaderChange = "etcdserver: leader changed"
+
// GetTaskKind returns the referenced Task kind (Task, ClusterTask, ...) if the TaskRun is using TaskRef.
func GetTaskKind(taskrun *v1.TaskRun) v1.TaskKind {
kind := v1.NamespacedTaskKind
@@ -93,7 +97,6 @@ func GetTaskFunc(ctx context.Context, k8s kubernetes.Interface, tekton clientset
// casting it to a TaskObject.
return func(ctx context.Context, name string) (*v1.Task, *v1.RefSource, *trustedresources.VerificationResult, error) {
var replacedParams v1.Params
- var url string
if ownerAsTR, ok := owner.(*v1.TaskRun); ok {
stringReplacements, arrayReplacements, _ := replacementsFromParams(ownerAsTR.Spec.Params)
for k, v := range getContextReplacements("", ownerAsTR) {
@@ -103,23 +106,10 @@ func GetTaskFunc(ctx context.Context, k8s kubernetes.Interface, tekton clientset
p.Value.ApplyReplacements(stringReplacements, arrayReplacements, nil)
replacedParams = append(replacedParams, p)
}
- if err := v1.RefNameLikeUrl(tr.Name); err == nil {
- // The name is url-like so its not a local reference.
- tr.Name = substitution.ApplyReplacements(tr.Name, stringReplacements)
- url = tr.Name
- }
} else {
replacedParams = append(replacedParams, tr.Params...)
}
- resolverPayload := remoteresource.ResolverPayload{
- Name: trName,
- Namespace: namespace,
- ResolutionSpec: &resolutionV1beta1.ResolutionRequestSpec{
- Params: replacedParams,
- URL: url,
- },
- }
- resolver := resolution.NewResolver(requester, owner, string(tr.Resolver), resolverPayload)
+ resolver := resolution.NewResolver(requester, owner, string(tr.Resolver), trName, namespace, replacedParams)
return resolveTask(ctx, resolver, name, namespace, kind, k8s, tekton, verificationPolicies)
}
@@ -138,24 +128,16 @@ func GetTaskFunc(ctx context.Context, k8s kubernetes.Interface, tekton clientset
// It also requires a kubeclient, tektonclient, requester in case it needs to find that task in
// cluster or authorize against an external repository. It will figure out whether it needs to look in the cluster or in
// a remote location to fetch the reference.
-func GetStepActionFunc(tekton clientset.Interface, k8s kubernetes.Interface, requester remoteresource.Requester, tr *v1.TaskRun, taskSpec v1.TaskSpec, step *v1.Step) GetStepAction {
+func GetStepActionFunc(tekton clientset.Interface, k8s kubernetes.Interface, requester remoteresource.Requester, tr *v1.TaskRun, step *v1.Step) GetStepAction {
trName := tr.Name
namespace := tr.Namespace
if step.Ref != nil && step.Ref.Resolver != "" && requester != nil {
// Return an inline function that implements GetStepAction by calling Resolver.Get with the specified StepAction type and
// casting it to a StepAction.
- return func(ctx context.Context, name string) (*v1beta1.StepAction, *v1.RefSource, error) {
+ return func(ctx context.Context, name string) (*v1alpha1.StepAction, *v1.RefSource, error) {
// Perform params replacements for StepAction resolver params
- ApplyParameterSubstitutionInResolverParams(tr, taskSpec, step)
- resolverPayload := remoteresource.ResolverPayload{
- Name: trName,
- Namespace: namespace,
- ResolutionSpec: &resolutionV1beta1.ResolutionRequestSpec{
- Params: step.Ref.Params,
- URL: step.Ref.Name,
- },
- }
- resolver := resolution.NewResolver(requester, tr, string(step.Ref.Resolver), resolverPayload)
+ ApplyParameterSubstitutionInResolverParams(tr, step)
+ resolver := resolution.NewResolver(requester, tr, string(step.Ref.Resolver), trName, namespace, step.Ref.Params)
return resolveStepAction(ctx, resolver, name, namespace, k8s, tekton)
}
}
@@ -167,14 +149,14 @@ func GetStepActionFunc(tekton clientset.Interface, k8s kubernetes.Interface, req
}
// ApplyParameterSubstitutionInResolverParams applies parameter substitutions in resolver params for Step Ref.
-func ApplyParameterSubstitutionInResolverParams(tr *v1.TaskRun, taskSpec v1.TaskSpec, step *v1.Step) {
+func ApplyParameterSubstitutionInResolverParams(tr *v1.TaskRun, step *v1.Step) {
stringReplacements := make(map[string]string)
arrayReplacements := make(map[string][]string)
objectReplacements := make(map[string]map[string]string)
-
- defaultSR, defaultAR, defaultOR := replacementsFromDefaultParams(taskSpec.Params)
- stringReplacements, arrayReplacements, objectReplacements = extendReplacements(stringReplacements, arrayReplacements, objectReplacements, defaultSR, defaultAR, defaultOR)
-
+ if tr.Spec.TaskSpec != nil {
+ defaultSR, defaultAR, defaultOR := replacementsFromDefaultParams(tr.Spec.TaskSpec.Params)
+ stringReplacements, arrayReplacements, objectReplacements = extendReplacements(stringReplacements, arrayReplacements, objectReplacements, defaultSR, defaultAR, defaultOR)
+ }
paramSR, paramAR, paramOR := replacementsFromParams(tr.Spec.Params)
stringReplacements, arrayReplacements, objectReplacements = extendReplacements(stringReplacements, arrayReplacements, objectReplacements, paramSR, paramAR, paramOR)
step.Ref.Params = step.Ref.Params.ReplaceVariables(stringReplacements, arrayReplacements, objectReplacements)
@@ -225,47 +207,22 @@ func resolveTask(ctx context.Context, resolver remote.Resolver, name, namespace
return taskObj, refSource, vr, nil
}
-func resolveStepAction(ctx context.Context, resolver remote.Resolver, name, namespace string, k8s kubernetes.Interface, tekton clientset.Interface) (*v1beta1.StepAction, *v1.RefSource, error) {
+func resolveStepAction(ctx context.Context, resolver remote.Resolver, name, namespace string, k8s kubernetes.Interface, tekton clientset.Interface) (*v1alpha1.StepAction, *v1.RefSource, error) {
obj, refSource, err := resolver.Get(ctx, "StepAction", name)
if err != nil {
return nil, nil, err
}
- switch obj := obj.(type) {
- case *v1beta1.StepAction:
- // Cleanup object from things we don't care about
- // FIXME: extract this in a function
- obj.ObjectMeta.OwnerReferences = nil
- o, err := apiserver.DryRunValidate(ctx, namespace, obj, tekton)
- if err != nil {
- return nil, nil, err
- }
- if mutatedStepAction, ok := o.(*v1beta1.StepAction); ok {
- mutatedStepAction.ObjectMeta = obj.ObjectMeta
- return mutatedStepAction, refSource, nil
- }
+ switch obj := obj.(type) { //nolint:gocritic
case *v1alpha1.StepAction:
obj.SetDefaults(ctx)
// Cleanup object from things we don't care about
// FIXME: extract this in a function
obj.ObjectMeta.OwnerReferences = nil
- o, err := apiserver.DryRunValidate(ctx, namespace, obj, tekton)
- if err != nil {
+ if err := apiserver.DryRunValidate(ctx, namespace, obj, tekton); err != nil {
return nil, nil, err
}
- if mutatedStepAction, ok := o.(*v1alpha1.StepAction); ok {
- mutatedStepAction.ObjectMeta = obj.ObjectMeta
- v1BetaStepAction := v1beta1.StepAction{
- TypeMeta: metav1.TypeMeta{
- Kind: "StepAction",
- APIVersion: "tekton.dev/v1beta1",
- },
- }
- err := mutatedStepAction.ConvertTo(ctx, &v1BetaStepAction)
- if err != nil {
- return nil, nil, err
- }
- return &v1BetaStepAction, refSource, nil
- }
+
+ return obj, refSource, nil
}
return nil, nil, errors.New("resource is not a StepAction")
}
@@ -290,42 +247,31 @@ func readRuntimeObjectAsTask(ctx context.Context, namespace string, obj runtime.
vr := trustedresources.VerifyResource(ctx, obj, k8s, refSource, verificationPolicies)
// Issue a dry-run request to create the remote Task, so that it can undergo validation from validating admission webhooks
// without actually creating the Task on the cluster.
- o, err := apiserver.DryRunValidate(ctx, namespace, obj, tekton)
- if err != nil {
+ if err := apiserver.DryRunValidate(ctx, namespace, obj, tekton); err != nil {
return nil, nil, err
}
- if mutatedTask, ok := o.(*v1beta1.Task); ok {
- t := &v1.Task{
- TypeMeta: metav1.TypeMeta{
- Kind: "Task",
- APIVersion: "tekton.dev/v1",
- },
- }
- mutatedTask.ObjectMeta = obj.ObjectMeta
- if err := mutatedTask.ConvertTo(ctx, t); err != nil {
- return nil, nil, fmt.Errorf("failed to convert obj %s into Pipeline", mutatedTask.GetObjectKind().GroupVersionKind().String())
- }
- return t, &vr, nil
+ t := &v1.Task{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Task",
+ APIVersion: "tekton.dev/v1",
+ },
+ }
+ if err := obj.ConvertTo(ctx, t); err != nil {
+ return nil, nil, fmt.Errorf("failed to convert obj %s into Task", obj.GetObjectKind().GroupVersionKind().String())
}
+ return t, &vr, nil
case *v1beta1.ClusterTask:
obj.SetDefaults(ctx)
// Cleanup object from things we don't care about
// FIXME: extract this in a function
obj.ObjectMeta.OwnerReferences = nil
t, err := convertClusterTaskToTask(ctx, *obj)
- if err != nil {
- return nil, nil, err
- }
// Issue a dry-run request to create the remote Task, so that it can undergo validation from validating admission webhooks
// without actually creating the Task on the cluster
- o, err := apiserver.DryRunValidate(ctx, namespace, t, tekton)
- if err != nil {
+ if err := apiserver.DryRunValidate(ctx, namespace, t, tekton); err != nil {
return nil, nil, err
}
- if mutatedTask, ok := o.(*v1.Task); ok {
- mutatedTask.ObjectMeta = obj.ObjectMeta
- return mutatedTask, nil, nil
- }
+ return t, nil, err
case *v1.Task:
// This SetDefaults is currently not necessary, but for consistency, it is recommended to add it.
// Avoid forgetting to add it in the future when there is a v2 version, causing similar problems.
@@ -336,14 +282,10 @@ func readRuntimeObjectAsTask(ctx context.Context, namespace string, obj runtime.
vr := trustedresources.VerifyResource(ctx, obj, k8s, refSource, verificationPolicies)
// Issue a dry-run request to create the remote Task, so that it can undergo validation from validating admission webhooks
// without actually creating the Task on the cluster
- o, err := apiserver.DryRunValidate(ctx, namespace, obj, tekton)
- if err != nil {
+ if err := apiserver.DryRunValidate(ctx, namespace, obj, tekton); err != nil {
return nil, nil, err
}
- if mutatedTask, ok := o.(*v1.Task); ok {
- mutatedTask.ObjectMeta = obj.ObjectMeta
- return mutatedTask, &vr, nil
- }
+ return obj, &vr, nil
}
return nil, nil, errors.New("resource is not a task")
}
@@ -387,18 +329,23 @@ type LocalStepActionRefResolver struct {
// GetStepAction will resolve a StepAction from the local cluster using a versioned Tekton client.
// It will return an error if it can't find an appropriate StepAction for any reason.
-func (l *LocalStepActionRefResolver) GetStepAction(ctx context.Context, name string) (*v1beta1.StepAction, *v1.RefSource, error) {
+func (l *LocalStepActionRefResolver) GetStepAction(ctx context.Context, name string) (*v1alpha1.StepAction, *v1.RefSource, error) {
// If we are going to resolve this reference locally, we need a namespace scope.
if l.Namespace == "" {
return nil, nil, fmt.Errorf("must specify namespace to resolve reference to step action %s", name)
}
- stepAction, err := l.Tektonclient.TektonV1beta1().StepActions(l.Namespace).Get(ctx, name, metav1.GetOptions{})
+ stepAction, err := l.Tektonclient.TektonV1alpha1().StepActions(l.Namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return nil, nil, err
}
return stepAction, nil, nil
}
+// IsErrTransient returns true if an error returned by GetTask/GetStepAction is retryable.
+func IsErrTransient(err error) bool {
+ return strings.Contains(err.Error(), errEtcdLeaderChange)
+}
+
// convertClusterTaskToTask converts deprecated v1beta1 ClusterTasks to Tasks for
// the rest of reconciling process since GetTask func and its upstream callers only
// fetches the task spec and stores it in the taskrun status while the kind info
diff --git a/upstream/pkg/reconciler/taskrun/resources/taskref_test.go b/upstream/pkg/reconciler/taskrun/resources/taskref_test.go
index 526e37aa69b..1f81947dd95 100644
--- a/upstream/pkg/reconciler/taskrun/resources/taskref_test.go
+++ b/upstream/pkg/reconciler/taskrun/resources/taskref_test.go
@@ -34,16 +34,13 @@ import (
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
- resolutionV1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
"github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake"
"github.com/tektoncd/pipeline/pkg/reconciler/apiserver"
"github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
"github.com/tektoncd/pipeline/pkg/trustedresources"
"github.com/tektoncd/pipeline/test"
"github.com/tektoncd/pipeline/test/diff"
"github.com/tektoncd/pipeline/test/parse"
- resolution "github.com/tektoncd/pipeline/test/remoteresolution"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -54,16 +51,16 @@ import (
)
var (
- simpleNamespacedStepAction = &v1beta1.StepAction{
+ simpleNamespacedStepAction = &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "simple",
Namespace: "default",
},
TypeMeta: metav1.TypeMeta{
- APIVersion: "tekton.dev/v1beta1",
+ APIVersion: "tekton.dev/v1alpha1",
Kind: "StepAction",
},
- Spec: v1beta1.StepActionSpec{
+ Spec: v1alpha1.StepActionSpec{
Image: "something",
},
}
@@ -347,7 +344,6 @@ func TestStepActionResolverParamReplacements(t *testing.T) {
name string
namespace string
taskrun *v1.TaskRun
- taskSpec *v1.TaskSpec
want *v1.Step
}{{
name: "default taskspec parms",
@@ -586,53 +582,11 @@ func TestStepActionResolverParamReplacements(t *testing.T) {
},
},
},
- }, {
- name: "defaults from remote task",
- namespace: "default",
- taskrun: &v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{Name: "some-tr"},
- Spec: v1.TaskRunSpec{
- TaskRef: &v1.TaskRef{
- Name: "resolved-task-name",
- },
- },
- },
- taskSpec: &v1.TaskSpec{
- Params: []v1.ParamSpec{{
- Name: "resolver-param",
- Default: v1.NewStructuredValues("foo/bar"),
- }},
- Steps: []v1.Step{{
- Ref: &v1.Ref{
- ResolverRef: v1.ResolverRef{
- Resolver: "git",
- Params: []v1.Param{{
- Name: "pathInRepo",
- Value: *v1.NewStructuredValues("$(params.resolver-param)"),
- }},
- },
- },
- }},
- },
- want: &v1.Step{
- Ref: &v1.Ref{
- ResolverRef: v1.ResolverRef{
- Resolver: "git",
- Params: []v1.Param{{
- Name: "pathInRepo",
- Value: *v1.NewStructuredValues("foo/bar"),
- }},
- },
- },
- },
}}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
- if tc.taskSpec == nil {
- tc.taskSpec = tc.taskrun.Spec.TaskSpec
- }
- step := &tc.taskSpec.Steps[0]
- resources.ApplyParameterSubstitutionInResolverParams(tc.taskrun, *tc.taskSpec, step)
+ step := &tc.taskrun.Spec.TaskSpec.Steps[0]
+ resources.ApplyParameterSubstitutionInResolverParams(tc.taskrun, step)
if d := cmp.Diff(tc.want, step); tc.want != nil && d != "" {
t.Error(diff.PrintWantGot(d))
}
@@ -651,13 +605,13 @@ func TestStepActionRef(t *testing.T) {
name: "local-step-action",
namespace: "default",
stepactions: []runtime.Object{
- &v1beta1.StepAction{
+ &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "simple",
Namespace: "default",
},
},
- &v1beta1.StepAction{
+ &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "sample",
Namespace: "default",
@@ -667,7 +621,7 @@ func TestStepActionRef(t *testing.T) {
ref: &v1.Ref{
Name: "simple",
},
- expected: &v1beta1.StepAction{
+ expected: &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "simple",
Namespace: "default",
@@ -724,7 +678,7 @@ func TestStepActionRef_Error(t *testing.T) {
name: "local-step-action-missing-namespace",
namespace: "",
stepactions: []runtime.Object{
- &v1beta1.StepAction{
+ &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "simple",
Namespace: "default",
@@ -904,7 +858,7 @@ func TestGetStepActionFunc_Local(t *testing.T) {
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
tektonclient := fake.NewSimpleClientset(tc.localStepActions...)
- fn := resources.GetStepActionFunc(tektonclient, nil, nil, tc.taskRun, *tc.taskRun.Spec.TaskSpec, &tc.taskRun.Spec.TaskSpec.Steps[0])
+ fn := resources.GetStepActionFunc(tektonclient, nil, nil, tc.taskRun, &tc.taskRun.Spec.TaskSpec.Steps[0])
stepAction, refSource, err := fn(ctx, tc.taskRun.Spec.TaskSpec.Steps[0].Ref.Name)
if err != nil {
@@ -930,29 +884,21 @@ func TestGetStepActionFunc_RemoteResolution_Success(t *testing.T) {
testcases := []struct {
name string
stepActionYAML string
- wantStepAction *v1beta1.StepAction
+ wantStepAction *v1alpha1.StepAction
wantErr bool
}{{
- name: "remote StepAction v1alpha1",
+ name: "remote StepAction",
stepActionYAML: strings.Join([]string{
"kind: StepAction",
"apiVersion: tekton.dev/v1alpha1",
stepActionYAMLString,
}, "\n"),
- wantStepAction: parse.MustParseV1beta1StepAction(t, stepActionYAMLString),
- }, {
- name: "remote StepAction v1beta1",
- stepActionYAML: strings.Join([]string{
- "kind: StepAction",
- "apiVersion: tekton.dev/v1beta1",
- stepActionYAMLString,
- }, "\n"),
- wantStepAction: parse.MustParseV1beta1StepAction(t, stepActionYAMLString),
+ wantStepAction: parse.MustParseV1alpha1StepAction(t, stepActionYAMLString),
}}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
- resolved := resolution.NewResolvedResource([]byte(tc.stepActionYAML), nil /* annotations */, sampleRefSource.DeepCopy(), nil /* data error */)
- requester := resolution.NewRequester(resolved, nil, resource.ResolverPayload{})
+ resolved := test.NewResolvedResource([]byte(tc.stepActionYAML), nil /* annotations */, sampleRefSource.DeepCopy(), nil /* data error */)
+ requester := test.NewRequester(resolved, nil)
tr := &v1.TaskRun{
ObjectMeta: metav1.ObjectMeta{Namespace: "default"},
Spec: v1.TaskRunSpec{
@@ -965,7 +911,7 @@ func TestGetStepActionFunc_RemoteResolution_Success(t *testing.T) {
},
}
tektonclient := fake.NewSimpleClientset()
- fn := resources.GetStepActionFunc(tektonclient, nil, requester, tr, *tr.Spec.TaskSpec, &tr.Spec.TaskSpec.Steps[0])
+ fn := resources.GetStepActionFunc(tektonclient, nil, requester, tr, &tr.Spec.TaskSpec.Steps[0])
resolvedStepAction, resolvedRefSource, err := fn(ctx, tr.Spec.TaskSpec.Steps[0].Ref.Name)
if tc.wantErr {
@@ -1012,8 +958,8 @@ func TestGetStepActionFunc_RemoteResolution_Error(t *testing.T) {
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
- res := resolution.NewResolvedResource(tc.resolvesTo, nil, nil, nil)
- requester := resolution.NewRequester(res, nil, resource.ResolverPayload{})
+ resource := test.NewResolvedResource(tc.resolvesTo, nil, nil, nil)
+ requester := test.NewRequester(resource, nil)
tr := &v1.TaskRun{
ObjectMeta: metav1.ObjectMeta{Namespace: "default"},
Spec: v1.TaskRunSpec{
@@ -1026,7 +972,7 @@ func TestGetStepActionFunc_RemoteResolution_Error(t *testing.T) {
},
}
tektonclient := fake.NewSimpleClientset()
- fn := resources.GetStepActionFunc(tektonclient, nil, requester, tr, *tr.Spec.TaskSpec, &tr.Spec.TaskSpec.Steps[0])
+ fn := resources.GetStepActionFunc(tektonclient, nil, requester, tr, &tr.Spec.TaskSpec.Steps[0])
if _, _, err := fn(ctx, tr.Spec.TaskSpec.Steps[0].Ref.Name); err == nil {
t.Fatalf("expected error due to invalid pipeline data but saw none")
}
@@ -1144,8 +1090,8 @@ func TestGetTaskFunc_RemoteResolution(t *testing.T) {
}}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
- resolved := resolution.NewResolvedResource([]byte(tc.taskYAML), nil /* annotations */, sampleRefSource.DeepCopy(), nil /* data error */)
- requester := resolution.NewRequester(resolved, nil, resource.ResolverPayload{})
+ resolved := test.NewResolvedResource([]byte(tc.taskYAML), nil /* annotations */, sampleRefSource.DeepCopy(), nil /* data error */)
+ requester := test.NewRequester(resolved, nil)
tr := &v1.TaskRun{
ObjectMeta: metav1.ObjectMeta{Namespace: "default"},
Spec: v1.TaskRunSpec{
@@ -1211,8 +1157,8 @@ func TestGetTaskFunc_RemoteResolution_ValidationFailure(t *testing.T) {
}}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
- resolved := resolution.NewResolvedResource([]byte(tc.taskYAML), nil /* annotations */, sampleRefSource.DeepCopy(), nil /* data error */)
- requester := resolution.NewRequester(resolved, nil, resource.ResolverPayload{})
+ resolved := test.NewResolvedResource([]byte(tc.taskYAML), nil /* annotations */, sampleRefSource.DeepCopy(), nil /* data error */)
+ requester := test.NewRequester(resolved, nil)
tektonclient := fake.NewSimpleClientset()
fn := resources.GetTaskFunc(ctx, nil, tektonclient, requester, &v1.TaskRun{
ObjectMeta: metav1.ObjectMeta{Namespace: "default"},
@@ -1245,7 +1191,6 @@ func TestGetTaskFunc_RemoteResolution_ReplacedParams(t *testing.T) {
ctx = config.ToContext(ctx, cfg)
task := parse.MustParseV1TaskAndSetDefaults(t, taskYAMLString)
taskRef := &v1.TaskRef{
- Name: "https://foo/bar",
ResolverRef: v1.ResolverRef{
Resolver: "git",
Params: []v1.Param{{
@@ -1263,21 +1208,16 @@ func TestGetTaskFunc_RemoteResolution_ReplacedParams(t *testing.T) {
taskYAMLString,
}, "\n")
- resolved := resolution.NewResolvedResource([]byte(taskYAML), nil, sampleRefSource.DeepCopy(), nil)
- requester := &resolution.Requester{
+ resolved := test.NewResolvedResource([]byte(taskYAML), nil, sampleRefSource.DeepCopy(), nil)
+ requester := &test.Requester{
ResolvedResource: resolved,
- ResolverPayload: resource.ResolverPayload{
- ResolutionSpec: &resolutionV1beta1.ResolutionRequestSpec{
- Params: v1.Params{{
- Name: "foo",
- Value: *v1.NewStructuredValues("bar"),
- }, {
- Name: "bar",
- Value: *v1.NewStructuredValues("test-task"),
- }},
- URL: "https://foo/bar",
- },
- },
+ Params: v1.Params{{
+ Name: "foo",
+ Value: *v1.NewStructuredValues("bar"),
+ }, {
+ Name: "bar",
+ Value: *v1.NewStructuredValues("test-task"),
+ }},
}
tr := &v1.TaskRun{
ObjectMeta: metav1.ObjectMeta{
@@ -1353,8 +1293,8 @@ func TestGetPipelineFunc_RemoteResolutionInvalidData(t *testing.T) {
ctx = config.ToContext(ctx, cfg)
taskRef := &v1.TaskRef{ResolverRef: v1.ResolverRef{Resolver: "git"}}
resolvesTo := []byte("INVALID YAML")
- res := resolution.NewResolvedResource(resolvesTo, nil, nil, nil)
- requester := resolution.NewRequester(res, nil, resource.ResolverPayload{})
+ resource := test.NewResolvedResource(resolvesTo, nil, nil, nil)
+ requester := test.NewRequester(resource, nil)
tr := &v1.TaskRun{
ObjectMeta: metav1.ObjectMeta{Namespace: "default"},
Spec: v1.TaskRunSpec{
@@ -1368,6 +1308,7 @@ func TestGetPipelineFunc_RemoteResolutionInvalidData(t *testing.T) {
}
}
+//nolint:musttag
func TestGetTaskFunc_V1beta1Task_VerifyNoError(t *testing.T) {
ctx := context.Background()
signer, _, k8sclient, vps := test.SetupVerificationPolicies(t)
@@ -1414,7 +1355,7 @@ func TestGetTaskFunc_V1beta1Task_VerifyNoError(t *testing.T) {
testcases := []struct {
name string
- requester *resolution.Requester
+ requester *test.Requester
verificationNoMatchPolicy string
policies []*v1alpha1.VerificationPolicy
expected runtime.Object
@@ -1473,7 +1414,7 @@ func TestGetTaskFunc_V1beta1Task_VerifyNoError(t *testing.T) {
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
- ctx := test.SetupTrustedResourceConfig(context.Background(), tc.verificationNoMatchPolicy)
+ ctx = test.SetupTrustedResourceConfig(ctx, tc.verificationNoMatchPolicy)
tr := &v1.TaskRun{
ObjectMeta: metav1.ObjectMeta{Namespace: "trusted-resources"},
Spec: v1.TaskRunSpec{
@@ -1502,6 +1443,7 @@ func TestGetTaskFunc_V1beta1Task_VerifyNoError(t *testing.T) {
}
}
+//nolint:musttag
func TestGetTaskFunc_V1beta1Task_VerifyError(t *testing.T) {
ctx := context.Background()
signer, _, k8sclient, vps := test.SetupVerificationPolicies(t)
@@ -1542,7 +1484,7 @@ func TestGetTaskFunc_V1beta1Task_VerifyError(t *testing.T) {
testcases := []struct {
name string
- requester *resolution.Requester
+ requester *test.Requester
verificationNoMatchPolicy string
expected *v1.Task
expectedErr error
@@ -1601,7 +1543,7 @@ func TestGetTaskFunc_V1beta1Task_VerifyError(t *testing.T) {
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
- ctx := test.SetupTrustedResourceConfig(ctx, tc.verificationNoMatchPolicy)
+ ctx = test.SetupTrustedResourceConfig(ctx, tc.verificationNoMatchPolicy)
tr := &v1.TaskRun{
ObjectMeta: metav1.ObjectMeta{Namespace: "trusted-resources"},
Spec: v1.TaskRunSpec{
@@ -1622,6 +1564,7 @@ func TestGetTaskFunc_V1beta1Task_VerifyError(t *testing.T) {
}
}
+//nolint:musttag
func TestGetTaskFunc_V1Task_VerifyNoError(t *testing.T) {
ctx := context.Background()
signer, _, k8sclient, vps := test.SetupVerificationPolicies(t)
@@ -1678,7 +1621,7 @@ func TestGetTaskFunc_V1Task_VerifyNoError(t *testing.T) {
testcases := []struct {
name string
- requester *resolution.Requester
+ requester *test.Requester
verificationNoMatchPolicy string
policies []*v1alpha1.VerificationPolicy
expected runtime.Object
@@ -1737,7 +1680,7 @@ func TestGetTaskFunc_V1Task_VerifyNoError(t *testing.T) {
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
- ctx := test.SetupTrustedResourceConfig(ctx, tc.verificationNoMatchPolicy)
+ ctx = test.SetupTrustedResourceConfig(ctx, tc.verificationNoMatchPolicy)
tr := &v1.TaskRun{
ObjectMeta: metav1.ObjectMeta{Namespace: "trusted-resources"},
Spec: v1.TaskRunSpec{
@@ -1766,6 +1709,7 @@ func TestGetTaskFunc_V1Task_VerifyNoError(t *testing.T) {
}
}
+//nolint:musttag
func TestGetTaskFunc_V1Task_VerifyError(t *testing.T) {
ctx := context.Background()
signer, _, k8sclient, vps := test.SetupVerificationPolicies(t)
@@ -1806,7 +1750,7 @@ func TestGetTaskFunc_V1Task_VerifyError(t *testing.T) {
testcases := []struct {
name string
- requester *resolution.Requester
+ requester *test.Requester
verificationNoMatchPolicy string
expected *v1.Task
expectedErr error
@@ -1863,7 +1807,7 @@ func TestGetTaskFunc_V1Task_VerifyError(t *testing.T) {
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
- ctx := test.SetupTrustedResourceConfig(ctx, tc.verificationNoMatchPolicy)
+ ctx = test.SetupTrustedResourceConfig(ctx, tc.verificationNoMatchPolicy)
tr := &v1.TaskRun{
ObjectMeta: metav1.ObjectMeta{Namespace: "trusted-resources"},
Spec: v1.TaskRunSpec{
@@ -1881,6 +1825,7 @@ func TestGetTaskFunc_V1Task_VerifyError(t *testing.T) {
}
}
+//nolint:musttag
func TestGetTaskFunc_GetFuncError(t *testing.T) {
ctx := context.Background()
_, k8sclient, vps := test.SetupMatchAllVerificationPolicies(t, "trusted-resources")
@@ -1892,8 +1837,8 @@ func TestGetTaskFunc_GetFuncError(t *testing.T) {
t.Fatal("fail to marshal task", err)
}
- resolvedUnsigned := resolution.NewResolvedResource(unsignedTaskBytes, nil, sampleRefSource.DeepCopy(), nil)
- requesterUnsigned := resolution.NewRequester(resolvedUnsigned, nil, resource.ResolverPayload{})
+ resolvedUnsigned := test.NewResolvedResource(unsignedTaskBytes, nil, sampleRefSource.DeepCopy(), nil)
+ requesterUnsigned := test.NewRequester(resolvedUnsigned, nil)
resolvedUnsigned.DataErr = errors.New("resolution error")
trResolutionError := &v1.TaskRun{
@@ -1911,7 +1856,7 @@ func TestGetTaskFunc_GetFuncError(t *testing.T) {
testcases := []struct {
name string
- requester *resolution.Requester
+ requester *test.Requester
taskrun v1.TaskRun
expectedErr error
}{
@@ -1935,7 +1880,7 @@ func TestGetTaskFunc_GetFuncError(t *testing.T) {
},
}
store.OnConfigChanged(featureflags)
- ctx := store.ToContext(ctx)
+ ctx = store.ToContext(ctx)
fn := resources.GetTaskFunc(ctx, k8sclient, tektonclient, tc.requester, &tc.taskrun, tc.taskrun.Spec.TaskRef, "", "default", "default", vps)
@@ -1962,7 +1907,7 @@ spec:
- "bar"
steps:
- name: step1
- image: docker.io/library/ubuntu
+ image: ubuntu
script: |
echo "hello world!"
`
@@ -1987,9 +1932,9 @@ spec:
- name: foo
`
-func bytesToRequester(data []byte, source *v1.RefSource) *resolution.Requester {
- resolved := resolution.NewResolvedResource(data, nil, source, nil)
- requester := resolution.NewRequester(resolved, nil, resource.ResolverPayload{})
+func bytesToRequester(data []byte, source *v1.RefSource) *test.Requester {
+ resolved := test.NewResolvedResource(data, nil, source, nil)
+ requester := test.NewRequester(resolved, nil)
return requester
}
diff --git a/upstream/pkg/reconciler/taskrun/resources/taskspec.go b/upstream/pkg/reconciler/taskrun/resources/taskspec.go
index e9b683d7520..64d71df04e8 100644
--- a/upstream/pkg/reconciler/taskrun/resources/taskspec.go
+++ b/upstream/pkg/reconciler/taskrun/resources/taskspec.go
@@ -22,11 +22,10 @@ import (
"fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+ "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
resolutionutil "github.com/tektoncd/pipeline/pkg/internal/resolution"
- "github.com/tektoncd/pipeline/pkg/pod"
- remoteresource "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
+ remoteresource "github.com/tektoncd/pipeline/pkg/resolution/resource"
"github.com/tektoncd/pipeline/pkg/trustedresources"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
@@ -43,7 +42,7 @@ type ResolvedTask struct {
}
// GetStepAction is a function used to retrieve StepActions.
-type GetStepAction func(context.Context, string) (*v1beta1.StepAction, *v1.RefSource, error)
+type GetStepAction func(context.Context, string) (*v1alpha1.StepAction, *v1.RefSource, error)
// GetTask is a function used to retrieve Tasks.
// VerificationResult is the result from trusted resources if the feature is enabled.
@@ -104,40 +103,14 @@ func GetTaskData(ctx context.Context, taskRun *v1.TaskRun, getTask GetTask) (*re
// GetStepActionsData extracts the StepActions and merges them with the inlined Step specification.
func GetStepActionsData(ctx context.Context, taskSpec v1.TaskSpec, taskRun *v1.TaskRun, tekton clientset.Interface, k8s kubernetes.Interface, requester remoteresource.Requester) ([]v1.Step, error) {
steps := []v1.Step{}
- for i, step := range taskSpec.Steps {
+ for _, step := range taskSpec.Steps {
s := step.DeepCopy()
if step.Ref != nil {
- getStepAction := GetStepActionFunc(tekton, k8s, requester, taskRun, taskSpec, s)
- stepAction, source, err := getStepAction(ctx, s.Ref.Name)
+ getStepAction := GetStepActionFunc(tekton, k8s, requester, taskRun, s)
+ stepAction, _, err := getStepAction(ctx, s.Ref.Name)
if err != nil {
return nil, err
}
- // update stepstate with remote origin information
- if source != nil {
- found := false
- for i, st := range taskRun.Status.Steps {
- if st.Name == s.Name {
- found = true
- if st.Provenance != nil {
- taskRun.Status.Steps[i].Provenance.RefSource = source
- } else {
- taskRun.Status.Steps[i].Provenance = &v1.Provenance{RefSource: source}
- }
- break
- }
- }
- if !found {
- stp := v1.StepState{
- Name: pod.TrimStepPrefix(pod.StepName(s.Name, i)),
- Provenance: &v1.Provenance{RefSource: source},
- }
- if len(taskRun.Status.Steps) == 0 {
- taskRun.Status.Steps = []v1.StepState{stp}
- } else {
- taskRun.Status.Steps = append(taskRun.Status.Steps, stp)
- }
- }
- }
stepActionSpec := stepAction.StepActionSpec()
stepActionSpec.SetDefaults(ctx)
@@ -145,11 +118,7 @@ func GetStepActionsData(ctx context.Context, taskSpec v1.TaskSpec, taskRun *v1.T
if err := validateStepHasStepActionParameters(s.Params, stepActionSpec.Params); err != nil {
return nil, err
}
-
- stepFromStepAction, err = applyStepActionParameters(stepFromStepAction, &taskSpec, taskRun, s.Params, stepActionSpec.Params)
- if err != nil {
- return nil, err
- }
+ stepFromStepAction = applyStepActionParameters(stepFromStepAction, &taskSpec, taskRun, s.Params, stepActionSpec.Params)
s.Image = stepFromStepAction.Image
s.SecurityContext = stepFromStepAction.SecurityContext
diff --git a/upstream/pkg/reconciler/taskrun/resources/taskspec_test.go b/upstream/pkg/reconciler/taskrun/resources/taskspec_test.go
index f2fb37ee42c..361ce2f0050 100644
--- a/upstream/pkg/reconciler/taskrun/resources/taskspec_test.go
+++ b/upstream/pkg/reconciler/taskrun/resources/taskspec_test.go
@@ -25,17 +25,13 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+ "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake"
"github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
"github.com/tektoncd/pipeline/pkg/trustedresources"
"github.com/tektoncd/pipeline/test/diff"
- "github.com/tektoncd/pipeline/test/parse"
- test "github.com/tektoncd/pipeline/test/remoteresolution"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "sigs.k8s.io/yaml"
)
func TestGetTaskSpec_Ref(t *testing.T) {
@@ -201,7 +197,7 @@ func TestGetTaskData_ResolutionSuccess(t *testing.T) {
}
if d := cmp.Diff(sourceSpec, *resolvedSpec); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
}
@@ -297,343 +293,7 @@ func TestGetTaskData_VerificationResult(t *testing.T) {
t.Fatalf("Did not expect error but got: %s", err)
}
if d := cmp.Diff(verificationResult, r.VerificationResult, cmpopts.EquateErrors()); d != "" {
- t.Error(diff.PrintWantGot(d))
- }
-}
-
-func TestGetStepActionsData_Provenance(t *testing.T) {
- source := v1.RefSource{
- URI: "ref-source",
- Digest: map[string]string{"sha256": "abcd123456"},
- }
- stepAction := parse.MustParseV1beta1StepAction(t, `
-metadata:
- name: stepAction
- namespace: foo
-spec:
- image: myImage
- command: ["ls"]
-`)
-
- stepActionBytes, err := yaml.Marshal(stepAction)
- if err != nil {
- t.Fatal("failed to marshal StepAction", err)
- }
- rr := test.NewResolvedResource(stepActionBytes, map[string]string{}, &source, nil)
- requester := test.NewRequester(rr, nil, resource.ResolverPayload{})
- tests := []struct {
- name string
- tr *v1.TaskRun
- want *v1.TaskRun
- }{{
- name: "remote-step-action-with-provenance",
- tr: &v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "mytaskrun",
- Namespace: "default",
- },
- Spec: v1.TaskRunSpec{
- TaskSpec: &v1.TaskSpec{
- Steps: []v1.Step{{
- Name: "stepname",
- Ref: &v1.Ref{
- ResolverRef: v1.ResolverRef{
- Resolver: "foo",
- Params: []v1.Param{{
- Name: "bar",
- Value: v1.ParamValue{
- Type: v1.ParamTypeString,
- StringVal: "baz",
- },
- }},
- },
- },
- }},
- },
- },
- },
- want: &v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "mytaskrun",
- Namespace: "default",
- },
- Spec: v1.TaskRunSpec{
- TaskSpec: &v1.TaskSpec{
- Steps: []v1.Step{{
- Name: "stepname",
- Ref: &v1.Ref{
- ResolverRef: v1.ResolverRef{
- Resolver: "foo",
- Params: []v1.Param{{
- Name: "bar",
- Value: v1.ParamValue{
- Type: v1.ParamTypeString,
- StringVal: "baz",
- },
- }},
- },
- },
- }},
- },
- },
- Status: v1.TaskRunStatus{
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Steps: []v1.StepState{{
- Name: "stepname",
- Provenance: &v1.Provenance{
- RefSource: &source,
- },
- }},
- },
- },
- },
- }, {
- name: "multiple-remote-step-actions-with-provenance",
- tr: &v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "mytaskrun",
- Namespace: "default",
- },
- Spec: v1.TaskRunSpec{
- TaskSpec: &v1.TaskSpec{
- Steps: []v1.Step{{
- Name: "step1",
- Ref: &v1.Ref{
- ResolverRef: v1.ResolverRef{
- Resolver: "foo",
- Params: []v1.Param{{
- Name: "bar",
- Value: v1.ParamValue{
- Type: v1.ParamTypeString,
- StringVal: "baz",
- },
- }},
- },
- },
- }, {
- Name: "step2",
- Ref: &v1.Ref{
- ResolverRef: v1.ResolverRef{
- Resolver: "foo",
- Params: []v1.Param{{
- Name: "bar",
- Value: v1.ParamValue{
- Type: v1.ParamTypeString,
- StringVal: "baz",
- },
- }},
- },
- },
- }},
- },
- },
- },
- want: &v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "mytaskrun",
- Namespace: "default",
- },
- Spec: v1.TaskRunSpec{
- TaskSpec: &v1.TaskSpec{
- Steps: []v1.Step{{
- Name: "step1",
- Ref: &v1.Ref{
- ResolverRef: v1.ResolverRef{
- Resolver: "foo",
- Params: []v1.Param{{
- Name: "bar",
- Value: v1.ParamValue{
- Type: v1.ParamTypeString,
- StringVal: "baz",
- },
- }},
- },
- },
- }, {
- Name: "step2",
- Ref: &v1.Ref{
- ResolverRef: v1.ResolverRef{
- Resolver: "foo",
- Params: []v1.Param{{
- Name: "bar",
- Value: v1.ParamValue{
- Type: v1.ParamTypeString,
- StringVal: "baz",
- },
- }},
- },
- },
- }},
- },
- },
- Status: v1.TaskRunStatus{
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Steps: []v1.StepState{{
- Name: "step1",
- Provenance: &v1.Provenance{
- RefSource: &source,
- },
- }, {
- Name: "step2",
- Provenance: &v1.Provenance{
- RefSource: &source,
- },
- }},
- },
- },
- },
- }, {
- name: "remote-step-action-with-existing-provenance",
- tr: &v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "mytaskrun",
- Namespace: "default",
- },
- Spec: v1.TaskRunSpec{
- TaskSpec: &v1.TaskSpec{
- Steps: []v1.Step{{
- Name: "step1",
- Ref: &v1.Ref{
- ResolverRef: v1.ResolverRef{
- Resolver: "foo",
- Params: []v1.Param{{
- Name: "bar",
- Value: v1.ParamValue{
- Type: v1.ParamTypeString,
- StringVal: "baz",
- },
- }},
- },
- },
- }},
- },
- },
- Status: v1.TaskRunStatus{
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Steps: []v1.StepState{{
- Name: "step1",
- Provenance: &v1.Provenance{
- RefSource: &source,
- },
- }},
- },
- },
- },
- want: &v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "mytaskrun",
- Namespace: "default",
- },
- Spec: v1.TaskRunSpec{
- TaskSpec: &v1.TaskSpec{
- Steps: []v1.Step{{
- Name: "step1",
- Ref: &v1.Ref{
- ResolverRef: v1.ResolverRef{
- Resolver: "foo",
- Params: []v1.Param{{
- Name: "bar",
- Value: v1.ParamValue{
- Type: v1.ParamTypeString,
- StringVal: "baz",
- },
- }},
- },
- },
- }},
- },
- },
- Status: v1.TaskRunStatus{
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Steps: []v1.StepState{{
- Name: "step1",
- Provenance: &v1.Provenance{
- RefSource: &source,
- },
- }},
- },
- },
- },
- }, {
- name: "remote-step-action-with-missing-provenance",
- tr: &v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "mytaskrun",
- Namespace: "default",
- },
- Spec: v1.TaskRunSpec{
- TaskSpec: &v1.TaskSpec{
- Steps: []v1.Step{{
- Name: "step1",
- Ref: &v1.Ref{
- ResolverRef: v1.ResolverRef{
- Resolver: "foo",
- Params: []v1.Param{{
- Name: "bar",
- Value: v1.ParamValue{
- Type: v1.ParamTypeString,
- StringVal: "baz",
- },
- }},
- },
- },
- }},
- },
- },
- Status: v1.TaskRunStatus{
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Steps: []v1.StepState{{
- Name: "step1",
- }},
- },
- },
- },
- want: &v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "mytaskrun",
- Namespace: "default",
- },
- Spec: v1.TaskRunSpec{
- TaskSpec: &v1.TaskSpec{
- Steps: []v1.Step{{
- Name: "step1",
- Ref: &v1.Ref{
- ResolverRef: v1.ResolverRef{
- Resolver: "foo",
- Params: []v1.Param{{
- Name: "bar",
- Value: v1.ParamValue{
- Type: v1.ParamTypeString,
- StringVal: "baz",
- },
- }},
- },
- },
- }},
- },
- },
- Status: v1.TaskRunStatus{
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Steps: []v1.StepState{{
- Name: "step1",
- Provenance: &v1.Provenance{
- RefSource: &source,
- },
- }},
- },
- },
- },
- }}
- for _, tt := range tests {
- ctx := context.Background()
- tektonclient := fake.NewSimpleClientset(stepAction)
- _, err := resources.GetStepActionsData(ctx, *tt.tr.Spec.TaskSpec, tt.tr, tektonclient, nil, requester)
- if err != nil {
- t.Fatalf("Did not expect an error but got : %s", err)
- }
- if d := cmp.Diff(tt.want, tt.tr); d != "" {
- t.Errorf("the taskrun did not match what was expected diff: %s", diff.PrintWantGot(d))
- }
+ t.Errorf(diff.PrintWantGot(d))
}
}
@@ -643,7 +303,7 @@ func TestGetStepActionsData(t *testing.T) {
tests := []struct {
name string
tr *v1.TaskRun
- stepAction *v1beta1.StepAction
+ stepAction *v1alpha1.StepAction
want []v1.Step
}{{
name: "step-action-with-command-args",
@@ -663,12 +323,12 @@ func TestGetStepActionsData(t *testing.T) {
},
},
},
- stepAction: &v1beta1.StepAction{
+ stepAction: &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "stepAction",
Namespace: "default",
},
- Spec: v1beta1.StepActionSpec{
+ Spec: v1alpha1.StepActionSpec{
Image: "myimage",
Command: []string{"ls"},
Args: []string{"-lh"},
@@ -705,12 +365,12 @@ func TestGetStepActionsData(t *testing.T) {
},
},
},
- stepAction: &v1beta1.StepAction{
+ stepAction: &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "stepActionWithScript",
Namespace: "default",
},
- Spec: v1beta1.StepActionSpec{
+ Spec: v1alpha1.StepActionSpec{
Image: "myimage",
Script: "ls",
},
@@ -736,12 +396,12 @@ func TestGetStepActionsData(t *testing.T) {
},
},
},
- stepAction: &v1beta1.StepAction{
+ stepAction: &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "stepActionWithEnv",
Namespace: "default",
},
- Spec: v1beta1.StepActionSpec{
+ Spec: v1alpha1.StepActionSpec{
Image: "myimage",
Env: []corev1.EnvVar{{
Name: "env1",
@@ -773,12 +433,12 @@ func TestGetStepActionsData(t *testing.T) {
},
},
},
- stepAction: &v1beta1.StepAction{
+ stepAction: &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "stepActionWithScript",
Namespace: "default",
},
- Spec: v1beta1.StepActionSpec{
+ Spec: v1alpha1.StepActionSpec{
Image: "myimage",
Script: "ls",
Results: []v1.StepResult{{
@@ -812,12 +472,12 @@ func TestGetStepActionsData(t *testing.T) {
},
},
},
- stepAction: &v1beta1.StepAction{
+ stepAction: &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "stepAction",
Namespace: "default",
},
- Spec: v1beta1.StepActionSpec{
+ Spec: v1alpha1.StepActionSpec{
Image: "myimage",
Command: []string{"ls"},
Args: []string{"-lh"},
@@ -850,12 +510,12 @@ func TestGetStepActionsData(t *testing.T) {
},
},
},
- stepAction: &v1beta1.StepAction{
+ stepAction: &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "stepAction",
Namespace: "default",
},
- Spec: v1beta1.StepActionSpec{
+ Spec: v1alpha1.StepActionSpec{
Image: "myimage",
Command: []string{"ls"},
Args: []string{"-lh"},
@@ -914,12 +574,12 @@ func TestGetStepActionsData(t *testing.T) {
},
},
},
- stepAction: &v1beta1.StepAction{
+ stepAction: &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "stepAction",
Namespace: "default",
},
- Spec: v1beta1.StepActionSpec{
+ Spec: v1alpha1.StepActionSpec{
Image: "myimage",
Args: []string{"$(params.string-param)", "$(params.array-param[0])", "$(params.array-param[1])", "$(params.array-param[*])", "$(params.object-param.key)"},
Params: v1.ParamSpecs{{
@@ -985,12 +645,12 @@ func TestGetStepActionsData(t *testing.T) {
},
},
},
- stepAction: &v1beta1.StepAction{
+ stepAction: &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "stepAction",
Namespace: "default",
},
- Spec: v1beta1.StepActionSpec{
+ Spec: v1alpha1.StepActionSpec{
Image: "myimage",
Args: []string{"$(params.string-param)", "$(params.array-param[0])", "$(params.array-param[1])", "$(params.array-param[*])", "$(params.object-param.key)"},
Params: v1.ParamSpecs{{
@@ -1027,12 +687,12 @@ func TestGetStepActionsData(t *testing.T) {
},
},
},
- stepAction: &v1beta1.StepAction{
+ stepAction: &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "stepAction",
Namespace: "default",
},
- Spec: v1beta1.StepActionSpec{
+ Spec: v1alpha1.StepActionSpec{
Image: "myimage",
Args: []string{"$(params.string-param)", "$(params.array-param[0])", "$(params.array-param[1])", "$(params.array-param[*])", "$(params.object-param.key)"},
Params: v1.ParamSpecs{{
@@ -1117,12 +777,12 @@ func TestGetStepActionsData(t *testing.T) {
},
},
},
- stepAction: &v1beta1.StepAction{
+ stepAction: &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "stepAction",
Namespace: "default",
},
- Spec: v1beta1.StepActionSpec{
+ Spec: v1alpha1.StepActionSpec{
Image: "myimage",
Args: []string{"$(params.string-param)", "$(params.array-param[0])", "$(params.array-param[1])", "$(params.array-param[*])", "$(params.object-param.key)", "$(params.object-param.key2)", "$(params.object-param.key3)"},
Params: v1.ParamSpecs{{
@@ -1188,12 +848,12 @@ func TestGetStepActionsData(t *testing.T) {
},
},
},
- stepAction: &v1beta1.StepAction{
+ stepAction: &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "stepAction",
Namespace: "default",
},
- Spec: v1beta1.StepActionSpec{
+ Spec: v1alpha1.StepActionSpec{
Image: "myimage",
Args: []string{"echo", "$(params.stringparam)"},
Params: v1.ParamSpecs{{
@@ -1248,12 +908,12 @@ func TestGetStepActionsData(t *testing.T) {
},
},
},
- stepAction: &v1beta1.StepAction{
+ stepAction: &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "stepAction",
Namespace: "default",
},
- Spec: v1beta1.StepActionSpec{
+ Spec: v1alpha1.StepActionSpec{
Image: "myimage",
Args: []string{"$(params.string-param)", "$(params.array-param[0])", "$(params.array-param[1])", "$(params.array-param[*])", "$(params.object-param.key)"},
Command: []string{"$(params[\"string-param\"])", "$(params[\"array-param\"][0])"},
@@ -1301,54 +961,6 @@ func TestGetStepActionsData(t *testing.T) {
Value: "$(steps.inlined-step.results.result1)",
}},
}},
- }, {
- name: "param types are matching",
- tr: &v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "mytaskrun",
- Namespace: "default",
- },
- Spec: v1.TaskRunSpec{
- TaskSpec: &v1.TaskSpec{
- Steps: []v1.Step{{
- Name: "test",
- Ref: &v1.Ref{Name: "stepAction"},
- Params: v1.Params{{
- Name: "commands",
- Value: v1.ParamValue{
- Type: v1.ParamTypeArray,
- ArrayVal: []string{"Hello, I am of type list"},
- },
- }},
- }},
- },
- },
- },
- stepAction: &v1beta1.StepAction{
- ObjectMeta: metav1.ObjectMeta{
- Name: "stepAction",
- Namespace: "default",
- },
- Spec: v1beta1.StepActionSpec{
- Image: "myimage",
- Args: []string{"$(params.commands)"},
- Script: "echo $@",
- Params: v1.ParamSpecs{{
- Name: "commands",
- Type: v1.ParamTypeArray,
- Default: &v1.ParamValue{
- Type: v1.ParamTypeArray,
- ArrayVal: []string{"Hello, I am the default value"},
- },
- }},
- },
- },
- want: []v1.Step{{
- Name: "test",
- Image: "myimage",
- Args: []string{"Hello, I am of type list"},
- Script: "echo $@",
- }},
}}
for _, tt := range tests {
ctx := context.Background()
@@ -1368,7 +980,7 @@ func TestGetStepActionsData_Error(t *testing.T) {
tests := []struct {
name string
tr *v1.TaskRun
- stepAction *v1beta1.StepAction
+ stepAction *v1alpha1.StepAction
expectedError error
}{{
name: "namespace missing error",
@@ -1386,7 +998,7 @@ func TestGetStepActionsData_Error(t *testing.T) {
},
},
},
- stepAction: &v1beta1.StepAction{},
+ stepAction: &v1alpha1.StepAction{},
expectedError: errors.New("must specify namespace to resolve reference to step action stepActionError"),
}, {
name: "params missing",
@@ -1405,12 +1017,12 @@ func TestGetStepActionsData_Error(t *testing.T) {
},
},
},
- stepAction: &v1beta1.StepAction{
+ stepAction: &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "stepaction",
Namespace: "default",
},
- Spec: v1beta1.StepActionSpec{
+ Spec: v1alpha1.StepActionSpec{
Image: "myimage",
Params: v1.ParamSpecs{{
Name: "string-param",
@@ -1440,56 +1052,16 @@ func TestGetStepActionsData_Error(t *testing.T) {
},
},
},
- stepAction: &v1beta1.StepAction{
+ stepAction: &v1alpha1.StepAction{
ObjectMeta: metav1.ObjectMeta{
Name: "stepaction",
Namespace: "default",
},
- Spec: v1beta1.StepActionSpec{
+ Spec: v1alpha1.StepActionSpec{
Image: "myimage",
},
},
expectedError: errors.New("extra params passed by Step to StepAction: [string-param]"),
- }, {
- name: "param types not matching",
- tr: &v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "mytaskrun",
- Namespace: "default",
- },
- Spec: v1.TaskRunSpec{
- TaskSpec: &v1.TaskSpec{
- Steps: []v1.Step{{
- Name: "test",
- Ref: &v1.Ref{Name: "stepAction"},
- Params: v1.Params{{
- Name: "commands",
- Value: *v1.NewStructuredValues("Hello, I am of type string"),
- }},
- }},
- },
- },
- },
- stepAction: &v1beta1.StepAction{
- ObjectMeta: metav1.ObjectMeta{
- Name: "stepAction",
- Namespace: "default",
- },
- Spec: v1beta1.StepActionSpec{
- Image: "myimage",
- Args: []string{"$(params.commands)"},
- Script: "echo $@",
- Params: v1.ParamSpecs{{
- Name: "commands",
- Type: v1.ParamTypeArray,
- Default: &v1.ParamValue{
- Type: v1.ParamTypeArray,
- ArrayVal: []string{"Hello, I am the default value"},
- },
- }},
- },
- },
- expectedError: errors.New("invalid parameter substitution: commands. Please check the types of the default value and the passed value"),
}}
for _, tt := range tests {
ctx := context.Background()
diff --git a/upstream/pkg/reconciler/taskrun/taskrun.go b/upstream/pkg/reconciler/taskrun/taskrun.go
index 56fdce49918..a11942fcb2a 100644
--- a/upstream/pkg/reconciler/taskrun/taskrun.go
+++ b/upstream/pkg/reconciler/taskrun/taskrun.go
@@ -22,7 +22,6 @@ import (
"fmt"
"reflect"
"slices"
- "strconv"
"strings"
"time"
@@ -39,7 +38,6 @@ import (
alphalisters "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/pkg/internal/affinityassistant"
"github.com/tektoncd/pipeline/pkg/internal/computeresources"
- "github.com/tektoncd/pipeline/pkg/internal/defaultresourcerequirements"
resolutionutil "github.com/tektoncd/pipeline/pkg/internal/resolution"
podconvert "github.com/tektoncd/pipeline/pkg/pod"
tknreconciler "github.com/tektoncd/pipeline/pkg/reconciler"
@@ -49,8 +47,7 @@ import (
"github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources"
"github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim"
"github.com/tektoncd/pipeline/pkg/remote"
- resolution "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
- resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
+ resolution "github.com/tektoncd/pipeline/pkg/resolution/resource"
"github.com/tektoncd/pipeline/pkg/spire"
"github.com/tektoncd/pipeline/pkg/taskrunmetrics"
"github.com/tektoncd/pipeline/pkg/trustedresources"
@@ -153,28 +150,8 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, tr *v1.TaskRun) pkgrecon
// and may not have had all of the assumed default specified.
tr.SetDefaults(ctx)
- // Check if current k8s version is less than 1.29
- // Since Kubernetes Major version cannot be 0 and if it's 2 then sidecar will be in
- // we are only concerned about major version 1 and if the minor is less than 29 then
- // we need to do the current logic
- useTektonSidecar := true
- if config.FromContextOrDefaults(ctx).FeatureFlags.EnableKubernetesSidecar {
- dc := c.KubeClientSet.Discovery()
- sv, err := dc.ServerVersion()
- if err != nil {
- return err
- }
- svMajorInt, _ := strconv.Atoi(sv.Major)
- svMinorInt, _ := strconv.Atoi(sv.Minor)
- if svMajorInt >= 1 && svMinorInt >= 29 {
- useTektonSidecar = false
- logger.Infof("Using Kubernetes Native Sidecars \n")
- }
- }
- if useTektonSidecar {
- if err := c.stopSidecars(ctx, tr); err != nil {
- return err
- }
+ if err := c.stopSidecars(ctx, tr); err != nil {
+ return err
}
return c.finishReconcileUpdateEmitEvents(ctx, tr, before, nil)
@@ -431,7 +408,7 @@ func (c *Reconciler) prepare(ctx context.Context, tr *v1.TaskRun) (*v1.TaskSpec,
return nil, nil, err
case err != nil:
logger.Errorf("Failed to determine Task spec to use for taskrun %s: %v", tr.Name, err)
- if resolutioncommon.IsErrTransient(err) {
+ if resources.IsErrTransient(err) {
return nil, nil, err
}
tr.Status.MarkResourceFailed(v1.TaskRunReasonFailedResolution, err)
@@ -456,7 +433,7 @@ func (c *Reconciler) prepare(ctx context.Context, tr *v1.TaskRun) (*v1.TaskSpec,
return nil, nil, err
case err != nil:
logger.Errorf("Failed to determine StepAction to use for TaskRun %s: %v", tr.Name, err)
- if resolutioncommon.IsErrTransient(err) {
+ if resources.IsErrTransient(err) {
return nil, nil, err
}
tr.Status.MarkResourceFailed(v1.TaskRunReasonFailedResolution, err)
@@ -902,7 +879,6 @@ func (c *Reconciler) createPod(ctx context.Context, ts *v1.TaskSpec, tr *v1.Task
EntrypointCache: c.entrypointCache,
}
pod, err := podbuilder.Build(ctx, tr, *ts,
- defaultresourcerequirements.NewTransformer(ctx),
computeresources.NewTransformer(ctx, tr.Namespace, c.limitrangeLister),
affinityassistant.NewTransformer(ctx, tr.Annotations),
)
@@ -1060,7 +1036,7 @@ func storeTaskSpecAndMergeMeta(ctx context.Context, tr *v1.TaskRun, ts *v1.TaskS
// Propagate labels from Task to TaskRun. TaskRun labels take precedences over Task.
tr.ObjectMeta.Labels = kmap.Union(meta.Labels, tr.ObjectMeta.Labels)
if tr.Spec.TaskRef != nil {
- if tr.Spec.TaskRef.Kind == v1.ClusterTaskRefKind {
+ if tr.Spec.TaskRef.Kind == "ClusterTask" {
tr.ObjectMeta.Labels[pipeline.ClusterTaskLabelKey] = meta.Name
} else {
tr.ObjectMeta.Labels[pipeline.TaskLabelKey] = meta.Name
diff --git a/upstream/pkg/reconciler/taskrun/taskrun_test.go b/upstream/pkg/reconciler/taskrun/taskrun_test.go
index 694d78e7bca..7e1b30f7ee5 100644
--- a/upstream/pkg/reconciler/taskrun/taskrun_test.go
+++ b/upstream/pkg/reconciler/taskrun/taskrun_test.go
@@ -314,13 +314,6 @@ var (
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
}
-
- artifactsVolume = corev1.Volume{
- Name: "tekton-internal-artifacts",
- VolumeSource: corev1.VolumeSource{
- EmptyDir: &corev1.EmptyDirVolumeSource{},
- },
- }
downwardVolume = corev1.Volume{
Name: "tekton-internal-downward",
VolumeSource: corev1.VolumeSource{
@@ -431,7 +424,6 @@ func TestReconcile_ExplicitDefaultSA(t *testing.T) {
metadata:
name: test-taskrun-run-success
namespace: foo
- uid: bar
spec:
taskRef:
apiVersion: v1
@@ -441,7 +433,6 @@ spec:
metadata:
name: test-taskrun-with-sa-run-success
namespace: foo
- uid: bar
spec:
serviceAccountName: test-sa
taskRef:
@@ -471,7 +462,7 @@ spec:
}{{
name: "success",
taskRun: taskRunSuccess,
- wantPod: expectedPod("test-taskrun-run-success-pod", "test-task", "test-taskrun-run-success", "bar", "foo", defaultSAName, false, nil, []stepForExpectedPod{{
+ wantPod: expectedPod("test-taskrun-run-success-pod", "test-task", "test-taskrun-run-success", "foo", defaultSAName, false, nil, []stepForExpectedPod{{
image: "foo",
name: "simple-step",
cmd: "/mycmd",
@@ -479,7 +470,7 @@ spec:
}, {
name: "serviceaccount",
taskRun: taskRunWithSaSuccess,
- wantPod: expectedPod("test-taskrun-with-sa-run-success-pod", "test-with-sa", "test-taskrun-with-sa-run-success", "bar", "foo", "test-sa", false, nil, []stepForExpectedPod{{
+ wantPod: expectedPod("test-taskrun-with-sa-run-success-pod", "test-with-sa", "test-taskrun-with-sa-run-success", "foo", "test-sa", false, nil, []stepForExpectedPod{{
image: "foo",
name: "sa-step",
cmd: "/mycmd",
@@ -629,7 +620,7 @@ spec:
err = k8sevent.CheckEventsOrdered(t, testAssets.Recorder.Events, "reconcile-cloud-events", wantEvents)
if !(err == nil) {
- t.Error(err.Error())
+ t.Errorf(err.Error())
}
wantCloudEvents := []string{
@@ -645,7 +636,6 @@ func TestReconcile(t *testing.T) {
metadata:
name: test-taskrun-run-success
namespace: foo
- uid: bar
spec:
taskRef:
apiVersion: v1
@@ -655,7 +645,6 @@ spec:
metadata:
name: test-taskrun-with-sa-run-success
namespace: foo
- uid: bar
spec:
serviceAccountName: test-sa
taskRef:
@@ -666,7 +655,6 @@ spec:
metadata:
name: test-taskrun-substitution
namespace: foo
- uid: bar
spec:
params:
- name: myarg
@@ -683,7 +671,6 @@ spec:
metadata:
name: test-taskrun-with-taskspec
namespace: foo
- uid: bar
spec:
params:
- name: myarg
@@ -705,7 +692,6 @@ spec:
metadata:
name: test-taskrun-with-cluster-task
namespace: foo
- uid: bar
spec:
taskRef:
kind: ClusterTask
@@ -719,7 +705,6 @@ metadata:
tekton.dev/taskRun: WillNotBeUsed
name: test-taskrun-with-labels
namespace: foo
- uid: bar
spec:
taskRef:
name: test-task
@@ -731,7 +716,6 @@ metadata:
TaskRunAnnotation: TaskRunValue
name: test-taskrun-with-annotations
namespace: foo
- uid: bar
spec:
taskRef:
name: test-task
@@ -741,7 +725,6 @@ spec:
metadata:
name: test-taskrun-with-pod
namespace: foo
- uid: bar
spec:
taskRef:
name: test-task
@@ -753,7 +736,6 @@ status:
metadata:
name: test-taskrun-with-credentials-variable
namespace: foo
- uid: bar
spec:
taskSpec:
steps:
@@ -781,7 +763,6 @@ spec:
metadata:
name: test-taskrun-bundle
namespace: foo
- uid: bar
spec:
taskRef:
bundle: %s
@@ -812,7 +793,7 @@ spec:
"Normal Started ",
"Normal Running Not all Steps",
},
- wantPod: expectedPod("test-taskrun-run-success-pod", "test-task", "test-taskrun-run-success", "bar", "foo", config.DefaultServiceAccountValue, false, nil, []stepForExpectedPod{{
+ wantPod: expectedPod("test-taskrun-run-success-pod", "test-task", "test-taskrun-run-success", "foo", config.DefaultServiceAccountValue, false, nil, []stepForExpectedPod{{
image: "foo",
name: "simple-step",
cmd: "/mycmd",
@@ -824,7 +805,7 @@ spec:
"Normal Started ",
"Normal Running Not all Steps",
},
- wantPod: expectedPod("test-taskrun-with-sa-run-success-pod", "test-with-sa", "test-taskrun-with-sa-run-success", "bar", "foo", "test-sa", false, nil, []stepForExpectedPod{{
+ wantPod: expectedPod("test-taskrun-with-sa-run-success-pod", "test-with-sa", "test-taskrun-with-sa-run-success", "foo", "test-sa", false, nil, []stepForExpectedPod{{
image: "foo",
name: "sa-step",
cmd: "/mycmd",
@@ -836,7 +817,7 @@ spec:
"Normal Started ",
"Normal Running Not all Steps",
},
- wantPod: expectedPod("test-taskrun-substitution-pod", "test-task-with-substitution", "test-taskrun-substitution", "bar", "foo", config.DefaultServiceAccountValue, false, []corev1.Volume{{
+ wantPod: expectedPod("test-taskrun-substitution-pod", "test-task-with-substitution", "test-taskrun-substitution", "foo", config.DefaultServiceAccountValue, false, []corev1.Volume{{
Name: "volume-configmap",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
@@ -872,7 +853,7 @@ spec:
"Normal Started ",
"Normal Running Not all Steps",
},
- wantPod: expectedPod("test-taskrun-with-taskspec-pod", "", "test-taskrun-with-taskspec", "bar", "foo", config.DefaultServiceAccountValue, false, nil, []stepForExpectedPod{
+ wantPod: expectedPod("test-taskrun-with-taskspec-pod", "", "test-taskrun-with-taskspec", "foo", config.DefaultServiceAccountValue, false, nil, []stepForExpectedPod{
{
name: "mycontainer",
image: "myimage",
@@ -886,7 +867,7 @@ spec:
"Normal Started ",
"Normal Running Not all Steps",
},
- wantPod: expectedPod("test-taskrun-with-cluster-task-pod", "test-cluster-task", "test-taskrun-with-cluster-task", "bar", "foo", config.DefaultServiceAccountValue, true, nil, []stepForExpectedPod{{
+ wantPod: expectedPod("test-taskrun-with-cluster-task-pod", "test-cluster-task", "test-taskrun-with-cluster-task", "foo", config.DefaultServiceAccountValue, true, nil, []stepForExpectedPod{{
name: "simple-step",
image: "foo",
cmd: "/mycmd",
@@ -898,7 +879,7 @@ spec:
"Normal Started ",
"Normal Running Not all Steps",
},
- wantPod: expectedPod("test-taskrun-with-pod-pod", "test-task", "test-taskrun-with-pod", "bar", "foo", config.DefaultServiceAccountValue, false, nil, []stepForExpectedPod{{
+ wantPod: expectedPod("test-taskrun-with-pod-pod", "test-task", "test-taskrun-with-pod", "foo", config.DefaultServiceAccountValue, false, nil, []stepForExpectedPod{{
name: "simple-step",
image: "foo",
cmd: "/mycmd",
@@ -910,7 +891,7 @@ spec:
"Normal Started ",
"Normal Running Not all Steps",
},
- wantPod: expectedPod("test-taskrun-with-credentials-variable-pod", "", "test-taskrun-with-credentials-variable", "bar", "foo", config.DefaultServiceAccountValue, false, nil, []stepForExpectedPod{{
+ wantPod: expectedPod("test-taskrun-with-credentials-variable-pod", "", "test-taskrun-with-credentials-variable", "foo", config.DefaultServiceAccountValue, false, nil, []stepForExpectedPod{{
name: "mycontainer",
image: "myimage",
cmd: "/mycmd /tekton/creds",
@@ -922,7 +903,7 @@ spec:
"Normal Started ",
"Normal Running Not all Steps",
},
- wantPod: expectedPod("test-taskrun-bundle-pod", "test-task", "test-taskrun-bundle", "bar", "foo", config.DefaultServiceAccountValue, false, nil, []stepForExpectedPod{{
+ wantPod: expectedPod("test-taskrun-bundle-pod", "test-task", "test-taskrun-bundle", "foo", config.DefaultServiceAccountValue, false, nil, []stepForExpectedPod{{
name: "simple-step",
image: "foo",
cmd: "/mycmd",
@@ -980,7 +961,7 @@ spec:
err = k8sevent.CheckEventsOrdered(t, testAssets.Recorder.Events, tc.name, tc.wantEvents)
if err != nil {
- t.Error(err.Error())
+ t.Errorf(err.Error())
}
})
}
@@ -992,7 +973,6 @@ func TestAlphaReconcile(t *testing.T) {
metadata:
name: test-taskrun-with-output-config
namespace: foo
- uid: bar
spec:
taskSpec:
steps:
@@ -1008,7 +988,6 @@ spec:
metadata:
name: test-taskrun-with-output-config-ws
namespace: foo
- uid: bar
spec:
workspaces:
- name: data
@@ -1053,7 +1032,7 @@ spec:
"Normal Started ",
"Normal Running Not all Steps",
},
- wantPod: expectedPod("test-taskrun-with-output-config-pod", "", "test-taskrun-with-output-config", "bar", "foo", config.DefaultServiceAccountValue, false, nil, []stepForExpectedPod{{
+ wantPod: expectedPod("test-taskrun-with-output-config-pod", "", "test-taskrun-with-output-config", "foo", config.DefaultServiceAccountValue, false, nil, []stepForExpectedPod{{
name: "mycontainer",
image: "myimage",
stdoutPath: "stdout.txt",
@@ -1066,7 +1045,7 @@ spec:
"Normal Started ",
"Normal Running Not all Steps",
},
- wantPod: addVolumeMounts(expectedPod("test-taskrun-with-output-config-ws-pod", "", "test-taskrun-with-output-config-ws", "bar", "foo", config.DefaultServiceAccountValue, false,
+ wantPod: addVolumeMounts(expectedPod("test-taskrun-with-output-config-ws-pod", "", "test-taskrun-with-output-config-ws", "foo", config.DefaultServiceAccountValue, false,
[]corev1.Volume{{
Name: "ws-d872e",
VolumeSource: corev1.VolumeSource{
@@ -1136,7 +1115,7 @@ spec:
err = k8sevent.CheckEventsOrdered(t, testAssets.Recorder.Events, tc.name, tc.wantEvents)
if err != nil {
- t.Error(err.Error())
+ t.Errorf(err.Error())
}
})
}
@@ -1216,7 +1195,7 @@ spec:
spec:
steps:
- name: step1
- image: docker.io/library/ubuntu
+ image: ubuntu
script: |
echo "hello world!"
`)
@@ -1475,7 +1454,7 @@ spec:
err := k8sevent.CheckEventsOrdered(t, testAssets.Recorder.Events, tc.name, tc.wantEvents)
if !(err == nil) {
- t.Error(err.Error())
+ t.Errorf(err.Error())
}
newTr, err := testAssets.Clients.Pipeline.TektonV1().TaskRuns(tc.taskRun.Namespace).Get(testAssets.Ctx, tc.taskRun.Name, metav1.GetOptions{})
@@ -1717,6 +1696,7 @@ status:
provenance:
featureFlags:
RunningInEnvWithInjectedSidecars: true
+ EnableTektonOCIBundles: true
EnforceNonfalsifiability: "none"
EnableAPIFields: "alpha"
AwaitSidecarReadiness: true
@@ -1729,6 +1709,7 @@ status:
provenance:
featureFlags:
RunningInEnvWithInjectedSidecars: true
+ EnableTektonOCIBundles: true
EnableAPIFields: "alpha"
EnforceNonfalsifiability: "none"
AwaitSidecarReadiness: true
@@ -1985,47 +1966,39 @@ spec:
Tasks: []*v1.Task{simpleTask},
ClusterTasks: []*v1beta1.ClusterTask{},
}
- for _, v := range []error{
- errors.New("etcdserver: leader changed"),
- context.DeadlineExceeded,
- apierrors.NewConflict(pipeline.TaskRunResource, "", nil),
- apierrors.NewServerTimeout(pipeline.TaskRunResource, "", 0),
- apierrors.NewTimeoutError("", 0),
- } {
- testAssets, cancel := getTaskRunController(t, d)
- defer cancel()
- c := testAssets.Controller
- clients := testAssets.Clients
- createServiceAccount(t, testAssets, "default", tr.Namespace)
+ testAssets, cancel := getTaskRunController(t, d)
+ defer cancel()
+ c := testAssets.Controller
+ clients := testAssets.Clients
+ createServiceAccount(t, testAssets, "default", tr.Namespace)
- failingReactorActivated := true
- clients.Pipeline.PrependReactor("*", "tasks", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) {
- return failingReactorActivated, &v1.Task{}, v
- })
- err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(tr))
- if err == nil {
- t.Error("Wanted a wrapped error, but got nil.")
- }
- if controller.IsPermanentError(err) {
- t.Errorf("Unexpected permanent error %v", err)
- }
+ failingReactorActivated := true
+ clients.Pipeline.PrependReactor("*", "tasks", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) {
+ return failingReactorActivated, &v1.Task{}, errors.New("etcdserver: leader changed")
+ })
+ err := c.Reconciler.Reconcile(testAssets.Ctx, getRunName(tr))
+ if err == nil {
+ t.Error("Wanted a wrapped error, but got nil.")
+ }
+ if controller.IsPermanentError(err) {
+ t.Errorf("Unexpected permanent error %v", err)
+ }
- failingReactorActivated = false
- err = c.Reconciler.Reconcile(testAssets.Ctx, getRunName(tr))
- if err != nil {
- if ok, _ := controller.IsRequeueKey(err); !ok {
- t.Errorf("unexpected error in TaskRun reconciliation: %v", err)
- }
- }
- reconciledRun, err := clients.Pipeline.TektonV1().TaskRuns("foo").Get(testAssets.Ctx, tr.Name, metav1.GetOptions{})
- if err != nil {
- t.Fatalf("Somehow had error getting reconciled run out of fake client: %s", err)
- }
- condition := reconciledRun.Status.GetCondition(apis.ConditionSucceeded)
- if !condition.IsUnknown() {
- t.Errorf("Expected TaskRun to still be running but succeeded condition is %v", condition.Status)
+ failingReactorActivated = false
+ err = c.Reconciler.Reconcile(testAssets.Ctx, getRunName(tr))
+ if err != nil {
+ if ok, _ := controller.IsRequeueKey(err); !ok {
+ t.Errorf("unexpected error in TaskRun reconciliation: %v", err)
}
}
+ reconciledRun, err := clients.Pipeline.TektonV1().TaskRuns("foo").Get(testAssets.Ctx, tr.Name, metav1.GetOptions{})
+ if err != nil {
+ t.Fatalf("Somehow had error getting reconciled run out of fake client: %s", err)
+ }
+ condition := reconciledRun.Status.GetCondition(apis.ConditionSucceeded)
+ if !condition.IsUnknown() {
+ t.Errorf("Expected TaskRun to still be running but succeeded condition is %v", condition.Status)
+ }
}
func TestReconcile_InvalidRemoteTask(t *testing.T) {
@@ -2127,7 +2100,7 @@ spec:
resolver: bar
`)
- stepAction := parse.MustParseV1beta1StepAction(t, `
+ stepAction := parse.MustParseV1alpha1StepAction(t, `
metadata:
name: stepAction
namespace: foo
@@ -2162,7 +2135,7 @@ spec:
clients := testAssets.Clients
err = c.Reconciler.Reconcile(testAssets.Ctx, fmt.Sprintf("%s/%s", tr.Namespace, tr.Name))
if controller.IsPermanentError(err) {
- t.Errorf("Not expected permanent error but got %v", err)
+ t.Errorf("Not expected permanent error but got %t", err)
}
reconciledRun, err := clients.Pipeline.TektonV1().TaskRuns(tr.Namespace).Get(testAssets.Ctx, tr.Name, metav1.GetOptions{})
if err != nil {
@@ -2187,7 +2160,7 @@ spec:
resolver: bar
`)}
- stepAction := parse.MustParseV1beta1StepAction(t, `
+ stepAction := parse.MustParseV1alpha1StepAction(t, `
metadata:
name: stepAction
namespace: foo
@@ -2332,7 +2305,7 @@ status:
// Check actions
actions := clients.Kube.Actions()
if len(actions) != 2 || !actions[0].Matches("list", "configmaps") || !actions[1].Matches("watch", "configmaps") {
- t.Errorf("expected 3 actions (list configmaps, and watch configmaps) created by the reconciler,"+
+ t.Errorf("expected 2 actions (list configmaps, and watch configmaps) created by the reconciler,"+
" got %d. Actions: %#v", len(actions), actions)
}
@@ -2501,7 +2474,7 @@ status:
}
err = k8sevent.CheckEventsOrdered(t, testAssets.Recorder.Events, "test-reconcile-pod-updateStatus", wantEvents)
if !(err == nil) {
- t.Error(err.Error())
+ t.Errorf(err.Error())
}
}
@@ -2600,7 +2573,7 @@ status:
}
err = k8sevent.CheckEventsOrdered(t, testAssets.Recorder.Events, "test-reconcile-on-cancelled-taskrun", wantEvents)
if !(err == nil) {
- t.Error(err.Error())
+ t.Errorf(err.Error())
}
// reconcile the completed TaskRun again without the pod as that was deleted
@@ -2673,7 +2646,7 @@ status:
}
err = k8sevent.CheckEventsOrdered(t, testAssets.Recorder.Events, "test-reconcile-on-timedout-taskrun", wantEvents)
if !(err == nil) {
- t.Error(err.Error())
+ t.Errorf(err.Error())
}
// reconcile the completed TaskRun again without the pod as that was deleted
@@ -2777,7 +2750,7 @@ metadata:
spec:
taskSpec:
sidecars:
- - image: docker.io/library/ubuntu:24.04
+ - image: ubuntu
- image: whatever
steps:
- image: alpine
@@ -2906,7 +2879,7 @@ status:
}
err = k8sevent.CheckEventsOrdered(t, testAssets.Recorder.Events, taskRun.Name, wantEvents)
if err != nil {
- t.Error(err.Error())
+ t.Errorf(err.Error())
}
}
})
@@ -3111,7 +3084,7 @@ status:
}
err = k8sevent.CheckEventsOrdered(t, testAssets.Recorder.Events, tc.taskRun.Name, tc.wantEvents)
if !(err == nil) {
- t.Error(err.Error())
+ t.Errorf(err.Error())
}
})
}
@@ -3577,7 +3550,7 @@ spec:
- name: inlined-step
image: "inlined-image"
`)
- stepAction := parse.MustParseV1beta1StepAction(t, `
+ stepAction := parse.MustParseV1alpha1StepAction(t, `
metadata:
name: stepAction
namespace: foo
@@ -3587,7 +3560,7 @@ spec:
securityContext:
privileged: true
`)
- stepAction2 := parse.MustParseV1beta1StepAction(t, `
+ stepAction2 := parse.MustParseV1alpha1StepAction(t, `
metadata:
name: stepAction2
namespace: foo
@@ -3597,7 +3570,7 @@ spec:
`)
d := test.Data{
TaskRuns: []*v1.TaskRun{taskRun},
- StepActions: []*v1beta1.StepAction{stepAction, stepAction2},
+ StepActions: []*v1alpha1.StepAction{stepAction, stepAction2},
ConfigMaps: []*corev1.ConfigMap{
{
ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.Namespace()},
@@ -3687,7 +3660,7 @@ func TestStepActionRefParams(t *testing.T) {
tests := []struct {
name string
taskRun *v1.TaskRun
- stepAction *v1beta1.StepAction
+ stepAction *v1alpha1.StepAction
want []v1.Step
}{{
name: "params propagated from taskrun",
@@ -3717,7 +3690,7 @@ spec:
- name: object-param
value: $(params.objectparam[*])
`),
- stepAction: parse.MustParseV1beta1StepAction(t, `
+ stepAction: parse.MustParseV1alpha1StepAction(t, `
metadata:
name: stepAction
namespace: foo
@@ -3762,7 +3735,7 @@ spec:
- name: stringparam
value: "step string param"
`),
- stepAction: parse.MustParseV1beta1StepAction(t, `
+ stepAction: parse.MustParseV1alpha1StepAction(t, `
metadata:
name: stepAction
namespace: foo
@@ -3795,7 +3768,7 @@ spec:
name: stepAction
name: step1
`),
- stepAction: parse.MustParseV1beta1StepAction(t, `
+ stepAction: parse.MustParseV1alpha1StepAction(t, `
metadata:
name: stepAction
namespace: foo
@@ -3826,7 +3799,7 @@ spec:
name: stepAction
name: step1
`),
- stepAction: parse.MustParseV1beta1StepAction(t, `
+ stepAction: parse.MustParseV1alpha1StepAction(t, `
metadata:
name: stepAction
namespace: foo
@@ -3872,7 +3845,7 @@ spec:
- name: object-param
value: $(params.objectparam[*])
`),
- stepAction: parse.MustParseV1beta1StepAction(t, `
+ stepAction: parse.MustParseV1alpha1StepAction(t, `
metadata:
name: stepAction
namespace: foo
@@ -3909,7 +3882,7 @@ spec:
name: stepAction
name: step1
`),
- stepAction: parse.MustParseV1beta1StepAction(t, `
+ stepAction: parse.MustParseV1alpha1StepAction(t, `
metadata:
name: stepAction
namespace: foo
@@ -3946,7 +3919,7 @@ spec:
t.Run(tt.name, func(t *testing.T) {
d := test.Data{
TaskRuns: []*v1.TaskRun{tt.taskRun},
- StepActions: []*v1beta1.StepAction{tt.stepAction},
+ StepActions: []*v1alpha1.StepAction{tt.stepAction},
ConfigMaps: []*corev1.ConfigMap{
{
ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.Namespace()},
@@ -5245,7 +5218,7 @@ status:
t.Fatal(err)
}
if d := cmp.Diff(&tc.expectedStatus, tc.taskRun.Status.GetCondition(apis.ConditionSucceeded), ignoreLastTransitionTime); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
if tc.expectedStepStates != nil {
@@ -5349,7 +5322,7 @@ spec:
t.Errorf("storePipelineSpec() error = %v", err)
}
if d := cmp.Diff(tc.wantTaskRun, tr); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
// mock second reconcile
@@ -5357,7 +5330,7 @@ spec:
t.Errorf("storePipelineSpec() error = %v", err)
}
if d := cmp.Diff(tc.wantTaskRun, tr); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
})
}
@@ -5382,10 +5355,10 @@ func Test_storeTaskSpec_metadata(t *testing.T) {
t.Errorf("storeTaskSpecAndMergeMeta error = %v", err)
}
if d := cmp.Diff(wantedlabels, tr.ObjectMeta.Labels); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
if d := cmp.Diff(wantedannotations, tr.ObjectMeta.Annotations); d != "" {
- t.Fatal(diff.PrintWantGot(d))
+ t.Fatalf(diff.PrintWantGot(d))
}
}
@@ -5879,7 +5852,7 @@ func podVolumeMounts(idx, totalSteps int) []corev1.VolumeMount {
MountPath: "/tekton/bin",
ReadOnly: true,
})
- for i := range totalSteps {
+ for i := 0; i < totalSteps; i++ {
mnts = append(mnts, corev1.VolumeMount{
Name: fmt.Sprintf("tekton-internal-run-%d", i),
MountPath: filepath.Join("/tekton/run", strconv.Itoa(i)),
@@ -5914,10 +5887,6 @@ func podVolumeMounts(idx, totalSteps int) []corev1.VolumeMount {
MountPath: "/tekton/steps",
ReadOnly: true,
})
- mnts = append(mnts, corev1.VolumeMount{
- Name: "tekton-internal-artifacts",
- MountPath: "/tekton/artifacts",
- })
return mnts
}
@@ -5956,7 +5925,7 @@ func podArgs(cmd string, stdoutPath string, stderrPath string, additionalArgs []
return args
}
-func podObjectMeta(name, taskName, taskRunName, taskRunUID, ns string, isClusterTask bool) metav1.ObjectMeta {
+func podObjectMeta(name, taskName, taskRunName, ns string, isClusterTask bool) metav1.ObjectMeta {
trueB := true
om := metav1.ObjectMeta{
Name: name,
@@ -5966,7 +5935,6 @@ func podObjectMeta(name, taskName, taskRunName, taskRunUID, ns string, isCluster
},
Labels: map[string]string{
pipeline.TaskRunLabelKey: taskRunName,
- pipeline.TaskRunUIDLabelKey: taskRunUID,
"app.kubernetes.io/managed-by": "tekton-pipelines",
},
OwnerReferences: []metav1.OwnerReference{{
@@ -5975,7 +5943,6 @@ func podObjectMeta(name, taskName, taskRunName, taskRunUID, ns string, isCluster
Controller: &trueB,
BlockOwnerDeletion: &trueB,
APIVersion: currentAPIVersion,
- UID: types.UID(taskRunUID),
}},
}
@@ -6002,19 +5969,18 @@ type stepForExpectedPod struct {
stderrPath string
}
-func expectedPod(podName, taskName, taskRunName, taskRunUID, ns, saName string, isClusterTask bool, extraVolumes []corev1.Volume, steps []stepForExpectedPod) *corev1.Pod {
+func expectedPod(podName, taskName, taskRunName, ns, saName string, isClusterTask bool, extraVolumes []corev1.Volume, steps []stepForExpectedPod) *corev1.Pod {
stepNames := make([]string, 0, len(steps))
for _, s := range steps {
stepNames = append(stepNames, "step-"+s.name)
}
p := &corev1.Pod{
- ObjectMeta: podObjectMeta(podName, taskName, taskRunName, taskRunUID, ns, isClusterTask),
+ ObjectMeta: podObjectMeta(podName, taskName, taskRunName, ns, isClusterTask),
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
workspaceVolume,
homeVolume,
resultsVolume,
- artifactsVolume,
stepsVolume,
binVolume,
downwardVolume,
@@ -7074,7 +7040,7 @@ func TestIsConcurrentModificationError(t *testing.T) {
// the ResolutionRequest's name is generated by resolverName, namespace and runName.
func getResolvedResolutionRequest(t *testing.T, resolverName string, resourceBytes []byte, namespace string, runName string) resolutionv1beta1.ResolutionRequest {
t.Helper()
- name, err := remoteresource.GenerateDeterministicNameFromSpec(resolverName, namespace+"/"+runName, &resolutionv1beta1.ResolutionRequestSpec{})
+ name, err := remoteresource.GenerateDeterministicName(resolverName, namespace+"/"+runName, nil)
if err != nil {
t.Errorf("error generating name for %s/%s/%s: %v", resolverName, namespace, runName, err)
}
diff --git a/upstream/pkg/reconciler/taskrun/tracing.go b/upstream/pkg/reconciler/taskrun/tracing.go
index 9bcf616c1c4..d5e26a3a984 100644
--- a/upstream/pkg/reconciler/taskrun/tracing.go
+++ b/upstream/pkg/reconciler/taskrun/tracing.go
@@ -42,7 +42,7 @@ func initTracing(ctx context.Context, tracerProvider trace.TracerProvider, tr *v
pro := otel.GetTextMapPropagator()
// SpanContext was created already
- if len(tr.Status.SpanContext) > 0 {
+ if tr.Status.SpanContext != nil && len(tr.Status.SpanContext) > 0 {
return pro.Extract(ctx, propagation.MapCarrier(tr.Status.SpanContext))
}
diff --git a/upstream/pkg/remote/resolution/error.go b/upstream/pkg/remote/resolution/error.go
index 9621060e074..05022c5f8cb 100644
--- a/upstream/pkg/remote/resolution/error.go
+++ b/upstream/pkg/remote/resolution/error.go
@@ -36,7 +36,7 @@ var (
// InvalidRuntimeObjectError is returned when remote resolution
// succeeded but the returned data is not a valid runtime.Object.
type InvalidRuntimeObjectError struct {
- Original error
+ original error
}
// ErrorInvalidRuntimeObject is an alias to InvalidRuntimeObjectError.
@@ -51,12 +51,12 @@ var (
// Error returns the string representation of this error.
func (e *InvalidRuntimeObjectError) Error() string {
- return fmt.Sprintf("invalid runtime object: %v", e.Original)
+ return fmt.Sprintf("invalid runtime object: %v", e.original)
}
// Unwrap returns the underlying original error.
func (e *InvalidRuntimeObjectError) Unwrap() error {
- return e.Original
+ return e.original
}
// Is returns true if the given error coerces into an error of this type.
@@ -68,7 +68,7 @@ func (e *InvalidRuntimeObjectError) Is(that error) bool {
// attempting to access the resolved data failed. An example of this
// type of error would be if a ResolutionRequest contained malformed base64.
type DataAccessError struct {
- Original error
+ original error
}
// ErrorAccessingData is an alias to DataAccessError
@@ -83,12 +83,12 @@ var (
// Error returns the string representation of this error.
func (e *DataAccessError) Error() string {
- return fmt.Sprintf("error accessing data from remote resource: %v", e.Original)
+ return fmt.Sprintf("error accessing data from remote resource: %v", e.original)
}
// Unwrap returns the underlying original error.
func (e *DataAccessError) Unwrap() error {
- return e.Original
+ return e.original
}
// Is returns true if the given error coerces into an error of this type.
diff --git a/upstream/pkg/remote/resolution/resolver.go b/upstream/pkg/remote/resolution/resolver.go
index f858d1259e0..772b39e416a 100644
--- a/upstream/pkg/remote/resolution/resolver.go
+++ b/upstream/pkg/remote/resolution/resolver.go
@@ -19,13 +19,11 @@ import (
"fmt"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
"github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme"
"github.com/tektoncd/pipeline/pkg/remote"
resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
remoteresource "github.com/tektoncd/pipeline/pkg/resolution/resource"
"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/serializer"
"knative.dev/pkg/kmeta"
)
@@ -65,7 +63,24 @@ func (resolver *Resolver) Get(ctx context.Context, _, _ string) (runtime.Object,
return nil, nil, fmt.Errorf("error building request for remote resource: %w", err)
}
resolved, err := resolver.requester.Submit(ctx, resolverName, req)
- return ResolvedRequest(resolved, err)
+ switch {
+ case errors.Is(err, resolutioncommon.ErrRequestInProgress):
+ return nil, nil, remote.ErrRequestInProgress
+ case err != nil:
+ return nil, nil, fmt.Errorf("error requesting remote resource: %w", err)
+ case resolved == nil:
+ return nil, nil, ErrNilResource
+ default:
+ }
+ data, err := resolved.Data()
+ if err != nil {
+ return nil, nil, &DataAccessError{original: err}
+ }
+ obj, _, err := scheme.Codecs.UniversalDeserializer().Decode(data, nil, nil)
+ if err != nil {
+ return nil, nil, &InvalidRuntimeObjectError{original: err}
+ }
+ return obj, resolved.RefSource(), nil
}
// List implements remote.Resolver but is unused for remote resolution.
@@ -74,12 +89,20 @@ func (resolver *Resolver) List(_ context.Context) ([]remote.ResolvedObject, erro
}
func buildRequest(resolverName string, owner kmeta.OwnerRefable, name string, namespace string, params v1.Params) (*resolutionRequest, error) {
- rr := &v1beta1.ResolutionRequestSpec{
- Params: params,
+ if name == "" {
+ name = owner.GetObjectMeta().GetName()
+ namespace = owner.GetObjectMeta().GetNamespace()
}
- name, namespace, err := remoteresource.GetNameAndNamespace(resolverName, owner, name, namespace, rr)
+ if namespace == "" {
+ namespace = "default"
+ }
+ // Generating a deterministic name for the resource request
+ // prevents multiple requests being issued for the same
+ // pipelinerun's pipelineRef or taskrun's taskRef.
+ remoteResourceBaseName := namespace + "/" + name
+ name, err := remoteresource.GenerateDeterministicName(resolverName, remoteResourceBaseName, params)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("error generating name for taskrun %s/%s: %w", namespace, name, err)
}
req := &resolutionRequest{
Request: remoteresource.NewRequest(name, namespace, params),
@@ -87,25 +110,3 @@ func buildRequest(resolverName string, owner kmeta.OwnerRefable, name string, na
}
return req, nil
}
-
-func ResolvedRequest(resolved resolutioncommon.ResolvedResource, err error) (runtime.Object, *v1.RefSource, error) {
- switch {
- case errors.Is(err, resolutioncommon.ErrRequestInProgress):
- return nil, nil, remote.ErrRequestInProgress
- case err != nil:
- return nil, nil, fmt.Errorf("error requesting remote resource: %w", err)
- case resolved == nil:
- return nil, nil, ErrNilResource
- default:
- }
- data, err := resolved.Data()
- if err != nil {
- return nil, nil, &DataAccessError{Original: err}
- }
- codecs := serializer.NewCodecFactory(scheme.Scheme, serializer.EnableStrict)
- obj, _, err := codecs.UniversalDeserializer().Decode(data, nil, nil)
- if err != nil {
- return nil, nil, &InvalidRuntimeObjectError{Original: err}
- }
- return obj, resolved.RefSource(), nil
-}
diff --git a/upstream/pkg/remote/resolution/resolver_test.go b/upstream/pkg/remote/resolution/resolver_test.go
index 88150665332..8e900ba50ea 100644
--- a/upstream/pkg/remote/resolution/resolver_test.go
+++ b/upstream/pkg/remote/resolution/resolver_test.go
@@ -23,8 +23,8 @@ import (
"github.com/tektoncd/pipeline/pkg/remote"
resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
remoteresource "github.com/tektoncd/pipeline/pkg/resolution/resource"
+ "github.com/tektoncd/pipeline/test"
"github.com/tektoncd/pipeline/test/diff"
- resolution "github.com/tektoncd/pipeline/test/resolution"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/kmeta"
)
@@ -40,7 +40,7 @@ spec:
taskSpec:
steps:
- name: step1
- image: docker.io/library/ubuntu
+ image: ubuntu
script: |
echo "hello world!"
`)
@@ -60,11 +60,11 @@ func TestGet_Successful(t *testing.T) {
Namespace: "bar",
},
}
- resolved := &resolution.ResolvedResource{
+ resolved := &test.ResolvedResource{
ResolvedData: tc.resolvedData,
ResolvedAnnotations: tc.resolvedAnnotations,
}
- requester := &resolution.Requester{
+ requester := &test.Requester{
SubmitErr: nil,
ResolvedResource: resolved,
}
@@ -75,75 +75,16 @@ func TestGet_Successful(t *testing.T) {
}
}
-var invalidPipelineBytes = []byte(`
-kind: Pipeline
-apiVersion: tekton.dev/v1
-metadata:
- name: foo
-spec:
- tasks:
- - name: task1
- taskSpec:
- foo: bar
- steps:
- - name: step1
- image: ubuntu
- script: |
- echo "hello world!"
- foo: bar
-`)
-
-var invalidTaskBytes = []byte(`
-kind: Task
-apiVersion: tekton.dev/v1
-metadata:
- name: foo
-spec:
- foo: bar
- steps:
- - name: step1
- image: ubuntu
- script: |
- echo "hello world!"
-`)
-
-var invalidStepActionBytes = []byte(`
-kind: StepAction
-apiVersion: tekton.dev/v1beta1
-metadata:
- name: foo
-spec:
- image: ubuntu
- script: |
- echo "hello world!"
- foo: bar
-`)
-
func TestGet_Errors(t *testing.T) {
genericError := errors.New("uh oh something bad happened")
- notARuntimeObject := &resolution.ResolvedResource{
+ notARuntimeObject := &test.ResolvedResource{
ResolvedData: []byte(">:)"),
ResolvedAnnotations: nil,
}
- invalidDataResource := &resolution.ResolvedResource{
+ invalidDataResource := &test.ResolvedResource{
DataErr: errors.New("data access error"),
ResolvedAnnotations: nil,
}
- invalidPipeline := &resolution.ResolvedResource{
- ResolvedData: invalidPipelineBytes,
- DataErr: errors.New(`spec.tasks[0].taskSpec.foo", unknown field "spec.tasks[0].taskSpec.steps[0].foo`),
- ResolvedAnnotations: nil,
- }
- invalidTask := &resolution.ResolvedResource{
- ResolvedData: invalidTaskBytes,
- DataErr: errors.New(`spec.foo", unknown field "spec.steps[0].foo`),
- ResolvedAnnotations: nil,
- }
- invalidStepAction := &resolution.ResolvedResource{
- ResolvedData: invalidStepActionBytes,
- DataErr: errors.New(`unknown field "spec.foo`),
- ResolvedAnnotations: nil,
- }
for _, tc := range []struct {
submitErr error
expectedGetErr error
@@ -168,18 +109,6 @@ func TestGet_Errors(t *testing.T) {
submitErr: nil,
expectedGetErr: &DataAccessError{},
resolvedResource: invalidDataResource,
- }, {
- submitErr: nil,
- expectedGetErr: &DataAccessError{},
- resolvedResource: invalidPipeline,
- }, {
- submitErr: nil,
- expectedGetErr: &DataAccessError{},
- resolvedResource: invalidTask,
- }, {
- submitErr: nil,
- expectedGetErr: &DataAccessError{},
- resolvedResource: invalidStepAction,
}} {
ctx := context.Background()
owner := &v1beta1.PipelineRun{
@@ -188,7 +117,7 @@ func TestGet_Errors(t *testing.T) {
Namespace: "bar",
},
}
- requester := &resolution.Requester{
+ requester := &test.Requester{
SubmitErr: tc.submitErr,
ResolvedResource: tc.resolvedResource,
}
diff --git a/upstream/pkg/remoteresolution/doc.go b/upstream/pkg/remoteresolution/doc.go
deleted file mode 100644
index 83734f5ddd9..00000000000
--- a/upstream/pkg/remoteresolution/doc.go
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Package remoteresolution contains the upgraded remote resolution framework.
-This was necessary to ensure backwards compatibility with the existing framework.
-
-This package is subject to further refactoring and changes.
-*/
-package remoteresolution
diff --git a/upstream/pkg/remoteresolution/remote/resolution/doc.go b/upstream/pkg/remoteresolution/remote/resolution/doc.go
deleted file mode 100644
index 3fc8f5f7a8e..00000000000
--- a/upstream/pkg/remoteresolution/remote/resolution/doc.go
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Package resolution contains the upgraded remote resolution framework.
-It is equivalent to `pkg/remote/resolution`.
-This was necessary to ensure backwards compatibility with the existing framework.
-
-This package is subject to further refactoring and changes.
-*/
-package resolution
diff --git a/upstream/pkg/remoteresolution/remote/resolution/request.go b/upstream/pkg/remoteresolution/remote/resolution/request.go
deleted file mode 100644
index 5a22f414014..00000000000
--- a/upstream/pkg/remoteresolution/remote/resolution/request.go
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package resolution
-
-import (
- resolution "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "knative.dev/pkg/kmeta"
-)
-
-var _ resolution.Request = &resolutionRequest{}
-var _ resolution.OwnedRequest = &resolutionRequest{}
-
-type resolutionRequest struct {
- resolution.Request
- owner kmeta.OwnerRefable
-}
-
-func (req *resolutionRequest) OwnerRef() metav1.OwnerReference {
- return *kmeta.NewControllerRef(req.owner)
-}
diff --git a/upstream/pkg/remoteresolution/remote/resolution/resolver.go b/upstream/pkg/remoteresolution/remote/resolution/resolver.go
deleted file mode 100644
index d3500ae6396..00000000000
--- a/upstream/pkg/remoteresolution/remote/resolution/resolver.go
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package resolution
-
-import (
- "context"
- "fmt"
-
- v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- "github.com/tektoncd/pipeline/pkg/remote"
- resolution "github.com/tektoncd/pipeline/pkg/remote/resolution"
- remoteresource "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
- resource "github.com/tektoncd/pipeline/pkg/resolution/resource"
- "k8s.io/apimachinery/pkg/runtime"
- "knative.dev/pkg/kmeta"
-)
-
-// Resolver implements remote.Resolver and encapsulates the majority of
-// code required to interface with the tektoncd/resolution project. It
-// is used to make async requests for resources like pipelines from
-// remote places like git repos.
-type Resolver struct {
- requester remoteresource.Requester
- owner kmeta.OwnerRefable
- resolverName string
- resolverPayload remoteresource.ResolverPayload
-}
-
-var _ remote.Resolver = &Resolver{}
-
-// NewResolver returns an implementation of remote.Resolver capable
-// of performing asynchronous remote resolution.
-func NewResolver(requester remoteresource.Requester, owner kmeta.OwnerRefable, resolverName string, resolverPayload remoteresource.ResolverPayload) remote.Resolver {
- return &Resolver{
- requester: requester,
- owner: owner,
- resolverName: resolverName,
- resolverPayload: resolverPayload,
- }
-}
-
-// Get implements remote.Resolver.
-func (resolver *Resolver) Get(ctx context.Context, _, _ string) (runtime.Object, *v1.RefSource, error) {
- resolverName := remoteresource.ResolverName(resolver.resolverName)
- req, err := buildRequest(resolver.resolverName, resolver.owner, &resolver.resolverPayload)
- if err != nil {
- return nil, nil, fmt.Errorf("error building request for remote resource: %w", err)
- }
- resolved, err := resolver.requester.Submit(ctx, resolverName, req)
- return resolution.ResolvedRequest(resolved, err)
-}
-
-// List implements remote.Resolver but is unused for remote resolution.
-func (resolver *Resolver) List(_ context.Context) ([]remote.ResolvedObject, error) {
- return nil, nil
-}
-
-func buildRequest(resolverName string, owner kmeta.OwnerRefable, resolverPayload *remoteresource.ResolverPayload) (*resolutionRequest, error) {
- var name string
- var namespace string
- var url string
- var params v1.Params
- if resolverPayload != nil {
- name = resolverPayload.Name
- namespace = resolverPayload.Namespace
- if resolverPayload.ResolutionSpec != nil {
- params = resolverPayload.ResolutionSpec.Params
- url = resolverPayload.ResolutionSpec.URL
- }
- }
- rr := &v1beta1.ResolutionRequestSpec{
- Params: params,
- URL: url,
- }
- name, namespace, err := resource.GetNameAndNamespace(resolverName, owner, name, namespace, rr)
- if err != nil {
- return nil, err
- }
- resolverPayload.Name = name
- resolverPayload.Namespace = namespace
- req := &resolutionRequest{
- Request: remoteresource.NewRequest(*resolverPayload),
- owner: owner,
- }
- return req, nil
-}
diff --git a/upstream/pkg/remoteresolution/remote/resolution/resolver_test.go b/upstream/pkg/remoteresolution/remote/resolution/resolver_test.go
deleted file mode 100644
index e196e327e27..00000000000
--- a/upstream/pkg/remoteresolution/remote/resolution/resolver_test.go
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package resolution
-
-import (
- "context"
- "errors"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
- resv1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
-
- "github.com/tektoncd/pipeline/pkg/remote"
- "github.com/tektoncd/pipeline/pkg/remote/resolution"
- remoteresource "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
- "github.com/tektoncd/pipeline/pkg/resolution/common"
- "github.com/tektoncd/pipeline/pkg/resolution/resource"
- "github.com/tektoncd/pipeline/test/diff"
- test "github.com/tektoncd/pipeline/test/remoteresolution"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "knative.dev/pkg/kmeta"
-)
-
-var pipelineBytes = []byte(`
-kind: Pipeline
-apiVersion: tekton.dev/v1beta1
-metadata:
- name: foo
-spec:
- tasks:
- - name: task1
- taskSpec:
- steps:
- - name: step1
- image: docker.io/library/ubuntu
- script: |
- echo "hello world!"
-`)
-
-func TestGet_Successful(t *testing.T) {
- for _, tc := range []struct {
- resolvedData []byte
- resolvedAnnotations map[string]string
- }{{
- resolvedData: pipelineBytes,
- resolvedAnnotations: nil,
- }} {
- ctx := context.Background()
- owner := &v1beta1.PipelineRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "foo",
- Namespace: "bar",
- },
- }
- resolved := &test.ResolvedResource{
- ResolvedData: tc.resolvedData,
- ResolvedAnnotations: tc.resolvedAnnotations,
- }
- requester := &test.Requester{
- SubmitErr: nil,
- ResolvedResource: resolved,
- }
- resolver := NewResolver(requester, owner, "git", remoteresource.ResolverPayload{})
- if _, _, err := resolver.Get(ctx, "foo", "bar"); err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- }
-}
-
-func TestGet_Errors(t *testing.T) {
- genericError := errors.New("uh oh something bad happened")
- notARuntimeObject := &test.ResolvedResource{
- ResolvedData: []byte(">:)"),
- ResolvedAnnotations: nil,
- }
- invalidDataResource := &test.ResolvedResource{
- DataErr: errors.New("data access error"),
- ResolvedAnnotations: nil,
- }
- for _, tc := range []struct {
- submitErr error
- expectedGetErr error
- resolvedResource remoteresource.ResolvedResource
- }{{
- submitErr: common.ErrRequestInProgress,
- expectedGetErr: remote.ErrRequestInProgress,
- resolvedResource: nil,
- }, {
- submitErr: nil,
- expectedGetErr: resolution.ErrNilResource,
- resolvedResource: nil,
- }, {
- submitErr: genericError,
- expectedGetErr: genericError,
- resolvedResource: nil,
- }, {
- submitErr: nil,
- expectedGetErr: &resolution.InvalidRuntimeObjectError{},
- resolvedResource: notARuntimeObject,
- }, {
- submitErr: nil,
- expectedGetErr: &resolution.DataAccessError{},
- resolvedResource: invalidDataResource,
- }} {
- ctx := context.Background()
- owner := &v1beta1.PipelineRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "foo",
- Namespace: "bar",
- },
- }
- requester := &test.Requester{
- SubmitErr: tc.submitErr,
- ResolvedResource: tc.resolvedResource,
- }
- resolver := NewResolver(requester, owner, "git", remoteresource.ResolverPayload{})
- obj, refSource, err := resolver.Get(ctx, "foo", "bar")
- if obj != nil {
- t.Errorf("received unexpected resolved resource")
- }
- if refSource != nil {
- t.Errorf("expected refSource is nil, but received %v", refSource)
- }
- if !errors.Is(err, tc.expectedGetErr) {
- t.Fatalf("expected %v received %v", tc.expectedGetErr, err)
- }
- }
-}
-
-func TestBuildRequestV2(t *testing.T) {
- for _, tc := range []struct {
- name string
- targetName string
- targetNamespace string
- url string
- }{{
- name: "just owner",
- }, {
- name: "with target name and namespace",
- targetName: "some-object",
- targetNamespace: "some-ns",
- }, {
- name: "with target name, namespace, and url",
- targetName: "some-object",
- targetNamespace: "some-ns",
- url: "scheme://value",
- }} {
- t.Run(tc.name, func(t *testing.T) {
- owner := &v1beta1.PipelineRun{
- ObjectMeta: metav1.ObjectMeta{
- Name: "foo",
- Namespace: "bar",
- },
- }
-
- rr := &remoteresource.ResolverPayload{Name: tc.targetName, Namespace: tc.targetNamespace}
- rr.ResolutionSpec = &resv1beta1.ResolutionRequestSpec{URL: tc.url}
- req, err := buildRequest("git", owner, rr)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if d := cmp.Diff(*kmeta.NewControllerRef(owner), req.OwnerRef()); d != "" {
- t.Errorf("expected matching owner ref but got %s", diff.PrintWantGot(d))
- }
- reqNameBase := owner.Namespace + "/" + owner.Name
- if tc.targetName != "" {
- reqNameBase = tc.targetNamespace + "/" + tc.targetName
- }
- expectedReqName, err := resource.GenerateDeterministicNameFromSpec("git", reqNameBase, rr.ResolutionSpec)
- if err != nil {
- t.Fatalf("unexpected error: %v", err)
- }
- if expectedReqName != req.ResolverPayload().Name {
- t.Errorf("expected request name %s, but was %s", expectedReqName, req.ResolverPayload().Name)
- }
- })
- }
-}
diff --git a/upstream/pkg/remoteresolution/resolver/bundle/resolver.go b/upstream/pkg/remoteresolution/resolver/bundle/resolver.go
deleted file mode 100644
index 4f8612931a0..00000000000
--- a/upstream/pkg/remoteresolution/resolver/bundle/resolver.go
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- Copyright 2024 The Tekton Authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package bundle
-
-import (
- "context"
- "errors"
-
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
- "github.com/tektoncd/pipeline/pkg/resolution/common"
- "github.com/tektoncd/pipeline/pkg/resolution/resolver/bundle"
- resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
- "k8s.io/client-go/kubernetes"
- "knative.dev/pkg/client/injection/kube/client"
-)
-
-const (
- // LabelValueBundleResolverType is the value to use for the
- // resolution.tekton.dev/type label on resource requests
- LabelValueBundleResolverType string = "bundles"
-
- // BundleResolverName is the name that the bundle resolver should be associated with.
- BundleResolverName = "bundleresolver"
-)
-
-var _ framework.Resolver = &Resolver{}
-
-// Resolver implements a framework.Resolver that can fetch files from OCI bundles.
-type Resolver struct {
- kubeClientSet kubernetes.Interface
-}
-
-// Initialize sets up any dependencies needed by the Resolver. None atm.
-func (r *Resolver) Initialize(ctx context.Context) error {
- r.kubeClientSet = client.Get(ctx)
- return nil
-}
-
-// GetName returns a string name to refer to this Resolver by.
-func (r *Resolver) GetName(context.Context) string {
- return BundleResolverName
-}
-
-// GetConfigName returns the name of the bundle resolver's configmap.
-func (r *Resolver) GetConfigName(context.Context) string {
- return bundle.ConfigMapName
-}
-
-// GetSelector returns a map of labels to match requests to this Resolver.
-func (r *Resolver) GetSelector(context.Context) map[string]string {
- return map[string]string{
- common.LabelKeyResolverType: LabelValueBundleResolverType,
- }
-}
-
-// Validate ensures reqolution request spec from a request are as expected.
-func (r *Resolver) Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error {
- if len(req.Params) > 0 {
- return bundle.ValidateParams(ctx, req.Params)
- }
- // Remove this error once validate url has been implemented.
- return errors.New("cannot validate request. the Validate method has not been implemented.")
-}
-
-// Resolve uses the given request spec resolve the requested file or resource.
-func (r *Resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error) {
- if len(req.Params) > 0 {
- return bundle.ResolveRequest(ctx, r.kubeClientSet, req)
- }
- // Remove this error once resolution of url has been implemented.
- return nil, errors.New("the Resolve method has not been implemented.")
-}
diff --git a/upstream/pkg/remoteresolution/resolver/bundle/resolver_test.go b/upstream/pkg/remoteresolution/resolver/bundle/resolver_test.go
deleted file mode 100644
index 738c523991b..00000000000
--- a/upstream/pkg/remoteresolution/resolver/bundle/resolver_test.go
+++ /dev/null
@@ -1,665 +0,0 @@
-/*
- Copyright 2024 The Tekton Authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package bundle_test
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http/httptest"
- "net/url"
- "strings"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-containerregistry/pkg/registry"
- resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
- pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- "github.com/tektoncd/pipeline/pkg/internal/resolution"
- ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
- bundle "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/bundle"
- frtesting "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/testing"
- resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
- bundleresolution "github.com/tektoncd/pipeline/pkg/resolution/resolver/bundle"
- "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
- frameworktesting "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework/testing"
- "github.com/tektoncd/pipeline/test"
- "github.com/tektoncd/pipeline/test/diff"
- corev1 "k8s.io/api/core/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- ktesting "k8s.io/client-go/testing"
- "knative.dev/pkg/system"
- _ "knative.dev/pkg/system/testing" // Setup system.Namespace()
- "sigs.k8s.io/yaml"
-)
-
-const (
- disabledError = "cannot handle resolution request, enable-bundles-resolver feature flag not true"
-)
-
-func TestGetSelector(t *testing.T) {
- resolver := bundle.Resolver{}
- sel := resolver.GetSelector(context.Background())
- if typ, has := sel[resolutioncommon.LabelKeyResolverType]; !has {
- t.Fatalf("unexpected selector: %v", sel)
- } else if typ != bundle.LabelValueBundleResolverType {
- t.Fatalf("unexpected type: %q", typ)
- }
-}
-
-func TestValidateParamsSecret(t *testing.T) {
- resolver := bundle.Resolver{}
- config := map[string]string{
- bundleresolution.ConfigServiceAccount: "default",
- }
- ctx := framework.InjectResolverConfigToContext(context.Background(), config)
-
- paramsWithTask := []pipelinev1.Param{{
- Name: bundleresolution.ParamKind,
- Value: *pipelinev1.NewStructuredValues("task"),
- }, {
- Name: bundleresolution.ParamName,
- Value: *pipelinev1.NewStructuredValues("foo"),
- }, {
- Name: bundleresolution.ParamBundle,
- Value: *pipelinev1.NewStructuredValues("bar"),
- }, {
- Name: bundleresolution.ParamImagePullSecret,
- Value: *pipelinev1.NewStructuredValues("baz"),
- }}
- req := v1beta1.ResolutionRequestSpec{Params: paramsWithTask}
- if err := resolver.Validate(ctx, &req); err != nil {
- t.Fatalf("unexpected error validating params: %v", err)
- }
-
- paramsWithPipeline := []pipelinev1.Param{{
- Name: bundleresolution.ParamKind,
- Value: *pipelinev1.NewStructuredValues("pipeline"),
- }, {
- Name: bundleresolution.ParamName,
- Value: *pipelinev1.NewStructuredValues("foo"),
- }, {
- Name: bundleresolution.ParamBundle,
- Value: *pipelinev1.NewStructuredValues("bar"),
- }, {
- Name: bundleresolution.ParamImagePullSecret,
- Value: *pipelinev1.NewStructuredValues("baz"),
- }}
- req = v1beta1.ResolutionRequestSpec{Params: paramsWithPipeline}
- if err := resolver.Validate(ctx, &req); err != nil {
- t.Fatalf("unexpected error validating params: %v", err)
- }
-}
-
-func TestValidateParamsServiceAccount(t *testing.T) {
- resolver := bundle.Resolver{}
- config := map[string]string{
- bundleresolution.ConfigServiceAccount: "default",
- }
- ctx := framework.InjectResolverConfigToContext(context.Background(), config)
-
- paramsWithTask := []pipelinev1.Param{{
- Name: bundleresolution.ParamKind,
- Value: *pipelinev1.NewStructuredValues("task"),
- }, {
- Name: bundleresolution.ParamName,
- Value: *pipelinev1.NewStructuredValues("foo"),
- }, {
- Name: bundleresolution.ParamBundle,
- Value: *pipelinev1.NewStructuredValues("bar"),
- }, {
- Name: bundleresolution.ParamServiceAccount,
- Value: *pipelinev1.NewStructuredValues("baz"),
- }}
- req := v1beta1.ResolutionRequestSpec{Params: paramsWithTask}
- if err := resolver.Validate(ctx, &req); err != nil {
- t.Fatalf("unexpected error validating params: %v", err)
- }
-
- paramsWithPipeline := []pipelinev1.Param{{
- Name: bundleresolution.ParamKind,
- Value: *pipelinev1.NewStructuredValues("pipeline"),
- }, {
- Name: bundleresolution.ParamName,
- Value: *pipelinev1.NewStructuredValues("foo"),
- }, {
- Name: bundleresolution.ParamBundle,
- Value: *pipelinev1.NewStructuredValues("bar"),
- }, {
- Name: bundleresolution.ParamServiceAccount,
- Value: *pipelinev1.NewStructuredValues("baz"),
- }}
- req = v1beta1.ResolutionRequestSpec{Params: paramsWithPipeline}
- if err := resolver.Validate(context.Background(), &req); err != nil {
- t.Fatalf("unexpected error validating params: %v", err)
- }
-}
-
-func TestValidateDisabled(t *testing.T) {
- resolver := bundle.Resolver{}
-
- var err error
-
- params := []pipelinev1.Param{{
- Name: bundleresolution.ParamKind,
- Value: *pipelinev1.NewStructuredValues("task"),
- }, {
- Name: bundleresolution.ParamName,
- Value: *pipelinev1.NewStructuredValues("foo"),
- }, {
- Name: bundleresolution.ParamBundle,
- Value: *pipelinev1.NewStructuredValues("bar"),
- }, {
- Name: bundleresolution.ParamImagePullSecret,
- Value: *pipelinev1.NewStructuredValues("baz"),
- }}
- req := v1beta1.ResolutionRequestSpec{Params: params}
- err = resolver.Validate(resolverDisabledContext(), &req)
- if err == nil {
- t.Fatalf("expected disabled err")
- }
-
- if d := cmp.Diff(disabledError, err.Error()); d != "" {
- t.Errorf("unexpected error: %s", diff.PrintWantGot(d))
- }
-}
-
-func TestValidateMissing(t *testing.T) {
- resolver := bundle.Resolver{}
-
- var err error
-
- paramsMissingBundle := []pipelinev1.Param{{
- Name: bundleresolution.ParamKind,
- Value: *pipelinev1.NewStructuredValues("task"),
- }, {
- Name: bundleresolution.ParamName,
- Value: *pipelinev1.NewStructuredValues("foo"),
- }, {
- Name: bundleresolution.ParamImagePullSecret,
- Value: *pipelinev1.NewStructuredValues("baz"),
- }}
- req := v1beta1.ResolutionRequestSpec{Params: paramsMissingBundle}
- err = resolver.Validate(context.Background(), &req)
- if err == nil {
- t.Fatalf("expected missing kind err")
- }
-
- paramsMissingName := []pipelinev1.Param{{
- Name: bundleresolution.ParamKind,
- Value: *pipelinev1.NewStructuredValues("task"),
- }, {
- Name: bundleresolution.ParamBundle,
- Value: *pipelinev1.NewStructuredValues("bar"),
- }, {
- Name: bundleresolution.ParamImagePullSecret,
- Value: *pipelinev1.NewStructuredValues("baz"),
- }}
- req = v1beta1.ResolutionRequestSpec{Params: paramsMissingName}
- err = resolver.Validate(context.Background(), &req)
- if err == nil {
- t.Fatalf("expected missing name err")
- }
-}
-
-func TestResolveDisabled(t *testing.T) {
- resolver := bundle.Resolver{}
-
- var err error
-
- params := []pipelinev1.Param{{
- Name: bundleresolution.ParamKind,
- Value: *pipelinev1.NewStructuredValues("task"),
- }, {
- Name: bundleresolution.ParamName,
- Value: *pipelinev1.NewStructuredValues("foo"),
- }, {
- Name: bundleresolution.ParamBundle,
- Value: *pipelinev1.NewStructuredValues("bar"),
- }, {
- Name: bundleresolution.ParamImagePullSecret,
- Value: *pipelinev1.NewStructuredValues("baz"),
- }}
- req := v1beta1.ResolutionRequestSpec{Params: params}
- _, err = resolver.Resolve(resolverDisabledContext(), &req)
- if err == nil {
- t.Fatalf("expected disabled err")
- }
-
- if d := cmp.Diff(disabledError, err.Error()); d != "" {
- t.Errorf("unexpected error: %s", diff.PrintWantGot(d))
- }
-}
-
-func TestResolve_KeyChainError(t *testing.T) {
- resolver := &bundle.Resolver{}
- params := ¶ms{
- bundle: "foo",
- name: "example-task",
- kind: "task",
- secret: "bar",
- }
-
- ctx, _ := ttesting.SetupFakeContext(t)
- request := createRequest(params)
-
- d := test.Data{
- ResolutionRequests: []*v1beta1.ResolutionRequest{request},
- ConfigMaps: []*corev1.ConfigMap{{
- ObjectMeta: metav1.ObjectMeta{
- Name: bundleresolution.ConfigMapName,
- Namespace: resolverconfig.ResolversNamespace(system.Namespace()),
- },
- Data: map[string]string{
- bundleresolution.ConfigKind: "task",
- bundleresolution.ConfigServiceAccount: "default",
- },
- }},
- }
-
- testAssets, cancel := frtesting.GetResolverFrameworkController(ctx, t, d, resolver)
- defer cancel()
-
- expectedErr := apierrors.NewBadRequest("bad request")
- // return error when getting secrets from kube client
- testAssets.Clients.Kube.Fake.PrependReactor("get", "secrets", func(action ktesting.Action) (bool, runtime.Object, error) {
- return true, nil, expectedErr
- })
-
- err := testAssets.Controller.Reconciler.Reconcile(testAssets.Ctx, strings.Join([]string{request.Namespace, request.Name}, "/"))
- if err == nil {
- t.Fatalf("expected to get error but got nothing")
- }
-
- if !errors.Is(err, expectedErr) {
- t.Fatalf("expected to get error %v, but got %v", expectedErr, err)
- }
-}
-
-type params struct {
- serviceAccount string
- secret string
- bundle string
- name string
- kind string
-}
-
-func TestResolve(t *testing.T) {
- // example task resource
- exampleTask := &pipelinev1beta1.Task{
- ObjectMeta: metav1.ObjectMeta{
- Name: "example-task",
- Namespace: "task-ns",
- ResourceVersion: "00002",
- },
- TypeMeta: metav1.TypeMeta{
- Kind: string(pipelinev1beta1.NamespacedTaskKind),
- APIVersion: "tekton.dev/v1beta1",
- },
- Spec: pipelinev1beta1.TaskSpec{
- Steps: []pipelinev1beta1.Step{{
- Name: "some-step",
- Image: "some-image",
- Command: []string{"something"},
- }},
- },
- }
- taskAsYAML, err := yaml.Marshal(exampleTask)
- if err != nil {
- t.Fatalf("couldn't marshal task: %v", err)
- }
-
- // example pipeline resource
- examplePipeline := &pipelinev1beta1.Pipeline{
- ObjectMeta: metav1.ObjectMeta{
- Name: "example-pipeline",
- Namespace: "pipeline-ns",
- ResourceVersion: "00001",
- },
- TypeMeta: metav1.TypeMeta{
- Kind: "Pipeline",
- APIVersion: "tekton.dev/v1beta1",
- },
- Spec: pipelinev1beta1.PipelineSpec{
- Tasks: []pipelinev1beta1.PipelineTask{{
- Name: "some-pipeline-task",
- TaskRef: &pipelinev1beta1.TaskRef{
- Name: "some-task",
- Kind: pipelinev1beta1.NamespacedTaskKind,
- },
- }},
- },
- }
- pipelineAsYAML, err := yaml.Marshal(examplePipeline)
- if err != nil {
- t.Fatalf("couldn't marshal pipeline: %v", err)
- }
-
- // too many objects in bundle resolver test
- var tooManyObjs []runtime.Object
- for i := 0; i <= bundleresolution.MaximumBundleObjects; i++ {
- name := fmt.Sprintf("%d-task", i)
- obj := pipelinev1beta1.Task{
- ObjectMeta: metav1.ObjectMeta{
- Name: name,
- },
- TypeMeta: metav1.TypeMeta{
- APIVersion: "tekton.dev/v1beta1",
- Kind: "Task",
- },
- }
- tooManyObjs = append(tooManyObjs, &obj)
- }
-
- // Set up a fake registry to push an image to.
- s := httptest.NewServer(registry.New())
- defer s.Close()
- u, err := url.Parse(s.URL)
- if err != nil {
- t.Fatal(err)
- }
- r := fmt.Sprintf("%s/%s", u.Host, "testbundleresolver")
- testImages := map[string]*imageRef{
- "single-task": pushToRegistry(t, r, "single-task", []runtime.Object{exampleTask}, test.DefaultObjectAnnotationMapper),
- "single-pipeline": pushToRegistry(t, r, "single-pipeline", []runtime.Object{examplePipeline}, test.DefaultObjectAnnotationMapper),
- "multiple-resources": pushToRegistry(t, r, "multiple-resources", []runtime.Object{exampleTask, examplePipeline}, test.DefaultObjectAnnotationMapper),
- "too-many-objs": pushToRegistry(t, r, "too-many-objs", tooManyObjs, asIsMapper),
- "single-task-no-version": pushToRegistry(t, r, "single-task-no-version", []runtime.Object{&pipelinev1beta1.Task{TypeMeta: metav1.TypeMeta{Kind: "task"}, ObjectMeta: metav1.ObjectMeta{Name: "foo"}}}, asIsMapper),
- "single-task-no-kind": pushToRegistry(t, r, "single-task-no-kind", []runtime.Object{&pipelinev1beta1.Task{TypeMeta: metav1.TypeMeta{APIVersion: "tekton.dev/v1beta1"}, ObjectMeta: metav1.ObjectMeta{Name: "foo"}}}, asIsMapper),
- "single-task-no-name": pushToRegistry(t, r, "single-task-no-name", []runtime.Object{&pipelinev1beta1.Task{TypeMeta: metav1.TypeMeta{APIVersion: "tekton.dev/v1beta1", Kind: "task"}}}, asIsMapper),
- "single-task-kind-incorrect-form": pushToRegistry(t, r, "single-task-kind-incorrect-form", []runtime.Object{&pipelinev1beta1.Task{TypeMeta: metav1.TypeMeta{APIVersion: "tekton.dev/v1beta1", Kind: "Task"}, ObjectMeta: metav1.ObjectMeta{Name: "foo"}}}, asIsMapper),
- }
-
- testcases := []struct {
- name string
- args *params
- imageName string
- kindInBundle string
- expectedStatus *v1beta1.ResolutionRequestStatus
- expectedErrMessage string
- }{
- {
- name: "single task: digest is included in the bundle parameter",
- args: ¶ms{
- bundle: fmt.Sprintf("%s@%s:%s", testImages["single-task"].uri, testImages["single-task"].algo, testImages["single-task"].hex),
- name: "example-task",
- kind: "task",
- },
- imageName: "single-task",
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(taskAsYAML),
- }, {
- name: "single task: param kind is capitalized, but kind in bundle is not",
- args: ¶ms{
- bundle: fmt.Sprintf("%s@%s:%s", testImages["single-task"].uri, testImages["single-task"].algo, testImages["single-task"].hex),
- name: "example-task",
- kind: "Task",
- },
- kindInBundle: "task",
- imageName: "single-task",
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(taskAsYAML),
- }, {
- name: "single task: tag is included in the bundle parameter",
- args: ¶ms{
- bundle: testImages["single-task"].uri + ":latest",
- name: "example-task",
- kind: "task",
- },
- imageName: "single-task",
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(taskAsYAML),
- }, {
- name: "single task: using default kind value from configmap",
- args: ¶ms{
- bundle: testImages["single-task"].uri + ":latest",
- name: "example-task",
- },
- imageName: "single-task",
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(taskAsYAML),
- }, {
- name: "single pipeline",
- args: ¶ms{
- bundle: testImages["single-pipeline"].uri + ":latest",
- name: "example-pipeline",
- kind: "pipeline",
- },
- imageName: "single-pipeline",
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(pipelineAsYAML),
- }, {
- name: "multiple resources: an image has both task and pipeline resource",
- args: ¶ms{
- bundle: testImages["multiple-resources"].uri + ":latest",
- name: "example-pipeline",
- kind: "pipeline",
- },
- imageName: "multiple-resources",
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(pipelineAsYAML),
- }, {
- name: "too many objects in an image",
- args: ¶ms{
- bundle: testImages["too-many-objs"].uri + ":latest",
- name: "2-task",
- kind: "task",
- },
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
- expectedErrMessage: fmt.Sprintf("contained more than the maximum %d allow objects", bundleresolution.MaximumBundleObjects),
- }, {
- name: "single task no version",
- args: ¶ms{
- bundle: testImages["single-task-no-version"].uri + ":latest",
- name: "foo",
- kind: "task",
- },
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
- expectedErrMessage: fmt.Sprintf("the layer 0 does not contain a %s annotation", bundleresolution.BundleAnnotationAPIVersion),
- }, {
- name: "single task no kind",
- args: ¶ms{
- bundle: testImages["single-task-no-kind"].uri + ":latest",
- name: "foo",
- kind: "task",
- },
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
- expectedErrMessage: fmt.Sprintf("the layer 0 does not contain a %s annotation", bundleresolution.BundleAnnotationKind),
- }, {
- name: "single task no name",
- args: ¶ms{
- bundle: testImages["single-task-no-name"].uri + ":latest",
- name: "foo",
- kind: "task",
- },
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
- expectedErrMessage: fmt.Sprintf("the layer 0 does not contain a %s annotation", bundleresolution.BundleAnnotationName),
- }, {
- name: "single task kind incorrect form",
- args: ¶ms{
- bundle: testImages["single-task-kind-incorrect-form"].uri + ":latest",
- name: "foo",
- kind: "task",
- },
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
- expectedErrMessage: fmt.Sprintf("the layer 0 the annotation %s must be lowercased and singular, found %s", bundleresolution.BundleAnnotationKind, "Task"),
- },
- }
-
- resolver := &bundle.Resolver{}
- confMap := map[string]string{
- bundleresolution.ConfigKind: "task",
- bundleresolution.ConfigServiceAccount: "default",
- }
-
- for _, tc := range testcases {
- t.Run(tc.name, func(t *testing.T) {
- ctx, _ := ttesting.SetupFakeContext(t)
-
- request := createRequest(tc.args)
-
- d := test.Data{
- ResolutionRequests: []*v1beta1.ResolutionRequest{request},
- ConfigMaps: []*corev1.ConfigMap{{
- ObjectMeta: metav1.ObjectMeta{
- Name: bundleresolution.ConfigMapName,
- Namespace: resolverconfig.ResolversNamespace(system.Namespace()),
- },
- Data: confMap,
- }, {
- ObjectMeta: metav1.ObjectMeta{
- Namespace: resolverconfig.ResolversNamespace(system.Namespace()),
- Name: resolverconfig.GetFeatureFlagsConfigName(),
- },
- Data: map[string]string{
- "enable-bundles-resolver": "true",
- },
- }},
- }
- var expectedStatus *v1beta1.ResolutionRequestStatus
- var expectedError error
- if tc.expectedStatus != nil {
- expectedStatus = tc.expectedStatus.DeepCopy()
- if tc.expectedErrMessage == "" {
- if expectedStatus.Annotations == nil {
- expectedStatus.Annotations = make(map[string]string)
- }
-
- switch {
- case tc.kindInBundle != "":
- expectedStatus.Annotations[bundleresolution.ResolverAnnotationKind] = tc.kindInBundle
- case tc.args.kind != "":
- expectedStatus.Annotations[bundleresolution.ResolverAnnotationKind] = tc.args.kind
- default:
- expectedStatus.Annotations[bundleresolution.ResolverAnnotationKind] = "task"
- }
-
- expectedStatus.Annotations[bundleresolution.ResolverAnnotationName] = tc.args.name
- expectedStatus.Annotations[bundleresolution.ResolverAnnotationAPIVersion] = "v1beta1"
-
- expectedStatus.RefSource = &pipelinev1.RefSource{
- URI: testImages[tc.imageName].uri,
- Digest: map[string]string{
- testImages[tc.imageName].algo: testImages[tc.imageName].hex,
- },
- EntryPoint: tc.args.name,
- }
- expectedStatus.Source = expectedStatus.RefSource
- } else {
- expectedError = createError(tc.args.bundle, tc.expectedErrMessage)
- expectedStatus.Status.Conditions[0].Message = expectedError.Error()
- }
- }
-
- frtesting.RunResolverReconcileTest(ctx, t, d, resolver, request, expectedStatus, expectedError)
- })
- }
-}
-
-func createRequest(p *params) *v1beta1.ResolutionRequest {
- rr := &v1beta1.ResolutionRequest{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "resolution.tekton.dev/v1beta1",
- Kind: "ResolutionRequest",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "rr",
- Namespace: "foo",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{
- resolutioncommon.LabelKeyResolverType: bundle.LabelValueBundleResolverType,
- },
- },
- Spec: v1beta1.ResolutionRequestSpec{
- Params: []pipelinev1.Param{{
- Name: bundleresolution.ParamBundle,
- Value: *pipelinev1.NewStructuredValues(p.bundle),
- }, {
- Name: bundleresolution.ParamName,
- Value: *pipelinev1.NewStructuredValues(p.name),
- }, {
- Name: bundleresolution.ParamKind,
- Value: *pipelinev1.NewStructuredValues(p.kind),
- }, {
- Name: bundleresolution.ParamImagePullSecret,
- Value: *pipelinev1.NewStructuredValues(p.secret),
- }, {
- Name: bundleresolution.ParamServiceAccount,
- Value: *pipelinev1.NewStructuredValues(p.serviceAccount),
- }},
- },
- }
- return rr
-}
-
-func createError(image, msg string) error {
- return &resolutioncommon.GetResourceError{
- ResolverName: bundle.BundleResolverName,
- Key: "foo/rr",
- Original: fmt.Errorf("invalid tekton bundle %s, error: %s", image, msg),
- }
-}
-
-func asIsMapper(obj runtime.Object) map[string]string {
- annotations := map[string]string{}
- if test.GetObjectName(obj) != "" {
- annotations[bundleresolution.BundleAnnotationName] = test.GetObjectName(obj)
- }
-
- if obj.GetObjectKind().GroupVersionKind().Kind != "" {
- annotations[bundleresolution.BundleAnnotationKind] = obj.GetObjectKind().GroupVersionKind().Kind
- }
- if obj.GetObjectKind().GroupVersionKind().Version != "" {
- annotations[bundleresolution.BundleAnnotationAPIVersion] = obj.GetObjectKind().GroupVersionKind().Version
- }
- return annotations
-}
-
-func resolverDisabledContext() context.Context {
- return frameworktesting.ContextWithBundlesResolverDisabled(context.Background())
-}
-
-type imageRef struct {
- // uri is the image repositry identifier i.e. "gcr.io/tekton-releases/catalog/upstream/golang-build"
- uri string
- // algo is the algorithm portion of a particular image digest i.e. "sha256".
- algo string
- // hex is hex encoded portion of a particular image digest i.e. "23293df97dc11957ec36a88c80101bb554039a76e8992a435112eea8283b30d4".
- hex string
-}
-
-// pushToRegistry pushes an image to the registry and returns an imageRef.
-// It accepts a registry address, image name, the data and an ObjectAnnotationMapper
-// to map an object to the annotations for it.
-// NOTE: Every image pushed to the registry has a default tag named "latest".
-func pushToRegistry(t *testing.T, registry, imageName string, data []runtime.Object, mapper test.ObjectAnnotationMapper) *imageRef {
- t.Helper()
- ref, err := test.CreateImageWithAnnotations(fmt.Sprintf("%s/%s:latest", registry, imageName), mapper, data...)
- if err != nil {
- t.Fatalf("couldn't push the image: %v", err)
- }
-
- refSplit := strings.Split(ref, "@")
- uri, digest := refSplit[0], refSplit[1]
- digSplits := strings.Split(digest, ":")
- algo, hex := digSplits[0], digSplits[1]
-
- return &imageRef{
- uri: uri,
- algo: algo,
- hex: hex,
- }
-}
diff --git a/upstream/pkg/remoteresolution/resolver/cluster/resolver.go b/upstream/pkg/remoteresolution/resolver/cluster/resolver.go
deleted file mode 100644
index c08f8a18bd3..00000000000
--- a/upstream/pkg/remoteresolution/resolver/cluster/resolver.go
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cluster
-
-import (
- "context"
- "errors"
-
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
- pipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
- resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
- "github.com/tektoncd/pipeline/pkg/resolution/resolver/cluster"
- resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
-)
-
-const (
- // LabelValueClusterResolverType is the value to use for the
- // resolution.tekton.dev/type label on resource requests
- LabelValueClusterResolverType string = "cluster"
-
- // ClusterResolverName is the name that the cluster resolver should be
- // associated with
- ClusterResolverName string = "Cluster"
-
- configMapName = "cluster-resolver-config"
-)
-
-var _ framework.Resolver = &Resolver{}
-
-// ResolverV2 implements a framework.Resolver that can fetch resources from other namespaces.
-type Resolver struct {
- pipelineClientSet clientset.Interface
-}
-
-// Initialize performs any setup required by the cluster resolver.
-func (r *Resolver) Initialize(ctx context.Context) error {
- r.pipelineClientSet = pipelineclient.Get(ctx)
- return nil
-}
-
-// GetName returns the string name that the cluster resolver should be
-// associated with.
-func (r *Resolver) GetName(_ context.Context) string {
- return ClusterResolverName
-}
-
-// GetSelector returns the labels that resource requests are required to have for
-// the cluster resolver to process them.
-func (r *Resolver) GetSelector(_ context.Context) map[string]string {
- return map[string]string{
- resolutioncommon.LabelKeyResolverType: LabelValueClusterResolverType,
- }
-}
-
-// Validate returns an error if the given parameter map is not
-// valid for a resource request targeting the cluster resolver.
-func (r *Resolver) Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error {
- if len(req.Params) > 0 {
- return cluster.ValidateParams(ctx, req.Params)
- }
- // Remove this error once validate url has been implemented.
- return errors.New("cannot validate request. the Validate method has not been implemented.")
-}
-
-// Resolve performs the work of fetching a resource from a namespace with the given
-// resolution spec.
-func (r *Resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error) {
- if len(req.Params) > 0 {
- return cluster.ResolveFromParams(ctx, req.Params, r.pipelineClientSet)
- }
- // Remove this error once resolution of url has been implemented.
- return nil, errors.New("the Resolve method has not been implemented.")
-}
-
-var _ resolutionframework.ConfigWatcher = &Resolver{}
-
-// GetConfigName returns the name of the cluster resolver's configmap.
-func (r *Resolver) GetConfigName(context.Context) string {
- return configMapName
-}
diff --git a/upstream/pkg/remoteresolution/resolver/cluster/resolver_test.go b/upstream/pkg/remoteresolution/resolver/cluster/resolver_test.go
deleted file mode 100644
index feecae799c7..00000000000
--- a/upstream/pkg/remoteresolution/resolver/cluster/resolver_test.go
+++ /dev/null
@@ -1,507 +0,0 @@
-/*
- Copyright 2024 The Tekton Authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-*/
-
-package cluster_test
-
-import (
- "context"
- "encoding/base64"
- "encoding/hex"
- "errors"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
- pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- "github.com/tektoncd/pipeline/pkg/internal/resolution"
- ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
- cluster "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/cluster"
- frtesting "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/testing"
- resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
- clusterresolution "github.com/tektoncd/pipeline/pkg/resolution/resolver/cluster"
- "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
- frameworktesting "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework/testing"
- "github.com/tektoncd/pipeline/test"
- "github.com/tektoncd/pipeline/test/diff"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- duckv1 "knative.dev/pkg/apis/duck/v1"
- "knative.dev/pkg/system"
- _ "knative.dev/pkg/system/testing"
- "sigs.k8s.io/yaml"
-)
-
-const (
- disabledError = "cannot handle resolution request, enable-cluster-resolver feature flag not true"
-)
-
-func TestGetSelector(t *testing.T) {
- resolver := cluster.Resolver{}
- sel := resolver.GetSelector(context.Background())
- if typ, has := sel[resolutioncommon.LabelKeyResolverType]; !has {
- t.Fatalf("unexpected selector: %v", sel)
- } else if typ != cluster.LabelValueClusterResolverType {
- t.Fatalf("unexpected type: %q", typ)
- }
-}
-
-func TestValidate(t *testing.T) {
- resolver := cluster.Resolver{}
-
- params := []pipelinev1.Param{{
- Name: clusterresolution.KindParam,
- Value: *pipelinev1.NewStructuredValues("task"),
- }, {
- Name: clusterresolution.NamespaceParam,
- Value: *pipelinev1.NewStructuredValues("foo"),
- }, {
- Name: clusterresolution.NameParam,
- Value: *pipelinev1.NewStructuredValues("baz"),
- }}
-
- ctx := framework.InjectResolverConfigToContext(context.Background(), map[string]string{
- clusterresolution.AllowedNamespacesKey: "foo,bar",
- clusterresolution.BlockedNamespacesKey: "abc,def",
- })
-
- req := v1beta1.ResolutionRequestSpec{Params: params}
- if err := resolver.Validate(ctx, &req); err != nil {
- t.Fatalf("unexpected error validating params: %v", err)
- }
-}
-
-func TestValidateNotEnabled(t *testing.T) {
- resolver := cluster.Resolver{}
-
- var err error
-
- params := []pipelinev1.Param{{
- Name: clusterresolution.KindParam,
- Value: *pipelinev1.NewStructuredValues("task"),
- }, {
- Name: clusterresolution.NamespaceParam,
- Value: *pipelinev1.NewStructuredValues("foo"),
- }, {
- Name: clusterresolution.NameParam,
- Value: *pipelinev1.NewStructuredValues("baz"),
- }}
- req := v1beta1.ResolutionRequestSpec{Params: params}
- err = resolver.Validate(resolverDisabledContext(), &req)
- if err == nil {
- t.Fatalf("expected disabled err")
- }
- if d := cmp.Diff(disabledError, err.Error()); d != "" {
- t.Errorf("unexpected error: %s", diff.PrintWantGot(d))
- }
-}
-
-func TestValidateFailure(t *testing.T) {
- testCases := []struct {
- name string
- params map[string]string
- conf map[string]string
- expectedErr string
- }{
- {
- name: "missing kind",
- params: map[string]string{
- clusterresolution.NameParam: "foo",
- clusterresolution.NamespaceParam: "bar",
- },
- expectedErr: "missing required cluster resolver params: kind",
- }, {
- name: "invalid kind",
- params: map[string]string{
- clusterresolution.KindParam: "banana",
- clusterresolution.NamespaceParam: "foo",
- clusterresolution.NameParam: "bar",
- },
- expectedErr: "unknown or unsupported resource kind 'banana'",
- }, {
- name: "missing multiple",
- params: map[string]string{
- clusterresolution.KindParam: "task",
- },
- expectedErr: "missing required cluster resolver params: name, namespace",
- }, {
- name: "not in allowed namespaces",
- params: map[string]string{
- clusterresolution.KindParam: "task",
- clusterresolution.NamespaceParam: "foo",
- clusterresolution.NameParam: "baz",
- },
- conf: map[string]string{
- clusterresolution.AllowedNamespacesKey: "abc,def",
- },
- expectedErr: "access to specified namespace foo is not allowed",
- }, {
- name: "in blocked namespaces",
- params: map[string]string{
- clusterresolution.KindParam: "task",
- clusterresolution.NamespaceParam: "foo",
- clusterresolution.NameParam: "baz",
- },
- conf: map[string]string{
- clusterresolution.BlockedNamespacesKey: "foo,bar",
- },
- expectedErr: "access to specified namespace foo is blocked",
- },
- {
- name: "blocked by star",
- params: map[string]string{
- clusterresolution.KindParam: "task",
- clusterresolution.NamespaceParam: "foo",
- clusterresolution.NameParam: "baz",
- },
- conf: map[string]string{
- clusterresolution.BlockedNamespacesKey: "*",
- },
- expectedErr: "only explicit allowed access to namespaces is allowed",
- },
- {
- name: "blocked by star but allowed explicitly",
- params: map[string]string{
- clusterresolution.KindParam: "task",
- clusterresolution.NamespaceParam: "foo",
- clusterresolution.NameParam: "baz",
- },
- conf: map[string]string{
- clusterresolution.BlockedNamespacesKey: "*",
- clusterresolution.AllowedNamespacesKey: "foo",
- },
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- resolver := &cluster.Resolver{}
-
- ctx := context.Background()
- if len(tc.conf) > 0 {
- ctx = framework.InjectResolverConfigToContext(ctx, tc.conf)
- }
-
- var asParams []pipelinev1.Param
- for k, v := range tc.params {
- asParams = append(asParams, pipelinev1.Param{
- Name: k,
- Value: *pipelinev1.NewStructuredValues(v),
- })
- }
- req := v1beta1.ResolutionRequestSpec{Params: asParams}
- err := resolver.Validate(ctx, &req)
- if tc.expectedErr == "" {
- if err != nil {
- t.Fatalf("got unexpected error: %v", err)
- }
- return
- }
- if err == nil {
- t.Fatalf("got no error, but expected: %s", tc.expectedErr)
- }
- if d := cmp.Diff(tc.expectedErr, err.Error()); d != "" {
- t.Errorf("error did not match: %s", diff.PrintWantGot(d))
- }
- })
- }
-}
-
-func TestResolve(t *testing.T) {
- defaultNS := "pipeline-ns"
-
- exampleTask := &pipelinev1.Task{
- ObjectMeta: metav1.ObjectMeta{
- Name: "example-task",
- Namespace: "task-ns",
- ResourceVersion: "00002",
- UID: "a123",
- },
- TypeMeta: metav1.TypeMeta{
- Kind: string(pipelinev1beta1.NamespacedTaskKind),
- APIVersion: "tekton.dev/v1",
- },
- Spec: pipelinev1.TaskSpec{
- Steps: []pipelinev1.Step{{
- Name: "some-step",
- Image: "some-image",
- Command: []string{"something"},
- }},
- },
- }
- taskChecksum, err := exampleTask.Checksum()
- if err != nil {
- t.Fatalf("couldn't checksum task: %v", err)
- }
- taskAsYAML, err := yaml.Marshal(exampleTask)
- if err != nil {
- t.Fatalf("couldn't marshal task: %v", err)
- }
-
- examplePipeline := &pipelinev1.Pipeline{
- ObjectMeta: metav1.ObjectMeta{
- Name: "example-pipeline",
- Namespace: defaultNS,
- ResourceVersion: "00001",
- UID: "b123",
- },
- TypeMeta: metav1.TypeMeta{
- Kind: "Pipeline",
- APIVersion: "tekton.dev/v1",
- },
- Spec: pipelinev1.PipelineSpec{
- Tasks: []pipelinev1.PipelineTask{{
- Name: "some-pipeline-task",
- TaskRef: &pipelinev1.TaskRef{
- Name: "some-task",
- Kind: pipelinev1.NamespacedTaskKind,
- },
- }},
- },
- }
- pipelineChecksum, err := examplePipeline.Checksum()
- if err != nil {
- t.Fatalf("couldn't checksum pipeline: %v", err)
- }
- pipelineAsYAML, err := yaml.Marshal(examplePipeline)
- if err != nil {
- t.Fatalf("couldn't marshal pipeline: %v", err)
- }
-
- testCases := []struct {
- name string
- kind string
- resourceName string
- namespace string
- allowedNamespaces string
- blockedNamespaces string
- expectedStatus *v1beta1.ResolutionRequestStatus
- expectedErr error
- }{
- {
- name: "successful task",
- kind: "task",
- resourceName: exampleTask.Name,
- namespace: exampleTask.Namespace,
- expectedStatus: &v1beta1.ResolutionRequestStatus{
- Status: duckv1.Status{},
- ResolutionRequestStatusFields: v1beta1.ResolutionRequestStatusFields{
- Data: base64.StdEncoding.Strict().EncodeToString(taskAsYAML),
- RefSource: &pipelinev1.RefSource{
- URI: "/apis/tekton.dev/v1/namespaces/task-ns/task/example-task@a123",
- Digest: map[string]string{
- "sha256": hex.EncodeToString(taskChecksum),
- },
- },
- },
- },
- }, {
- name: "successful pipeline",
- kind: "pipeline",
- resourceName: examplePipeline.Name,
- namespace: examplePipeline.Namespace,
- expectedStatus: &v1beta1.ResolutionRequestStatus{
- Status: duckv1.Status{},
- ResolutionRequestStatusFields: v1beta1.ResolutionRequestStatusFields{
- Data: base64.StdEncoding.Strict().EncodeToString(pipelineAsYAML),
- RefSource: &pipelinev1.RefSource{
- URI: "/apis/tekton.dev/v1/namespaces/pipeline-ns/pipeline/example-pipeline@b123",
- Digest: map[string]string{
- "sha256": hex.EncodeToString(pipelineChecksum),
- },
- },
- },
- },
- }, {
- name: "default namespace",
- kind: "pipeline",
- resourceName: examplePipeline.Name,
- expectedStatus: &v1beta1.ResolutionRequestStatus{
- Status: duckv1.Status{},
- ResolutionRequestStatusFields: v1beta1.ResolutionRequestStatusFields{
- Data: base64.StdEncoding.Strict().EncodeToString(pipelineAsYAML),
- RefSource: &pipelinev1.RefSource{
- URI: "/apis/tekton.dev/v1/namespaces/pipeline-ns/pipeline/example-pipeline@b123",
- Digest: map[string]string{
- "sha256": hex.EncodeToString(pipelineChecksum),
- },
- },
- },
- },
- }, {
- name: "default kind",
- resourceName: exampleTask.Name,
- namespace: exampleTask.Namespace,
- expectedStatus: &v1beta1.ResolutionRequestStatus{
- Status: duckv1.Status{},
- ResolutionRequestStatusFields: v1beta1.ResolutionRequestStatusFields{
- Data: base64.StdEncoding.Strict().EncodeToString(taskAsYAML),
- RefSource: &pipelinev1.RefSource{
- URI: "/apis/tekton.dev/v1/namespaces/task-ns/task/example-task@a123",
- Digest: map[string]string{
- "sha256": hex.EncodeToString(taskChecksum),
- },
- },
- },
- },
- }, {
- name: "no such task",
- kind: "task",
- resourceName: exampleTask.Name,
- namespace: "other-ns",
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
- expectedErr: &resolutioncommon.GetResourceError{
- ResolverName: cluster.ClusterResolverName,
- Key: "foo/rr",
- Original: errors.New(`tasks.tekton.dev "example-task" not found`),
- },
- }, {
- name: "not in allowed namespaces",
- kind: "task",
- resourceName: exampleTask.Name,
- namespace: "other-ns",
- allowedNamespaces: "foo,bar",
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
- expectedErr: &resolutioncommon.InvalidRequestError{
- ResolutionRequestKey: "foo/rr",
- Message: "access to specified namespace other-ns is not allowed",
- },
- }, {
- name: "in blocked namespaces",
- kind: "task",
- resourceName: exampleTask.Name,
- namespace: "other-ns",
- blockedNamespaces: "foo,other-ns,bar",
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
- expectedErr: &resolutioncommon.InvalidRequestError{
- ResolutionRequestKey: "foo/rr",
- Message: "access to specified namespace other-ns is blocked",
- },
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- ctx, _ := ttesting.SetupFakeContext(t)
-
- request := createRequest(tc.kind, tc.resourceName, tc.namespace)
-
- confMap := map[string]string{
- clusterresolution.DefaultKindKey: "task",
- clusterresolution.DefaultNamespaceKey: defaultNS,
- }
- if tc.allowedNamespaces != "" {
- confMap[clusterresolution.AllowedNamespacesKey] = tc.allowedNamespaces
- }
- if tc.blockedNamespaces != "" {
- confMap[clusterresolution.BlockedNamespacesKey] = tc.blockedNamespaces
- }
-
- d := test.Data{
- ConfigMaps: []*corev1.ConfigMap{{
- ObjectMeta: metav1.ObjectMeta{
- Name: "cluster-resolver-config",
- Namespace: resolverconfig.ResolversNamespace(system.Namespace()),
- },
- Data: confMap,
- }, {
- ObjectMeta: metav1.ObjectMeta{
- Namespace: resolverconfig.ResolversNamespace(system.Namespace()),
- Name: resolverconfig.GetFeatureFlagsConfigName(),
- },
- Data: map[string]string{
- "enable-cluster-resolver": "true",
- },
- }},
- Pipelines: []*pipelinev1.Pipeline{examplePipeline},
- ResolutionRequests: []*v1beta1.ResolutionRequest{request},
- Tasks: []*pipelinev1.Task{exampleTask},
- }
-
- resolver := &cluster.Resolver{}
-
- var expectedStatus *v1beta1.ResolutionRequestStatus
- if tc.expectedStatus != nil {
- expectedStatus = tc.expectedStatus.DeepCopy()
-
- if tc.expectedErr == nil {
- reqParams := make(map[string]pipelinev1.ParamValue)
- for _, p := range request.Spec.Params {
- reqParams[p.Name] = p.Value
- }
- if expectedStatus.Annotations == nil {
- expectedStatus.Annotations = make(map[string]string)
- }
- expectedStatus.Annotations[clusterresolution.ResourceNameAnnotation] = reqParams[clusterresolution.NameParam].StringVal
- if reqParams[clusterresolution.NamespaceParam].StringVal != "" {
- expectedStatus.Annotations[clusterresolution.ResourceNamespaceAnnotation] = reqParams[clusterresolution.NamespaceParam].StringVal
- } else {
- expectedStatus.Annotations[clusterresolution.ResourceNamespaceAnnotation] = defaultNS
- }
- } else {
- expectedStatus.Status.Conditions[0].Message = tc.expectedErr.Error()
- }
- expectedStatus.Source = expectedStatus.RefSource
- }
-
- frtesting.RunResolverReconcileTest(ctx, t, d, resolver, request, expectedStatus, tc.expectedErr)
- })
- }
-}
-
-func createRequest(kind, name, namespace string) *v1beta1.ResolutionRequest {
- rr := &v1beta1.ResolutionRequest{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "resolution.tekton.dev/v1beta1",
- Kind: "ResolutionRequest",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "rr",
- Namespace: "foo",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{
- resolutioncommon.LabelKeyResolverType: cluster.LabelValueClusterResolverType,
- },
- },
- Spec: v1beta1.ResolutionRequestSpec{
- Params: []pipelinev1.Param{{
- Name: clusterresolution.NameParam,
- Value: *pipelinev1.NewStructuredValues(name),
- }},
- },
- }
- if kind != "" {
- rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: clusterresolution.KindParam,
- Value: *pipelinev1.NewStructuredValues(kind),
- })
- }
- if namespace != "" {
- rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: clusterresolution.NamespaceParam,
- Value: *pipelinev1.NewStructuredValues(namespace),
- })
- }
-
- return rr
-}
-
-func resolverDisabledContext() context.Context {
- return frameworktesting.ContextWithClusterResolverDisabled(context.Background())
-}
diff --git a/upstream/pkg/remoteresolution/resolver/doc.go b/upstream/pkg/remoteresolution/resolver/doc.go
deleted file mode 100644
index 784827d34fd..00000000000
--- a/upstream/pkg/remoteresolution/resolver/doc.go
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Package resolver contains the upgraded remote resolution framework.
-It contains the upgraded framework and the built-in resolves.
-It is equivalent to `pkg/resolution/resolver`.
-This was necessary to ensure backwards compatibility with the existing framework.
-
-This package is subject to further refactoring and changes.
-*/
-package resolver
diff --git a/upstream/pkg/remoteresolution/resolver/framework/controller.go b/upstream/pkg/remoteresolution/resolver/framework/controller.go
deleted file mode 100644
index 665ecd89e93..00000000000
--- a/upstream/pkg/remoteresolution/resolver/framework/controller.go
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
-Copyright 2022 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package framework
-
-import (
- "context"
- "strings"
-
- rrclient "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client"
- rrinformer "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1beta1/resolutionrequest"
- framework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
- "k8s.io/client-go/tools/cache"
- "k8s.io/utils/clock"
- kubeclient "knative.dev/pkg/client/injection/kube/client"
- "knative.dev/pkg/configmap"
- "knative.dev/pkg/controller"
- "knative.dev/pkg/logging"
-)
-
-// ReconcilerModifier is a func that can access and modify a reconciler
-// in the moments before a resolver is started. It allows for
-// things like injecting a test clock.
-type ReconcilerModifier = func(reconciler *Reconciler)
-
-// NewController returns a knative controller for a Tekton Resolver.
-// This sets up a lot of the boilerplate that individual resolvers
-// shouldn't need to be concerned with since it's common to all of them.
-func NewController(ctx context.Context, resolver Resolver, modifiers ...ReconcilerModifier) func(context.Context, configmap.Watcher) *controller.Impl {
- if err := framework.ValidateResolver(ctx, resolver.GetSelector(ctx)); err != nil {
- panic(err.Error())
- }
- return func(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
- logger := logging.FromContext(ctx)
- kubeclientset := kubeclient.Get(ctx)
- rrclientset := rrclient.Get(ctx)
- rrInformer := rrinformer.Get(ctx)
-
- if err := resolver.Initialize(ctx); err != nil {
- panic(err.Error())
- }
-
- r := &Reconciler{
- LeaderAwareFuncs: framework.LeaderAwareFuncs(rrInformer.Lister()),
- kubeClientSet: kubeclientset,
- resolutionRequestLister: rrInformer.Lister(),
- resolutionRequestClientSet: rrclientset,
- resolver: resolver,
- }
-
- watchConfigChanges(ctx, r, cmw)
-
- // TODO(sbwsg): Do better sanitize.
- resolverName := resolver.GetName(ctx)
- resolverName = strings.ReplaceAll(resolverName, "/", "")
- resolverName = strings.ReplaceAll(resolverName, " ", "")
-
- applyModifiersAndDefaults(ctx, r, modifiers)
-
- impl := controller.NewContext(ctx, r, controller.ControllerOptions{
- WorkQueueName: "TektonResolverFramework." + resolverName,
- Logger: logger,
- })
-
- _, err := rrInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
- FilterFunc: framework.FilterResolutionRequestsBySelector(resolver.GetSelector(ctx)),
- Handler: cache.ResourceEventHandlerFuncs{
- AddFunc: impl.Enqueue,
- UpdateFunc: func(oldObj, newObj interface{}) {
- impl.Enqueue(newObj)
- },
- // TODO(sbwsg): should we deliver delete events
- // to the resolver?
- // DeleteFunc: impl.Enqueue,
- },
- })
- if err != nil {
- logging.FromContext(ctx).Panicf("Couldn't register ResolutionRequest informer event handler: %w", err)
- }
-
- return impl
- }
-}
-
-// watchConfigChanges binds a framework.Resolver to updates on its
-// configmap, using knative's configmap helpers. This is only done if
-// the resolver implements the framework.ConfigWatcher interface.
-func watchConfigChanges(ctx context.Context, reconciler *Reconciler, cmw configmap.Watcher) {
- if configWatcher, ok := reconciler.resolver.(framework.ConfigWatcher); ok {
- logger := logging.FromContext(ctx)
- resolverConfigName := configWatcher.GetConfigName(ctx)
- if resolverConfigName == "" {
- panic("resolver returned empty config name")
- }
- reconciler.configStore = framework.NewConfigStore(resolverConfigName, logger)
- reconciler.configStore.WatchConfigs(cmw)
- }
-}
-
-// applyModifiersAndDefaults applies the given modifiers to
-// a reconciler and, after doing so, sets any default values for things
-// that weren't set by a modifier.
-func applyModifiersAndDefaults(ctx context.Context, r *Reconciler, modifiers []ReconcilerModifier) {
- for _, mod := range modifiers {
- mod(r)
- }
-
- if r.Clock == nil {
- r.Clock = clock.RealClock{}
- }
-}
diff --git a/upstream/pkg/remoteresolution/resolver/framework/doc.go b/upstream/pkg/remoteresolution/resolver/framework/doc.go
deleted file mode 100644
index 765551cab71..00000000000
--- a/upstream/pkg/remoteresolution/resolver/framework/doc.go
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Package framework contains the upgraded remote resolution framework.
-It is equivalent to `pkg/resolution/resolver/framework`.
-This was necessary to ensure backwards compatibility with the existing framework.
-
-This package is subject to further refactoring and changes.
-*/
-package framework
diff --git a/upstream/pkg/remoteresolution/resolver/framework/fakeresolver.go b/upstream/pkg/remoteresolution/resolver/framework/fakeresolver.go
deleted file mode 100644
index 046ec12f740..00000000000
--- a/upstream/pkg/remoteresolution/resolver/framework/fakeresolver.go
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- Copyright 2022 The Tekton Authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package framework
-
-import (
- "context"
- "fmt"
- "time"
-
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
- "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
-)
-
-const FakeUrl string = "fake://url"
-
-var _ Resolver = &FakeResolver{}
-
-// FakeResolver implements a framework.Resolver that can fetch pre-configured strings based on a parameter value, or return
-// resolution attempts with a configured error.
-type FakeResolver framework.FakeResolver
-
-// Initialize performs any setup required by the fake resolver.
-func (r *FakeResolver) Initialize(ctx context.Context) error {
- if r.ForParam == nil {
- r.ForParam = make(map[string]*framework.FakeResolvedResource)
- }
- return nil
-}
-
-// GetName returns the string name that the fake resolver should be
-// associated with.
-func (r *FakeResolver) GetName(_ context.Context) string {
- return framework.FakeResolverName
-}
-
-// GetSelector returns the labels that resource requests are required to have for
-// the fake resolver to process them.
-func (r *FakeResolver) GetSelector(_ context.Context) map[string]string {
- return map[string]string{
- resolutioncommon.LabelKeyResolverType: framework.LabelValueFakeResolverType,
- }
-}
-
-// Validate returns an error if the given parameter map is not
-// valid for a resource request targeting the fake resolver.
-func (r *FakeResolver) Validate(_ context.Context, req *v1beta1.ResolutionRequestSpec) error {
- if len(req.Params) > 0 {
- return framework.ValidateParams(req.Params)
- }
- if req.URL != FakeUrl {
- return fmt.Errorf("Wrong url. Expected: %s, Got: %s", FakeUrl, req.URL)
- }
- return nil
-}
-
-// Resolve performs the work of fetching a file from the fake resolver given a map of
-// parameters.
-func (r *FakeResolver) Resolve(_ context.Context, req *v1beta1.ResolutionRequestSpec) (framework.ResolvedResource, error) {
- if len(req.Params) > 0 {
- return framework.Resolve(req.Params, r.ForParam)
- }
- frr, ok := r.ForParam[req.URL]
- if !ok {
- return nil, fmt.Errorf("couldn't find resource for url %s", req.URL)
- }
- return frr, nil
-}
-
-var _ framework.TimedResolution = &FakeResolver{}
-
-// GetResolutionTimeout returns the configured timeout for the reconciler, or the default time.Duration if not configured.
-func (r *FakeResolver) GetResolutionTimeout(ctx context.Context, defaultTimeout time.Duration, params map[string]string) (time.Duration, error) {
- return framework.GetResolutionTimeout(r.Timeout, defaultTimeout), nil
-}
diff --git a/upstream/pkg/remoteresolution/resolver/framework/interface.go b/upstream/pkg/remoteresolution/resolver/framework/interface.go
deleted file mode 100644
index 53cc9443143..00000000000
--- a/upstream/pkg/remoteresolution/resolver/framework/interface.go
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
-Copyright 2022 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package framework
-
-import (
- "context"
-
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
-)
-
-// Resolver is the interface to implement for type-specific resource
-// resolution. It fetches resources from a given type of remote location
-// and returns their content along with any associated annotations.
-type Resolver interface {
- // Initialize is called at the moment the resolver controller is
- // instantiated and is a good place to setup things like
- // resource listers.
- Initialize(ctx context.Context) error
-
- // GetName should give back the name of the resolver. E.g. "Git"
- GetName(ctx context.Context) string
-
- // GetSelector returns the labels that are used to direct resolution
- // requests to this resolver.
- GetSelector(ctx context.Context) map[string]string
-
- // Validate is given the ressolution request spec
- // should return an error if the resolver cannot resolve it.
- Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error
-
- // ResolveRequest receives the resolution request spec
- // and returns the resolved data along with any annotations
- // to include in the response. If resolution fails then an error
- // should be returned instead. If a resolution.Error
- // is returned then its Reason and Message are used as part of the
- // response to the request.
- Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (framework.ResolvedResource, error)
-}
diff --git a/upstream/pkg/remoteresolution/resolver/framework/reconciler.go b/upstream/pkg/remoteresolution/resolver/framework/reconciler.go
deleted file mode 100644
index 4e35557fe47..00000000000
--- a/upstream/pkg/remoteresolution/resolver/framework/reconciler.go
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
-Copyright 2022 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package framework
-
-import (
- "context"
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "time"
-
- pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- rrclient "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
- rrv1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1"
- resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
- "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/client-go/kubernetes"
- "k8s.io/client-go/tools/cache"
- "k8s.io/utils/clock"
- "knative.dev/pkg/controller"
- "knative.dev/pkg/logging"
- "knative.dev/pkg/reconciler"
-)
-
-// defaultMaximumResolutionDuration is the maximum amount of time
-// resolution may take.
-
-// defaultMaximumResolutionDuration is the max time that a call to
-// Resolve() may take. It can be overridden by a resolver implementing
-// the framework.TimedResolution interface.
-const defaultMaximumResolutionDuration = time.Minute
-
-// statusDataPatch is the json structure that will be PATCHed into
-// a ResolutionRequest with its data and annotations once successfully
-// resolved.
-type statusDataPatch struct {
- Annotations map[string]string `json:"annotations"`
- Data string `json:"data"`
- Source *pipelinev1beta1.ConfigSource `json:"source"`
- RefSource *pipelinev1.RefSource `json:"refSource"`
-}
-
-// Reconciler handles ResolutionRequest objects, performs functionality
-// common to all resolvers and delegates resolver-specific actions
-// to its embedded type-specific Resolver object.
-type Reconciler struct {
- // Implements reconciler.LeaderAware
- reconciler.LeaderAwareFuncs
-
- // Clock is used by the reconciler to track the passage of time
- // and can be overridden for tests.
- Clock clock.PassiveClock
-
- resolver Resolver
- kubeClientSet kubernetes.Interface
- resolutionRequestLister rrv1beta1.ResolutionRequestLister
- resolutionRequestClientSet rrclient.Interface
-
- configStore *framework.ConfigStore
-}
-
-var _ reconciler.LeaderAware = &Reconciler{}
-
-// Reconcile receives the string key of a ResolutionRequest object, looks
-// it up, checks it for common errors, and then delegates
-// resolver-specific functionality to the reconciler's embedded
-// type-specific resolver. Any errors that occur during validation or
-// resolution are handled by updating or failing the ResolutionRequest.
-func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
- namespace, name, err := cache.SplitMetaNamespaceKey(key)
- if err != nil {
- err = &resolutioncommon.InvalidResourceKeyError{Key: key, Original: err}
- return controller.NewPermanentError(err)
- }
-
- rr, err := r.resolutionRequestLister.ResolutionRequests(namespace).Get(name)
- if err != nil {
- err := &resolutioncommon.GetResourceError{ResolverName: "resolutionrequest", Key: key, Original: err}
- return controller.NewPermanentError(err)
- }
-
- if rr.IsDone() {
- return nil
- }
-
- // Inject request-scoped information into the context, such as
- // the namespace that the request originates from and the
- // configuration from the configmap this resolver is watching.
- ctx = resolutioncommon.InjectRequestNamespace(ctx, namespace)
- ctx = resolutioncommon.InjectRequestName(ctx, name)
- if r.configStore != nil {
- ctx = r.configStore.ToContext(ctx)
- }
-
- return r.resolve(ctx, key, rr)
-}
-
-func (r *Reconciler) resolve(ctx context.Context, key string, rr *v1beta1.ResolutionRequest) error {
- errChan := make(chan error)
- resourceChan := make(chan framework.ResolvedResource)
-
- paramsMap := make(map[string]string)
- for _, p := range rr.Spec.Params {
- paramsMap[p.Name] = p.Value.StringVal
- }
-
- timeoutDuration := defaultMaximumResolutionDuration
- if timed, ok := r.resolver.(framework.TimedResolution); ok {
- var err error
- timeoutDuration, err = timed.GetResolutionTimeout(ctx, defaultMaximumResolutionDuration, paramsMap)
- if err != nil {
- return err
- }
- }
-
- // A new context is created for resolution so that timeouts can
- // be enforced without affecting other uses of ctx (e.g. sending
- // Updates to ResolutionRequest objects).
- resolutionCtx, cancelFn := context.WithTimeout(ctx, timeoutDuration)
- defer cancelFn()
-
- go func() {
- validationError := r.resolver.Validate(resolutionCtx, &rr.Spec)
- if validationError != nil {
- errChan <- &resolutioncommon.InvalidRequestError{
- ResolutionRequestKey: key,
- Message: validationError.Error(),
- }
- return
- }
- resource, resolveErr := r.resolver.Resolve(resolutionCtx, &rr.Spec)
- if resolveErr != nil {
- errChan <- &resolutioncommon.GetResourceError{
- ResolverName: r.resolver.GetName(resolutionCtx),
- Key: key,
- Original: resolveErr,
- }
- return
- }
- resourceChan <- resource
- }()
-
- select {
- case err := <-errChan:
- if err != nil {
- return r.OnError(ctx, rr, err)
- }
- case <-resolutionCtx.Done():
- if err := resolutionCtx.Err(); err != nil {
- return r.OnError(ctx, rr, err)
- }
- case resource := <-resourceChan:
- return r.writeResolvedData(ctx, rr, resource)
- }
-
- return errors.New("unknown error")
-}
-
-// OnError is used to handle any situation where a ResolutionRequest has
-// reached a terminal situation that cannot be recovered from.
-func (r *Reconciler) OnError(ctx context.Context, rr *v1beta1.ResolutionRequest, err error) error {
- if resolutioncommon.IsErrTransient(err) {
- return err
- }
- if rr == nil {
- return controller.NewPermanentError(err)
- }
- if err != nil {
- _ = r.MarkFailed(ctx, rr, err)
- return controller.NewPermanentError(err)
- }
- return nil
-}
-
-// MarkFailed updates a ResolutionRequest as having failed. It returns
-// errors that occur during the update process or nil if the update
-// appeared to succeed.
-func (r *Reconciler) MarkFailed(ctx context.Context, rr *v1beta1.ResolutionRequest, resolutionErr error) error {
- key := fmt.Sprintf("%s/%s", rr.Namespace, rr.Name)
- reason, resolutionErr := resolutioncommon.ReasonError(resolutionErr)
- latestGeneration, err := r.resolutionRequestClientSet.ResolutionV1beta1().ResolutionRequests(rr.Namespace).Get(ctx, rr.Name, metav1.GetOptions{})
- if err != nil {
- logging.FromContext(ctx).Warnf("error getting latest generation of resolutionrequest %q: %v", key, err)
- return err
- }
- if latestGeneration.IsDone() {
- return nil
- }
- latestGeneration.Status.MarkFailed(reason, resolutionErr.Error())
- _, err = r.resolutionRequestClientSet.ResolutionV1beta1().ResolutionRequests(rr.Namespace).UpdateStatus(ctx, latestGeneration, metav1.UpdateOptions{})
- if err != nil {
- logging.FromContext(ctx).Warnf("error marking resolutionrequest %q as failed: %v", key, err)
- return err
- }
- return nil
-}
-
-func (r *Reconciler) writeResolvedData(ctx context.Context, rr *v1beta1.ResolutionRequest, resource framework.ResolvedResource) error {
- encodedData := base64.StdEncoding.Strict().EncodeToString(resource.Data())
- patchBytes, err := json.Marshal(map[string]statusDataPatch{
- "status": {
- Data: encodedData,
- Annotations: resource.Annotations(),
- RefSource: resource.RefSource(),
- Source: (*pipelinev1beta1.ConfigSource)(resource.RefSource()),
- },
- })
- if err != nil {
- logging.FromContext(ctx).Warnf("writeResolvedData error serializing resource request patch for resolution request %s:%s: %s", rr.Namespace, rr.Name, err.Error())
- return r.OnError(ctx, rr, &resolutioncommon.UpdatingRequestError{
- ResolutionRequestKey: fmt.Sprintf("%s/%s", rr.Namespace, rr.Name),
- Original: fmt.Errorf("error serializing resource request patch: %w", err),
- })
- }
- _, err = r.resolutionRequestClientSet.ResolutionV1beta1().ResolutionRequests(rr.Namespace).Patch(ctx, rr.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status")
- if err != nil {
- logging.FromContext(ctx).Warnf("writeResolvedData error patching resolution request %s:%s: %s", rr.Namespace, rr.Name, err.Error())
- return r.OnError(ctx, rr, &resolutioncommon.UpdatingRequestError{
- ResolutionRequestKey: fmt.Sprintf("%s/%s", rr.Namespace, rr.Name),
- Original: err,
- })
- }
-
- return nil
-}
diff --git a/upstream/pkg/remoteresolution/resolver/framework/reconciler_test.go b/upstream/pkg/remoteresolution/resolver/framework/reconciler_test.go
deleted file mode 100644
index 1a437ddfc89..00000000000
--- a/upstream/pkg/remoteresolution/resolver/framework/reconciler_test.go
+++ /dev/null
@@ -1,431 +0,0 @@
-/*
- Copyright 2022 The Tekton Authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package framework_test
-
-import (
- "context"
- "encoding/base64"
- "errors"
- "strings"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
- resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
- resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
- "github.com/tektoncd/pipeline/test"
- "github.com/tektoncd/pipeline/test/diff"
- "github.com/tektoncd/pipeline/test/names"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/client-go/tools/record"
- clock "k8s.io/utils/clock/testing"
- "knative.dev/pkg/apis"
- duckv1 "knative.dev/pkg/apis/duck/v1"
- cminformer "knative.dev/pkg/configmap/informer"
- "knative.dev/pkg/controller"
- "knative.dev/pkg/logging"
- pkgreconciler "knative.dev/pkg/reconciler"
- "knative.dev/pkg/system"
- _ "knative.dev/pkg/system/testing" // Setup system.Namespace()
-)
-
-var (
- now = time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC)
- testClock = clock.NewFakePassiveClock(now)
- ignoreLastTransitionTime = cmpopts.IgnoreFields(apis.Condition{}, "LastTransitionTime.Inner.Time")
-)
-
-func TestReconcile(t *testing.T) {
- testCases := []struct {
- name string
- inputRequest *v1beta1.ResolutionRequest
- paramMap map[string]*resolutionframework.FakeResolvedResource
- reconcilerTimeout time.Duration
- expectedStatus *v1beta1.ResolutionRequestStatus
- expectedErr error
- transient bool
- }{
- {
- name: "unknown value",
- inputRequest: &v1beta1.ResolutionRequest{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "resolution.tekton.dev/v1beta1",
- Kind: "ResolutionRequest",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "rr",
- Namespace: "foo",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{
- resolutioncommon.LabelKeyResolverType: resolutionframework.LabelValueFakeResolverType,
- },
- },
- Spec: v1beta1.ResolutionRequestSpec{
- Params: []pipelinev1.Param{{
- Name: resolutionframework.FakeParamName,
- Value: *pipelinev1.NewStructuredValues("bar"),
- }},
- },
- Status: v1beta1.ResolutionRequestStatus{},
- },
- expectedErr: errors.New("error getting \"Fake\" \"foo/rr\": couldn't find resource for param value bar"),
- }, {
- name: "known value",
- inputRequest: &v1beta1.ResolutionRequest{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "resolution.tekton.dev/v1beta1",
- Kind: "ResolutionRequest",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "rr",
- Namespace: "foo",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{
- resolutioncommon.LabelKeyResolverType: resolutionframework.LabelValueFakeResolverType,
- },
- },
- Spec: v1beta1.ResolutionRequestSpec{
- Params: []pipelinev1.Param{{
- Name: resolutionframework.FakeParamName,
- Value: *pipelinev1.NewStructuredValues("bar"),
- }},
- },
- Status: v1beta1.ResolutionRequestStatus{},
- },
- paramMap: map[string]*resolutionframework.FakeResolvedResource{
- "bar": {
- Content: "some content",
- AnnotationMap: map[string]string{"foo": "bar"},
- ContentSource: &pipelinev1.RefSource{
- URI: "https://abc.com",
- Digest: map[string]string{
- "sha1": "xyz",
- },
- EntryPoint: "foo/bar",
- },
- },
- },
- expectedStatus: &v1beta1.ResolutionRequestStatus{
- Status: duckv1.Status{
- Annotations: map[string]string{
- "foo": "bar",
- },
- },
- ResolutionRequestStatusFields: v1beta1.ResolutionRequestStatusFields{
- Data: base64.StdEncoding.Strict().EncodeToString([]byte("some content")),
- RefSource: &pipelinev1.RefSource{
- URI: "https://abc.com",
- Digest: map[string]string{
- "sha1": "xyz",
- },
- EntryPoint: "foo/bar",
- },
- Source: &pipelinev1.RefSource{
- URI: "https://abc.com",
- Digest: map[string]string{
- "sha1": "xyz",
- },
- EntryPoint: "foo/bar",
- },
- },
- },
- }, {
- name: "unknown url",
- inputRequest: &v1beta1.ResolutionRequest{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "resolution.tekton.dev/v1beta1",
- Kind: "ResolutionRequest",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "rr",
- Namespace: "foo",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{
- resolutioncommon.LabelKeyResolverType: resolutionframework.LabelValueFakeResolverType,
- },
- },
- Spec: v1beta1.ResolutionRequestSpec{
- URL: "dne://does-not-exist",
- },
- Status: v1beta1.ResolutionRequestStatus{},
- },
- expectedErr: errors.New("invalid resource request \"foo/rr\": Wrong url. Expected: fake://url, Got: dne://does-not-exist"),
- }, {
- name: "valid url",
- inputRequest: &v1beta1.ResolutionRequest{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "resolution.tekton.dev/v1beta1",
- Kind: "ResolutionRequest",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "rr",
- Namespace: "foo",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{
- resolutioncommon.LabelKeyResolverType: resolutionframework.LabelValueFakeResolverType,
- },
- },
- Spec: v1beta1.ResolutionRequestSpec{
- URL: framework.FakeUrl,
- },
- Status: v1beta1.ResolutionRequestStatus{},
- },
- paramMap: map[string]*resolutionframework.FakeResolvedResource{
- framework.FakeUrl: {
- Content: "some content",
- AnnotationMap: map[string]string{"foo": "bar"},
- ContentSource: &pipelinev1.RefSource{
- URI: "https://abc.com",
- Digest: map[string]string{
- "sha1": "xyz",
- },
- EntryPoint: "foo/bar",
- },
- },
- },
- expectedStatus: &v1beta1.ResolutionRequestStatus{
- Status: duckv1.Status{
- Annotations: map[string]string{
- "foo": "bar",
- },
- },
- ResolutionRequestStatusFields: v1beta1.ResolutionRequestStatusFields{
- Data: base64.StdEncoding.Strict().EncodeToString([]byte("some content")),
- RefSource: &pipelinev1.RefSource{
- URI: "https://abc.com",
- Digest: map[string]string{
- "sha1": "xyz",
- },
- EntryPoint: "foo/bar",
- },
- Source: &pipelinev1.RefSource{
- URI: "https://abc.com",
- Digest: map[string]string{
- "sha1": "xyz",
- },
- EntryPoint: "foo/bar",
- },
- },
- },
- }, {
- name: "resource not found for url",
- inputRequest: &v1beta1.ResolutionRequest{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "resolution.tekton.dev/v1beta1",
- Kind: "ResolutionRequest",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "rr",
- Namespace: "foo",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{
- resolutioncommon.LabelKeyResolverType: resolutionframework.LabelValueFakeResolverType,
- },
- },
- Spec: v1beta1.ResolutionRequestSpec{
- URL: framework.FakeUrl,
- },
- Status: v1beta1.ResolutionRequestStatus{},
- },
- paramMap: map[string]*resolutionframework.FakeResolvedResource{
- "other://resource": {
- Content: "some content",
- AnnotationMap: map[string]string{"foo": "bar"},
- ContentSource: &pipelinev1.RefSource{
- URI: "https://abc.com",
- Digest: map[string]string{
- "sha1": "xyz",
- },
- EntryPoint: "foo/bar",
- },
- },
- },
- expectedErr: errors.New("error getting \"Fake\" \"foo/rr\": couldn't find resource for url fake://url"),
- }, {
- name: "invalid params",
- inputRequest: &v1beta1.ResolutionRequest{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "resolution.tekton.dev/v1beta1",
- Kind: "ResolutionRequest",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "rr",
- Namespace: "foo",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{
- resolutioncommon.LabelKeyResolverType: resolutionframework.LabelValueFakeResolverType,
- },
- },
- Spec: v1beta1.ResolutionRequestSpec{
- Params: []pipelinev1.Param{{
- Name: "not-a-fake-param",
- Value: *pipelinev1.NewStructuredValues("bar"),
- }},
- },
- Status: v1beta1.ResolutionRequestStatus{},
- },
- expectedErr: errors.New(`invalid resource request "foo/rr": missing fake-key`),
- }, {
- name: "error resolving",
- inputRequest: &v1beta1.ResolutionRequest{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "resolution.tekton.dev/v1beta1",
- Kind: "ResolutionRequest",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "rr",
- Namespace: "foo",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{
- resolutioncommon.LabelKeyResolverType: resolutionframework.LabelValueFakeResolverType,
- },
- },
- Spec: v1beta1.ResolutionRequestSpec{
- Params: []pipelinev1.Param{{
- Name: resolutionframework.FakeParamName,
- Value: *pipelinev1.NewStructuredValues("bar"),
- }},
- },
- Status: v1beta1.ResolutionRequestStatus{},
- },
- paramMap: map[string]*resolutionframework.FakeResolvedResource{
- "bar": {
- ErrorWith: "fake failure",
- },
- },
- expectedErr: errors.New(`error getting "Fake" "foo/rr": fake failure`),
- }, {
- name: "timeout",
- inputRequest: &v1beta1.ResolutionRequest{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "resolution.tekton.dev/v1beta1",
- Kind: "ResolutionRequest",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "rr",
- Namespace: "foo",
- CreationTimestamp: metav1.Time{Time: time.Now().Add(-59 * time.Second)}, // 1 second before default timeout
- Labels: map[string]string{
- resolutioncommon.LabelKeyResolverType: resolutionframework.LabelValueFakeResolverType,
- },
- },
- Spec: v1beta1.ResolutionRequestSpec{
- Params: []pipelinev1.Param{{
- Name: resolutionframework.FakeParamName,
- Value: *pipelinev1.NewStructuredValues("bar"),
- }},
- },
- Status: v1beta1.ResolutionRequestStatus{},
- },
- paramMap: map[string]*resolutionframework.FakeResolvedResource{
- "bar": {
- WaitFor: 1100 * time.Millisecond,
- },
- },
- reconcilerTimeout: 1 * time.Second,
- expectedErr: errors.New("context deadline exceeded"),
- transient: true,
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- d := test.Data{
- ResolutionRequests: []*v1beta1.ResolutionRequest{tc.inputRequest},
- }
-
- fakeResolver := &framework.FakeResolver{ForParam: tc.paramMap}
- if tc.reconcilerTimeout > 0 {
- fakeResolver.Timeout = tc.reconcilerTimeout
- }
-
- ctx, _ := ttesting.SetupFakeContext(t)
- testAssets, cancel := getResolverFrameworkController(ctx, t, d, fakeResolver, setClockOnReconciler)
- defer cancel()
-
- err := testAssets.Controller.Reconciler.Reconcile(testAssets.Ctx, getRequestName(tc.inputRequest))
- if tc.expectedErr != nil {
- if err == nil {
- t.Fatalf("expected to get error %v, but got nothing", tc.expectedErr)
- }
- if tc.expectedErr.Error() != err.Error() {
- t.Fatalf("expected to get error %v, but got %v", tc.expectedErr, err)
- }
- if tc.transient && controller.IsPermanentError(err) {
- t.Fatalf("exepected error to not be wrapped as permanent %v", err)
- }
- } else {
- if err != nil {
- if ok, _ := controller.IsRequeueKey(err); !ok {
- t.Fatalf("did not expect an error, but got %v", err)
- }
- }
-
- c := testAssets.Clients.ResolutionRequests.ResolutionV1beta1()
- reconciledRR, err := c.ResolutionRequests(tc.inputRequest.Namespace).Get(testAssets.Ctx, tc.inputRequest.Name, metav1.GetOptions{})
- if err != nil {
- t.Fatalf("getting updated ResolutionRequest: %v", err)
- }
- if d := cmp.Diff(*tc.expectedStatus, reconciledRR.Status, ignoreLastTransitionTime); d != "" {
- t.Errorf("ResolutionRequest status doesn't match %s", diff.PrintWantGot(d))
- }
- }
- })
- }
-}
-
-func getResolverFrameworkController(ctx context.Context, t *testing.T, d test.Data, resolver framework.Resolver, modifiers ...framework.ReconcilerModifier) (test.Assets, func()) {
- t.Helper()
- names.TestingSeed()
-
- ctx, cancel := context.WithCancel(ctx)
- c, informers := test.SeedTestData(t, ctx, d)
- configMapWatcher := cminformer.NewInformedWatcher(c.Kube, system.Namespace())
- ctl := framework.NewController(ctx, resolver, modifiers...)(ctx, configMapWatcher)
- if err := configMapWatcher.Start(ctx.Done()); err != nil {
- t.Fatalf("error starting configmap watcher: %v", err)
- }
-
- if la, ok := ctl.Reconciler.(pkgreconciler.LeaderAware); ok {
- _ = la.Promote(pkgreconciler.UniversalBucket(), func(pkgreconciler.Bucket, types.NamespacedName) {})
- }
-
- return test.Assets{
- Logger: logging.FromContext(ctx),
- Controller: ctl,
- Clients: c,
- Informers: informers,
- Recorder: controller.GetEventRecorder(ctx).(*record.FakeRecorder),
- Ctx: ctx,
- }, cancel
-}
-
-func getRequestName(rr *v1beta1.ResolutionRequest) string {
- return strings.Join([]string{rr.Namespace, rr.Name}, "/")
-}
-
-func setClockOnReconciler(r *framework.Reconciler) {
- if r.Clock == nil {
- r.Clock = testClock
- }
-}
diff --git a/upstream/pkg/remoteresolution/resolver/framework/testing/fakecontroller.go b/upstream/pkg/remoteresolution/resolver/framework/testing/fakecontroller.go
deleted file mode 100644
index eefee4263da..00000000000
--- a/upstream/pkg/remoteresolution/resolver/framework/testing/fakecontroller.go
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- Copyright 2022 The Tekton Authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package testing
-
-import (
- "context"
- "encoding/base64"
- "strings"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
- "github.com/tektoncd/pipeline/test"
- "github.com/tektoncd/pipeline/test/diff"
- "github.com/tektoncd/pipeline/test/names"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/client-go/tools/record"
- testclock "k8s.io/utils/clock/testing"
- "knative.dev/pkg/apis"
- cminformer "knative.dev/pkg/configmap/informer"
- "knative.dev/pkg/controller"
- "knative.dev/pkg/logging"
- pkgreconciler "knative.dev/pkg/reconciler"
- "knative.dev/pkg/system"
-)
-
-var (
- now = time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC)
- testClock = testclock.NewFakePassiveClock(now)
- ignoreLastTransitionTime = cmpopts.IgnoreFields(apis.Condition{}, "LastTransitionTime.Inner.Time")
-)
-
-// ResolverReconcileTestModifier is a function thaat will be invoked after the test assets and controller have been created
-type ResolverReconcileTestModifier = func(resolver framework.Resolver, testAssets test.Assets)
-
-// RunResolverReconcileTest takes data to seed clients and informers, a Resolver, a ResolutionRequest, and the expected
-// ResolutionRequestStatus and error, both of which can be nil. It instantiates a controller for that resolver and
-// reconciles the given request. It then checks for the expected error, if any, and compares the resulting status with
-// the expected status.
-func RunResolverReconcileTest(ctx context.Context, t *testing.T, d test.Data, resolver framework.Resolver, request *v1beta1.ResolutionRequest,
- expectedStatus *v1beta1.ResolutionRequestStatus, expectedErr error, resolverModifiers ...ResolverReconcileTestModifier) {
- t.Helper()
-
- testAssets, cancel := GetResolverFrameworkController(ctx, t, d, resolver, setClockOnReconciler)
- defer cancel()
-
- for _, rm := range resolverModifiers {
- rm(resolver, testAssets)
- }
-
- err := testAssets.Controller.Reconciler.Reconcile(testAssets.Ctx, getRequestName(request)) //nolint
- if expectedErr != nil {
- if err == nil {
- t.Fatalf("expected to get error: `%v`, but got nothing", expectedErr)
- }
- if expectedErr.Error() != err.Error() {
- t.Fatalf("expected to get error `%v`, but got `%v`", expectedErr, err)
- }
- } else if err != nil {
- if ok, _ := controller.IsRequeueKey(err); !ok {
- t.Fatalf("did not expect an error, but got `%v`", err)
- }
- }
-
- c := testAssets.Clients.ResolutionRequests.ResolutionV1beta1()
- reconciledRR, err := c.ResolutionRequests(request.Namespace).Get(testAssets.Ctx, request.Name, metav1.GetOptions{}) //nolint
- if err != nil {
- t.Fatalf("getting updated ResolutionRequest: %v", err)
- }
- if expectedStatus != nil {
- if d := cmp.Diff(*expectedStatus, reconciledRR.Status, ignoreLastTransitionTime); d != "" {
- t.Errorf("ResolutionRequest status doesn't match %s", diff.PrintWantGot(d))
- if expectedStatus.Data != "" && expectedStatus.Data != reconciledRR.Status.Data {
- decodedExpectedData, err := base64.StdEncoding.Strict().DecodeString(expectedStatus.Data)
- if err != nil {
- t.Errorf("couldn't decode expected data: %v", err)
- return
- }
- decodedGotData, err := base64.StdEncoding.Strict().DecodeString(reconciledRR.Status.Data)
- if err != nil {
- t.Errorf("couldn't decode reconciled data: %v", err)
- return
- }
- if d := cmp.Diff(decodedExpectedData, decodedGotData); d != "" {
- t.Errorf("decoded data did not match expected: %s", diff.PrintWantGot(d))
- }
- }
- }
- }
-}
-
-// GetResolverFrameworkController returns an instance of the resolver framework controller/reconciler using the given resolver,
-// seeded with d, where d represents the state of the system (existing resources) needed for the test.
-func GetResolverFrameworkController(ctx context.Context, t *testing.T, d test.Data, resolver framework.Resolver, modifiers ...framework.ReconcilerModifier) (test.Assets, func()) {
- t.Helper()
- names.TestingSeed()
- return initializeResolverFrameworkControllerAssets(ctx, t, d, resolver, modifiers...)
-}
-
-func initializeResolverFrameworkControllerAssets(ctx context.Context, t *testing.T, d test.Data, resolver framework.Resolver, modifiers ...framework.ReconcilerModifier) (test.Assets, func()) {
- t.Helper()
- ctx, cancel := context.WithCancel(ctx)
- ensureConfigurationConfigMapsExist(&d)
- c, informers := test.SeedTestData(t, ctx, d)
- configMapWatcher := cminformer.NewInformedWatcher(c.Kube, resolverconfig.ResolversNamespace(system.Namespace()))
- ctl := framework.NewController(ctx, resolver, modifiers...)(ctx, configMapWatcher)
- if err := configMapWatcher.Start(ctx.Done()); err != nil {
- t.Fatalf("error starting configmap watcher: %v", err)
- }
-
- if la, ok := ctl.Reconciler.(pkgreconciler.LeaderAware); ok {
- _ = la.Promote(pkgreconciler.UniversalBucket(), func(pkgreconciler.Bucket, types.NamespacedName) {})
- }
-
- return test.Assets{
- Logger: logging.FromContext(ctx),
- Controller: ctl,
- Clients: c,
- Informers: informers,
- Recorder: controller.GetEventRecorder(ctx).(*record.FakeRecorder),
- Ctx: ctx,
- }, cancel
-}
-
-func getRequestName(rr *v1beta1.ResolutionRequest) string {
- return strings.Join([]string{rr.Namespace, rr.Name}, "/")
-}
-
-func setClockOnReconciler(r *framework.Reconciler) {
- if r.Clock == nil {
- r.Clock = testClock
- }
-}
-
-func ensureConfigurationConfigMapsExist(d *test.Data) {
- var featureFlagsExists bool
- for _, cm := range d.ConfigMaps {
- if cm.Name == resolverconfig.GetFeatureFlagsConfigName() {
- featureFlagsExists = true
- }
- }
- if !featureFlagsExists {
- d.ConfigMaps = append(d.ConfigMaps, &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Name: resolverconfig.GetFeatureFlagsConfigName(),
- Namespace: resolverconfig.ResolversNamespace(system.Namespace()),
- },
- Data: map[string]string{},
- })
- }
-}
diff --git a/upstream/pkg/remoteresolution/resolver/git/resolver.go b/upstream/pkg/remoteresolution/resolver/git/resolver.go
deleted file mode 100644
index 3a3734d4fbf..00000000000
--- a/upstream/pkg/remoteresolution/resolver/git/resolver.go
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package git
-
-import (
- "context"
- "errors"
- "time"
-
- "github.com/jenkins-x/go-scm/scm"
- "github.com/jenkins-x/go-scm/scm/factory"
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
- resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
- resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
- "github.com/tektoncd/pipeline/pkg/resolution/resolver/git"
- "go.uber.org/zap"
- "k8s.io/apimachinery/pkg/util/cache"
- "k8s.io/client-go/kubernetes"
- kubeclient "knative.dev/pkg/client/injection/kube/client"
- "knative.dev/pkg/logging"
-)
-
-const (
- disabledError = "cannot handle resolution request, enable-git-resolver feature flag not true"
-
- // labelValueGitResolverType is the value to use for the
- // resolution.tekton.dev/type label on resource requests
- labelValueGitResolverType string = "git"
-
- // gitResolverName is the name that the git resolver should be
- // associated with
- gitResolverName string = "Git"
-
- // ConfigMapName is the git resolver's config map
- ConfigMapName = "git-resolver-config"
-
- // cacheSize is the size of the LRU secrets cache
- cacheSize = 1024
- // ttl is the time to live for a cache entry
- ttl = 5 * time.Minute
-)
-
-var _ framework.Resolver = &Resolver{}
-
-// Resolver implements a framework.Resolver that can fetch files from git.
-type Resolver struct {
- kubeClient kubernetes.Interface
- logger *zap.SugaredLogger
- cache *cache.LRUExpireCache
- ttl time.Duration
-
- // Used in testing
- clientFunc func(string, string, string, ...factory.ClientOptionFunc) (*scm.Client, error)
-}
-
-// Initialize performs any setup required by the gitresolver.
-func (r *Resolver) Initialize(ctx context.Context) error {
- r.kubeClient = kubeclient.Get(ctx)
- r.logger = logging.FromContext(ctx)
- r.cache = cache.NewLRUExpireCache(cacheSize)
- r.ttl = ttl
- if r.clientFunc == nil {
- r.clientFunc = factory.NewClient
- }
- return nil
-}
-
-// GetName returns the string name that the gitresolver should be
-// associated with.
-func (r *Resolver) GetName(_ context.Context) string {
- return gitResolverName
-}
-
-// GetSelector returns the labels that resource requests are required to have for
-// the gitresolver to process them.
-func (r *Resolver) GetSelector(_ context.Context) map[string]string {
- return map[string]string{
- resolutioncommon.LabelKeyResolverType: labelValueGitResolverType,
- }
-}
-
-// ValidateParams returns an error if the given parameter map is not
-// valid for a resource request targeting the gitresolver.
-func (r *Resolver) Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error {
- if len(req.Params) > 0 {
- return git.ValidateParams(ctx, req.Params)
- }
- // Remove this error once validate url has been implemented.
- return errors.New("cannot validate request. the Validate method has not been implemented.")
-}
-
-// Resolve performs the work of fetching a file from git given a map of
-// parameters.
-func (r *Resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error) {
- if len(req.Params) > 0 {
- origParams := req.Params
-
- if git.IsDisabled(ctx) {
- return nil, errors.New(disabledError)
- }
-
- params, err := git.PopulateDefaultParams(ctx, origParams)
- if err != nil {
- return nil, err
- }
-
- if params[git.UrlParam] != "" {
- return git.ResolveAnonymousGit(ctx, params)
- }
-
- return git.ResolveAPIGit(ctx, params, r.kubeClient, r.logger, r.cache, r.ttl, r.clientFunc)
- }
- // Remove this error once resolution of url has been implemented.
- return nil, errors.New("the Resolve method has not been implemented.")
-}
-
-var _ resolutionframework.ConfigWatcher = &Resolver{}
-
-// GetConfigName returns the name of the git resolver's configmap.
-func (r *Resolver) GetConfigName(context.Context) string {
- return ConfigMapName
-}
-
-var _ resolutionframework.TimedResolution = &Resolver{}
-
-// GetResolutionTimeout returns a time.Duration for the amount of time a
-// single git fetch may take. This can be configured with the
-// fetch-timeout field in the git-resolver-config configmap.
-func (r *Resolver) GetResolutionTimeout(ctx context.Context, defaultTimeout time.Duration, params map[string]string) (time.Duration, error) {
- conf, err := git.GetScmConfigForParamConfigKey(ctx, params)
- if err != nil {
- return time.Duration(0), err
- }
- if timeoutString := conf.Timeout; timeoutString != "" {
- timeout, err := time.ParseDuration(timeoutString)
- if err != nil {
- return time.Duration(0), err
- }
- return timeout, nil
- }
- return defaultTimeout, nil
-}
diff --git a/upstream/pkg/remoteresolution/resolver/git/resolver_test.go b/upstream/pkg/remoteresolution/resolver/git/resolver_test.go
deleted file mode 100644
index 84c5eb7c3e4..00000000000
--- a/upstream/pkg/remoteresolution/resolver/git/resolver_test.go
+++ /dev/null
@@ -1,982 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package git
-
-import (
- "context"
- "encoding/base64"
- "errors"
- "fmt"
- "os"
- "path/filepath"
- "testing"
- "time"
-
- "github.com/go-git/go-git/v5"
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/object"
- "github.com/google/go-cmp/cmp"
- "github.com/jenkins-x/go-scm/scm"
- "github.com/jenkins-x/go-scm/scm/driver/fake"
- "github.com/jenkins-x/go-scm/scm/factory"
- resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
- pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- "github.com/tektoncd/pipeline/pkg/internal/resolution"
- ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
- frtesting "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/testing"
- common "github.com/tektoncd/pipeline/pkg/resolution/common"
- resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
- frameworktesting "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework/testing"
- gitresolution "github.com/tektoncd/pipeline/pkg/resolution/resolver/git"
- "github.com/tektoncd/pipeline/test"
- "github.com/tektoncd/pipeline/test/diff"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "knative.dev/pkg/system"
- _ "knative.dev/pkg/system/testing"
-)
-
-func TestGetSelector(t *testing.T) {
- resolver := Resolver{}
- sel := resolver.GetSelector(context.Background())
- if typ, has := sel[common.LabelKeyResolverType]; !has {
- t.Fatalf("unexpected selector: %v", sel)
- } else if typ != labelValueGitResolverType {
- t.Fatalf("unexpected type: %q", typ)
- }
-}
-
-func TestValidateParams(t *testing.T) {
- tests := []struct {
- name string
- wantErr string
- params map[string]string
- }{
- {
- name: "params with revision",
- params: map[string]string{
- gitresolution.UrlParam: "http://foo/bar/hello/moto",
- gitresolution.PathParam: "bar",
- gitresolution.RevisionParam: "baz",
- },
- },
- {
- name: "https url",
- params: map[string]string{
- gitresolution.UrlParam: "https://foo/bar/hello/moto",
- gitresolution.PathParam: "bar",
- gitresolution.RevisionParam: "baz",
- },
- },
- {
- name: "https url with username password",
- params: map[string]string{
- gitresolution.UrlParam: "https://user:pass@foo/bar/hello/moto",
- gitresolution.PathParam: "bar",
- gitresolution.RevisionParam: "baz",
- },
- },
- {
- name: "git server url",
- params: map[string]string{
- gitresolution.UrlParam: "git://repo/hello/moto",
- gitresolution.PathParam: "bar",
- gitresolution.RevisionParam: "baz",
- },
- },
- {
- name: "git url from a local repository",
- params: map[string]string{
- gitresolution.UrlParam: "/tmp/repo",
- gitresolution.PathParam: "bar",
- gitresolution.RevisionParam: "baz",
- },
- },
- {
- name: "git url from a git ssh repository",
- params: map[string]string{
- gitresolution.UrlParam: "git@host.com:foo/bar",
- gitresolution.PathParam: "bar",
- gitresolution.RevisionParam: "baz",
- },
- },
- {
- name: "bad url",
- params: map[string]string{
- gitresolution.UrlParam: "foo://bar",
- gitresolution.PathParam: "path",
- gitresolution.RevisionParam: "revision",
- },
- wantErr: "invalid git repository url: foo://bar",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- resolver := Resolver{}
- err := resolver.Validate(context.Background(), &v1beta1.ResolutionRequestSpec{Params: toParams(tt.params)})
- if tt.wantErr == "" {
- if err != nil {
- t.Fatalf("unexpected error validating params: %v", err)
- }
- return
- }
-
- if d := cmp.Diff(tt.wantErr, err.Error()); d != "" {
- t.Errorf("unexpected error: %s", diff.PrintWantGot(d))
- }
- })
- }
-}
-
-func TestValidateParamsNotEnabled(t *testing.T) {
- resolver := Resolver{}
-
- var err error
-
- someParams := map[string]string{
- gitresolution.PathParam: "bar",
- gitresolution.RevisionParam: "baz",
- }
- err = resolver.Validate(resolverDisabledContext(), &v1beta1.ResolutionRequestSpec{Params: toParams(someParams)})
- if err == nil {
- t.Fatalf("expected disabled err")
- }
- if d := cmp.Diff(disabledError, err.Error()); d != "" {
- t.Errorf("unexpected error: %s", diff.PrintWantGot(d))
- }
-}
-
-func TestValidateParams_Failure(t *testing.T) {
- testCases := []struct {
- name string
- params map[string]string
- expectedErr string
- }{
- {
- name: "missing multiple",
- params: map[string]string{
- gitresolution.OrgParam: "abcd1234",
- gitresolution.RepoParam: "foo",
- },
- expectedErr: fmt.Sprintf("missing required git resolver params: %s, %s", gitresolution.RevisionParam, gitresolution.PathParam),
- }, {
- name: "no repo or url",
- params: map[string]string{
- gitresolution.RevisionParam: "abcd1234",
- gitresolution.PathParam: "/foo/bar",
- },
- expectedErr: "must specify one of 'url' or 'repo'",
- }, {
- name: "both repo and url",
- params: map[string]string{
- gitresolution.RevisionParam: "abcd1234",
- gitresolution.PathParam: "/foo/bar",
- gitresolution.UrlParam: "http://foo",
- gitresolution.RepoParam: "foo",
- },
- expectedErr: "cannot specify both 'url' and 'repo'",
- }, {
- name: "no org with repo",
- params: map[string]string{
- gitresolution.RevisionParam: "abcd1234",
- gitresolution.PathParam: "/foo/bar",
- gitresolution.RepoParam: "foo",
- },
- expectedErr: "'org' is required when 'repo' is specified",
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- resolver := &Resolver{}
- err := resolver.Validate(context.Background(), &v1beta1.ResolutionRequestSpec{Params: toParams(tc.params)})
- if err == nil {
- t.Fatalf("got no error, but expected: %s", tc.expectedErr)
- }
- if d := cmp.Diff(tc.expectedErr, err.Error()); d != "" {
- t.Errorf("error did not match: %s", diff.PrintWantGot(d))
- }
- })
- }
-}
-
-func TestGetResolutionTimeoutDefault(t *testing.T) {
- resolver := Resolver{}
- defaultTimeout := 30 * time.Minute
- timeout, err := resolver.GetResolutionTimeout(context.Background(), defaultTimeout, map[string]string{})
- if err != nil {
- t.Fatalf("couldn't get default-timeout: %v", err)
- }
- if timeout != defaultTimeout {
- t.Fatalf("expected default timeout to be returned")
- }
-}
-
-func TestGetResolutionTimeoutCustom(t *testing.T) {
- resolver := Resolver{}
- defaultTimeout := 30 * time.Minute
- configTimeout := 5 * time.Second
- config := map[string]string{
- gitresolution.DefaultTimeoutKey: configTimeout.String(),
- }
- ctx := resolutionframework.InjectResolverConfigToContext(context.Background(), config)
- timeout, err := resolver.GetResolutionTimeout(ctx, defaultTimeout, map[string]string{})
- if err != nil {
- t.Fatalf("couldn't get default-timeout: %v", err)
- }
- if timeout != configTimeout {
- t.Fatalf("expected timeout from config to be returned")
- }
-}
-
-func TestGetResolutionTimeoutCustomIdentifier(t *testing.T) {
- resolver := Resolver{}
- defaultTimeout := 30 * time.Minute
- configTimeout := 5 * time.Second
- identifierConfigTImeout := 10 * time.Second
- config := map[string]string{
- gitresolution.DefaultTimeoutKey: configTimeout.String(),
- "foo." + gitresolution.DefaultTimeoutKey: identifierConfigTImeout.String(),
- }
- ctx := resolutionframework.InjectResolverConfigToContext(context.Background(), config)
- timeout, err := resolver.GetResolutionTimeout(ctx, defaultTimeout, map[string]string{"configKey": "foo"})
- if err != nil {
- t.Fatalf("couldn't get default-timeout: %v", err)
- }
- if timeout != identifierConfigTImeout {
- t.Fatalf("expected timeout from config to be returned")
- }
-}
-
-func TestResolveNotEnabled(t *testing.T) {
- resolver := Resolver{}
-
- var err error
-
- someParams := map[string]string{
- gitresolution.PathParam: "bar",
- gitresolution.RevisionParam: "baz",
- }
- _, err = resolver.Resolve(resolverDisabledContext(), &v1beta1.ResolutionRequestSpec{Params: toParams(someParams)})
- if err == nil {
- t.Fatalf("expected disabled err")
- }
- if d := cmp.Diff(disabledError, err.Error()); d != "" {
- t.Errorf("unexpected error: %s", diff.PrintWantGot(d))
- }
-}
-
-type params struct {
- url string
- revision string
- pathInRepo string
- org string
- repo string
- token string
- tokenKey string
- namespace string
- serverURL string
- scmType string
- configKey string
-}
-
-func TestResolve(t *testing.T) {
- // local repo set up for anonymous cloning
- // ----
- commits := []commitForRepo{{
- Dir: "foo/",
- Filename: "old",
- Content: "old content in test branch",
- Branch: "test-branch",
- }, {
- Dir: "foo/",
- Filename: "new",
- Content: "new content in test branch",
- Branch: "test-branch",
- }, {
- Dir: "./",
- Filename: "released",
- Content: "released content in main branch and in tag v1",
- Tag: "v1",
- }}
-
- anonFakeRepoURL, commitSHAsInAnonRepo := createTestRepo(t, commits)
-
- // local repo set up for scm cloning
- // ----
- withTemporaryGitConfig(t)
-
- testOrg := "test-org"
- testRepo := "test-repo"
-
- refsDir := filepath.Join("testdata", "test-org", "test-repo", "refs")
- mainPipelineYAML, err := os.ReadFile(filepath.Join(refsDir, "main", "pipelines", "example-pipeline.yaml"))
- if err != nil {
- t.Fatalf("couldn't read main pipeline: %v", err)
- }
- otherPipelineYAML, err := os.ReadFile(filepath.Join(refsDir, "other", "pipelines", "example-pipeline.yaml"))
- if err != nil {
- t.Fatalf("couldn't read other pipeline: %v", err)
- }
-
- mainTaskYAML, err := os.ReadFile(filepath.Join(refsDir, "main", "tasks", "example-task.yaml"))
- if err != nil {
- t.Fatalf("couldn't read main task: %v", err)
- }
-
- commitSHAsInSCMRepo := []string{"abc", "xyz"}
-
- scmFakeRepoURL := fmt.Sprintf("https://fake/%s/%s.git", testOrg, testRepo)
- resolver := &Resolver{
- clientFunc: func(driver string, serverURL string, token string, opts ...factory.ClientOptionFunc) (*scm.Client, error) {
- scmClient, scmData := fake.NewDefault()
-
- // repository service
- scmData.Repositories = []*scm.Repository{{
- FullName: fmt.Sprintf("%s/%s", testOrg, testRepo),
- Clone: scmFakeRepoURL,
- }}
-
- // git service
- scmData.Commits = map[string]*scm.Commit{
- "main": {Sha: commitSHAsInSCMRepo[0]},
- "other": {Sha: commitSHAsInSCMRepo[1]},
- }
- return scmClient, nil
- },
- }
-
- testCases := []struct {
- name string
- args *params
- config map[string]string
- apiToken string
- expectedCommitSHA string
- expectedStatus *v1beta1.ResolutionRequestStatus
- expectedErr error
- configIdentifer string
- }{{
- name: "clone: default revision main",
- args: ¶ms{
- pathInRepo: "./released",
- url: anonFakeRepoURL,
- },
- expectedCommitSHA: commitSHAsInAnonRepo[2],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData([]byte("released content in main branch and in tag v1")),
- }, {
- name: "clone: revision is tag name",
- args: ¶ms{
- revision: "v1",
- pathInRepo: "./released",
- url: anonFakeRepoURL,
- },
- expectedCommitSHA: commitSHAsInAnonRepo[2],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData([]byte("released content in main branch and in tag v1")),
- }, {
- name: "clone: revision is the full tag name i.e. refs/tags/v1",
- args: ¶ms{
- revision: "refs/tags/v1",
- pathInRepo: "./released",
- url: anonFakeRepoURL,
- },
- expectedCommitSHA: commitSHAsInAnonRepo[2],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData([]byte("released content in main branch and in tag v1")),
- }, {
- name: "clone: revision is a branch name",
- args: ¶ms{
- revision: "test-branch",
- pathInRepo: "foo/new",
- url: anonFakeRepoURL,
- },
- expectedCommitSHA: commitSHAsInAnonRepo[1],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData([]byte("new content in test branch")),
- }, {
- name: "clone: revision is a specific commit sha",
- args: ¶ms{
- revision: commitSHAsInAnonRepo[0],
- pathInRepo: "foo/old",
- url: anonFakeRepoURL,
- },
- expectedCommitSHA: commitSHAsInAnonRepo[0],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData([]byte("old content in test branch")),
- }, {
- name: "clone: file does not exist",
- args: ¶ms{
- pathInRepo: "foo/non-exist",
- url: anonFakeRepoURL,
- },
- expectedErr: createError(`error opening file "foo/non-exist": file does not exist`),
- }, {
- name: "clone: revision does not exist",
- args: ¶ms{
- revision: "non-existent-revision",
- pathInRepo: "foo/new",
- url: anonFakeRepoURL,
- },
- expectedErr: createError("revision error: reference not found"),
- }, {
- name: "api: successful task from params api information",
- args: ¶ms{
- revision: "main",
- pathInRepo: "tasks/example-task.yaml",
- org: testOrg,
- repo: testRepo,
- token: "token-secret",
- tokenKey: "token",
- namespace: "foo",
- },
- config: map[string]string{
- gitresolution.ServerURLKey: "fake",
- gitresolution.SCMTypeKey: "fake",
- },
- apiToken: "some-token",
- expectedCommitSHA: commitSHAsInSCMRepo[0],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(mainTaskYAML),
- }, {
- name: "api: successful task",
- args: ¶ms{
- revision: "main",
- pathInRepo: "tasks/example-task.yaml",
- org: testOrg,
- repo: testRepo,
- },
- config: map[string]string{
- gitresolution.ServerURLKey: "fake",
- gitresolution.SCMTypeKey: "fake",
- gitresolution.APISecretNameKey: "token-secret",
- gitresolution.APISecretKeyKey: "token",
- gitresolution.APISecretNamespaceKey: system.Namespace(),
- },
- apiToken: "some-token",
- expectedCommitSHA: commitSHAsInSCMRepo[0],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(mainTaskYAML),
- }, {
- name: "api: successful task from params api information with identifier",
- args: ¶ms{
- revision: "main",
- pathInRepo: "tasks/example-task.yaml",
- org: testOrg,
- repo: testRepo,
- token: "token-secret",
- tokenKey: "token",
- namespace: "foo",
- configKey: "test",
- },
- config: map[string]string{
- "test." + gitresolution.ServerURLKey: "fake",
- "test." + gitresolution.SCMTypeKey: "fake",
- },
- configIdentifer: "test.",
- apiToken: "some-token",
- expectedCommitSHA: commitSHAsInSCMRepo[0],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(mainTaskYAML),
- }, {
- name: "api: successful task with identifier",
- args: ¶ms{
- revision: "main",
- pathInRepo: "tasks/example-task.yaml",
- org: testOrg,
- repo: testRepo,
- configKey: "test",
- },
- config: map[string]string{
- "test." + gitresolution.ServerURLKey: "fake",
- "test." + gitresolution.SCMTypeKey: "fake",
- "test." + gitresolution.APISecretNameKey: "token-secret",
- "test." + gitresolution.APISecretKeyKey: "token",
- "test." + gitresolution.APISecretNamespaceKey: system.Namespace(),
- },
- configIdentifer: "test.",
- apiToken: "some-token",
- expectedCommitSHA: commitSHAsInSCMRepo[0],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(mainTaskYAML),
- }, {
- name: "api: successful pipeline",
- args: ¶ms{
- revision: "main",
- pathInRepo: "pipelines/example-pipeline.yaml",
- org: testOrg,
- repo: testRepo,
- },
- config: map[string]string{
- gitresolution.ServerURLKey: "fake",
- gitresolution.SCMTypeKey: "fake",
- gitresolution.APISecretNameKey: "token-secret",
- gitresolution.APISecretKeyKey: "token",
- gitresolution.APISecretNamespaceKey: system.Namespace(),
- },
- apiToken: "some-token",
- expectedCommitSHA: commitSHAsInSCMRepo[0],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(mainPipelineYAML),
- }, {
- name: "api: successful pipeline with default revision",
- args: ¶ms{
- pathInRepo: "pipelines/example-pipeline.yaml",
- org: testOrg,
- repo: testRepo,
- },
- config: map[string]string{
- gitresolution.ServerURLKey: "fake",
- gitresolution.SCMTypeKey: "fake",
- gitresolution.APISecretNameKey: "token-secret",
- gitresolution.APISecretKeyKey: "token",
- gitresolution.APISecretNamespaceKey: system.Namespace(),
- gitresolution.DefaultRevisionKey: "other",
- },
- apiToken: "some-token",
- expectedCommitSHA: commitSHAsInSCMRepo[1],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(otherPipelineYAML),
- }, {
- name: "api: successful override scm type and server URL from user params",
-
- args: ¶ms{
- revision: "main",
- pathInRepo: "tasks/example-task.yaml",
- org: testOrg,
- repo: testRepo,
- token: "token-secret",
- tokenKey: "token",
- namespace: "foo",
- scmType: "fake",
- serverURL: "fake",
- },
- config: map[string]string{
- gitresolution.ServerURLKey: "notsofake",
- gitresolution.SCMTypeKey: "definitivelynotafake",
- },
- apiToken: "some-token",
- expectedCommitSHA: commitSHAsInSCMRepo[0],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(mainTaskYAML),
- }, {
- name: "api: file does not exist",
- args: ¶ms{
- revision: "main",
- pathInRepo: "pipelines/other-pipeline.yaml",
- org: testOrg,
- repo: testRepo,
- },
- config: map[string]string{
- gitresolution.ServerURLKey: "fake",
- gitresolution.SCMTypeKey: "fake",
- gitresolution.APISecretNameKey: "token-secret",
- gitresolution.APISecretKeyKey: "token",
- gitresolution.APISecretNamespaceKey: system.Namespace(),
- },
- apiToken: "some-token",
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
- expectedErr: createError("couldn't fetch resource content: file testdata/test-org/test-repo/refs/main/pipelines/other-pipeline.yaml does not exist: stat testdata/test-org/test-repo/refs/main/pipelines/other-pipeline.yaml: no such file or directory"),
- }, {
- name: "api: token not found",
- args: ¶ms{
- revision: "main",
- pathInRepo: "pipelines/example-pipeline.yaml",
- org: testOrg,
- repo: testRepo,
- },
- config: map[string]string{
- gitresolution.ServerURLKey: "fake",
- gitresolution.SCMTypeKey: "fake",
- gitresolution.APISecretNameKey: "token-secret",
- gitresolution.APISecretKeyKey: "token",
- gitresolution.APISecretNamespaceKey: system.Namespace(),
- },
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
- expectedErr: createError("cannot get API token, secret token-secret not found in namespace " + system.Namespace()),
- }, {
- name: "api: token secret name not specified",
- args: ¶ms{
- revision: "main",
- pathInRepo: "pipelines/example-pipeline.yaml",
- org: testOrg,
- repo: testRepo,
- },
- config: map[string]string{
- gitresolution.ServerURLKey: "fake",
- gitresolution.SCMTypeKey: "fake",
- gitresolution.APISecretKeyKey: "token",
- gitresolution.APISecretNamespaceKey: system.Namespace(),
- },
- apiToken: "some-token",
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
- expectedErr: createError("cannot get API token, required when specifying 'repo' param, 'api-token-secret-name' not specified in config"),
- }, {
- name: "api: token secret key not specified",
- args: ¶ms{
- revision: "main",
- pathInRepo: "pipelines/example-pipeline.yaml",
- org: testOrg,
- repo: testRepo,
- },
- config: map[string]string{
- gitresolution.ServerURLKey: "fake",
- gitresolution.SCMTypeKey: "fake",
- gitresolution.APISecretNameKey: "token-secret",
- gitresolution.APISecretNamespaceKey: system.Namespace(),
- },
- apiToken: "some-token",
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
- expectedErr: createError("cannot get API token, required when specifying 'repo' param, 'api-token-secret-key' not specified in config"),
- }, {
- name: "api: SCM type not specified",
- args: ¶ms{
- revision: "main",
- pathInRepo: "pipelines/example-pipeline.yaml",
- org: testOrg,
- repo: testRepo,
- },
- config: map[string]string{
- gitresolution.APISecretNameKey: "token-secret",
- gitresolution.APISecretKeyKey: "token",
- gitresolution.APISecretNamespaceKey: system.Namespace(),
- },
- apiToken: "some-token",
- expectedCommitSHA: commitSHAsInSCMRepo[0],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(mainPipelineYAML),
- }}
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- ctx, _ := ttesting.SetupFakeContext(t)
-
- cfg := tc.config
- if cfg == nil {
- cfg = make(map[string]string)
- }
- cfg[tc.configIdentifer+gitresolution.DefaultTimeoutKey] = "1m"
- if cfg[tc.configIdentifer+gitresolution.DefaultRevisionKey] == "" {
- cfg[tc.configIdentifer+gitresolution.DefaultRevisionKey] = plumbing.Master.Short()
- }
-
- request := createRequest(tc.args)
-
- d := test.Data{
- ConfigMaps: []*corev1.ConfigMap{{
- ObjectMeta: metav1.ObjectMeta{
- Name: ConfigMapName,
- Namespace: resolverconfig.ResolversNamespace(system.Namespace()),
- },
- Data: cfg,
- }, {
- ObjectMeta: metav1.ObjectMeta{
- Namespace: resolverconfig.ResolversNamespace(system.Namespace()),
- Name: resolverconfig.GetFeatureFlagsConfigName(),
- },
- Data: map[string]string{
- "enable-git-resolver": "true",
- },
- }},
- ResolutionRequests: []*v1beta1.ResolutionRequest{request},
- }
-
- var expectedStatus *v1beta1.ResolutionRequestStatus
- if tc.expectedStatus != nil {
- expectedStatus = tc.expectedStatus.DeepCopy()
-
- if tc.expectedErr == nil {
- // status.annotations
- if expectedStatus.Annotations == nil {
- expectedStatus.Annotations = make(map[string]string)
- }
- expectedStatus.Annotations[common.AnnotationKeyContentType] = "application/x-yaml"
- expectedStatus.Annotations[gitresolution.AnnotationKeyRevision] = tc.expectedCommitSHA
- expectedStatus.Annotations[gitresolution.AnnotationKeyPath] = tc.args.pathInRepo
-
- if tc.args.url != "" {
- expectedStatus.Annotations[gitresolution.AnnotationKeyURL] = anonFakeRepoURL
- } else {
- expectedStatus.Annotations[gitresolution.AnnotationKeyOrg] = testOrg
- expectedStatus.Annotations[gitresolution.AnnotationKeyRepo] = testRepo
- expectedStatus.Annotations[gitresolution.AnnotationKeyURL] = scmFakeRepoURL
- }
-
- // status.refSource
- expectedStatus.RefSource = &pipelinev1.RefSource{
- URI: "git+" + expectedStatus.Annotations[gitresolution.AnnotationKeyURL],
- Digest: map[string]string{
- "sha1": tc.expectedCommitSHA,
- },
- EntryPoint: tc.args.pathInRepo,
- }
- expectedStatus.Source = expectedStatus.RefSource
- } else {
- expectedStatus.Status.Conditions[0].Message = tc.expectedErr.Error()
- }
- }
-
- frtesting.RunResolverReconcileTest(ctx, t, d, resolver, request, expectedStatus, tc.expectedErr, func(resolver framework.Resolver, testAssets test.Assets) {
- var secretName, secretNameKey, secretNamespace string
- if tc.config[tc.configIdentifer+gitresolution.APISecretNameKey] != "" && tc.config[tc.configIdentifer+gitresolution.APISecretNamespaceKey] != "" && tc.config[tc.configIdentifer+gitresolution.APISecretKeyKey] != "" && tc.apiToken != "" {
- secretName, secretNameKey, secretNamespace = tc.config[tc.configIdentifer+gitresolution.APISecretNameKey], tc.config[tc.configIdentifer+gitresolution.APISecretKeyKey], tc.config[tc.configIdentifer+gitresolution.APISecretNamespaceKey]
- }
- if tc.args.token != "" && tc.args.namespace != "" && tc.args.tokenKey != "" {
- secretName, secretNameKey, secretNamespace = tc.args.token, tc.args.tokenKey, tc.args.namespace
- }
- if secretName == "" || secretNameKey == "" || secretNamespace == "" {
- return
- }
- tokenSecret := &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: secretName,
- Namespace: secretNamespace,
- },
- Data: map[string][]byte{
- secretNameKey: []byte(base64.StdEncoding.Strict().EncodeToString([]byte(tc.apiToken))),
- },
- Type: corev1.SecretTypeOpaque,
- }
- if _, err := testAssets.Clients.Kube.CoreV1().Secrets(secretNamespace).Create(ctx, tokenSecret, metav1.CreateOptions{}); err != nil {
- t.Fatalf("failed to create test token secret: %v", err)
- }
- })
- })
- }
-}
-
-// createTestRepo is used to instantiate a local test repository with the desired commits.
-func createTestRepo(t *testing.T, commits []commitForRepo) (string, []string) {
- t.Helper()
- commitSHAs := []string{}
-
- t.Helper()
- tempDir := t.TempDir()
-
- repo, err := git.PlainInit(tempDir, false)
-
- worktree, err := repo.Worktree()
- if err != nil {
- t.Fatalf("getting test worktree: %v", err)
- }
- if worktree == nil {
- t.Fatal("test worktree not created")
- }
-
- startingHash := writeAndCommitToTestRepo(t, worktree, tempDir, "", "README", []byte("This is a test"))
-
- hashesByBranch := make(map[string][]string)
-
- // Iterate over the commits and add them.
- for _, cmt := range commits {
- branch := cmt.Branch
- if branch == "" {
- branch = plumbing.Master.Short()
- }
-
- // If we're given a revision, check out that revision.
- coOpts := &git.CheckoutOptions{
- Branch: plumbing.NewBranchReferenceName(branch),
- }
-
- if _, ok := hashesByBranch[branch]; !ok && branch != plumbing.Master.Short() {
- coOpts.Hash = plumbing.NewHash(startingHash.String())
- coOpts.Create = true
- }
-
- if err := worktree.Checkout(coOpts); err != nil {
- t.Fatalf("couldn't do checkout of %s: %v", branch, err)
- }
-
- hash := writeAndCommitToTestRepo(t, worktree, tempDir, cmt.Dir, cmt.Filename, []byte(cmt.Content))
- commitSHAs = append(commitSHAs, hash.String())
-
- if _, ok := hashesByBranch[branch]; !ok {
- hashesByBranch[branch] = []string{hash.String()}
- } else {
- hashesByBranch[branch] = append(hashesByBranch[branch], hash.String())
- }
-
- if cmt.Tag != "" {
- _, err = repo.CreateTag(cmt.Tag, hash, &git.CreateTagOptions{
- Message: cmt.Tag,
- Tagger: &object.Signature{
- Name: "Someone",
- Email: "someone@example.com",
- When: time.Now(),
- },
- })
- }
- if err != nil {
- t.Fatalf("couldn't add tag for %s: %v", cmt.Tag, err)
- }
- }
-
- return tempDir, commitSHAs
-}
-
-// commitForRepo provides the directory, filename, content and revision for a test commit.
-type commitForRepo struct {
- Dir string
- Filename string
- Content string
- Branch string
- Tag string
-}
-
-func writeAndCommitToTestRepo(t *testing.T, worktree *git.Worktree, repoDir string, subPath string, filename string, content []byte) plumbing.Hash {
- t.Helper()
-
- targetDir := repoDir
- if subPath != "" {
- targetDir = filepath.Join(targetDir, subPath)
- fi, err := os.Stat(targetDir)
- if os.IsNotExist(err) {
- if err := os.MkdirAll(targetDir, 0o700); err != nil {
- t.Fatalf("couldn't create directory %s in worktree: %v", targetDir, err)
- }
- } else if err != nil {
- t.Fatalf("checking if directory %s in worktree exists: %v", targetDir, err)
- }
- if fi != nil && !fi.IsDir() {
- t.Fatalf("%s already exists but is not a directory", targetDir)
- }
- }
-
- outfile := filepath.Join(targetDir, filename)
- if err := os.WriteFile(outfile, content, 0o600); err != nil {
- t.Fatalf("couldn't write content to file %s: %v", outfile, err)
- }
-
- _, err := worktree.Add(filepath.Join(subPath, filename))
- if err != nil {
- t.Fatalf("couldn't add file %s to git: %v", outfile, err)
- }
-
- hash, err := worktree.Commit("adding file for test", &git.CommitOptions{
- Author: &object.Signature{
- Name: "Someone",
- Email: "someone@example.com",
- When: time.Now(),
- },
- })
- if err != nil {
- t.Fatalf("couldn't perform commit for test: %v", err)
- }
-
- return hash
-}
-
-// withTemporaryGitConfig resets the .gitconfig for the duration of the test.
-func withTemporaryGitConfig(t *testing.T) {
- t.Helper()
- gitConfigDir := t.TempDir()
- key := "GIT_CONFIG_GLOBAL"
- t.Setenv(key, filepath.Join(gitConfigDir, "config"))
-}
-
-func createRequest(args *params) *v1beta1.ResolutionRequest {
- rr := &v1beta1.ResolutionRequest{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "resolution.tekton.dev/v1beta1",
- Kind: "ResolutionRequest",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "rr",
- Namespace: "foo",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{
- common.LabelKeyResolverType: labelValueGitResolverType,
- },
- },
- Spec: v1beta1.ResolutionRequestSpec{
- Params: []pipelinev1.Param{{
- Name: gitresolution.PathParam,
- Value: *pipelinev1.NewStructuredValues(args.pathInRepo),
- }},
- },
- }
-
- if args.revision != "" {
- rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: gitresolution.RevisionParam,
- Value: *pipelinev1.NewStructuredValues(args.revision),
- })
- }
-
- if args.serverURL != "" {
- rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: gitresolution.ServerURLParam,
- Value: *pipelinev1.NewStructuredValues(args.serverURL),
- })
- }
- if args.scmType != "" {
- rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: gitresolution.ScmTypeParam,
- Value: *pipelinev1.NewStructuredValues(args.scmType),
- })
- }
-
- if args.url != "" {
- rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: gitresolution.UrlParam,
- Value: *pipelinev1.NewStructuredValues(args.url),
- })
- } else {
- rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: gitresolution.RepoParam,
- Value: *pipelinev1.NewStructuredValues(args.repo),
- })
- rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: gitresolution.OrgParam,
- Value: *pipelinev1.NewStructuredValues(args.org),
- })
- if args.token != "" {
- rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: gitresolution.TokenParam,
- Value: *pipelinev1.NewStructuredValues(args.token),
- })
- rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: gitresolution.TokenKeyParam,
- Value: *pipelinev1.NewStructuredValues(args.tokenKey),
- })
- }
- }
-
- if args.configKey != "" {
- rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: gitresolution.ConfigKeyParam,
- Value: *pipelinev1.NewStructuredValues(args.configKey),
- })
- }
-
- return rr
-}
-
-func resolverDisabledContext() context.Context {
- return frameworktesting.ContextWithGitResolverDisabled(context.Background())
-}
-
-func createError(msg string) error {
- return &common.GetResourceError{
- ResolverName: gitResolverName,
- Key: "foo/rr",
- Original: errors.New(msg),
- }
-}
-
-func toParams(m map[string]string) []pipelinev1.Param {
- var params []pipelinev1.Param
-
- for k, v := range m {
- params = append(params, pipelinev1.Param{
- Name: k,
- Value: *pipelinev1.NewStructuredValues(v),
- })
- }
-
- return params
-}
diff --git a/upstream/pkg/remoteresolution/resolver/git/testdata/test-org/test-repo/refs/main/pipelines/example-pipeline.yaml b/upstream/pkg/remoteresolution/resolver/git/testdata/test-org/test-repo/refs/main/pipelines/example-pipeline.yaml
deleted file mode 100644
index cc697dd2e91..00000000000
--- a/upstream/pkg/remoteresolution/resolver/git/testdata/test-org/test-repo/refs/main/pipelines/example-pipeline.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: tekton.dev/v1beta1
-kind: Pipeline
-metadata:
- name: example-pipeline
-spec:
- tasks:
- - name: some-pipeline-task
- taskRef:
- kind: Task
- name: some-task
diff --git a/upstream/pkg/remoteresolution/resolver/git/testdata/test-org/test-repo/refs/main/tasks/example-task.yaml b/upstream/pkg/remoteresolution/resolver/git/testdata/test-org/test-repo/refs/main/tasks/example-task.yaml
deleted file mode 100644
index 97ad418341e..00000000000
--- a/upstream/pkg/remoteresolution/resolver/git/testdata/test-org/test-repo/refs/main/tasks/example-task.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-apiVersion: tekton.dev/v1beta1
-kind: Task
-metadata:
- name: example-task
-spec:
- steps:
- - command: ['something']
- image: some-image
- name: some-step
diff --git a/upstream/pkg/remoteresolution/resolver/git/testdata/test-org/test-repo/refs/other/pipelines/example-pipeline.yaml b/upstream/pkg/remoteresolution/resolver/git/testdata/test-org/test-repo/refs/other/pipelines/example-pipeline.yaml
deleted file mode 100644
index 836822c6fa4..00000000000
--- a/upstream/pkg/remoteresolution/resolver/git/testdata/test-org/test-repo/refs/other/pipelines/example-pipeline.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: tekton.dev/v1
-kind: Pipeline
-metadata:
- name: example-pipeline
-spec:
- tasks:
- - name: some-pipeline-task
- taskRef:
- kind: Task
- name: some-other-task
diff --git a/upstream/pkg/remoteresolution/resolver/http/resolver.go b/upstream/pkg/remoteresolution/resolver/http/resolver.go
deleted file mode 100644
index ec106586d9c..00000000000
--- a/upstream/pkg/remoteresolution/resolver/http/resolver.go
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package http
-
-import (
- "context"
- "errors"
-
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
- "github.com/tektoncd/pipeline/pkg/resolution/common"
- resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
- "github.com/tektoncd/pipeline/pkg/resolution/resolver/http"
- "go.uber.org/zap"
- "k8s.io/client-go/kubernetes"
- kubeclient "knative.dev/pkg/client/injection/kube/client"
- "knative.dev/pkg/logging"
-)
-
-const (
- // LabelValueHttpResolverType is the value to use for the
- // resolution.tekton.dev/type label on resource requests
- LabelValueHttpResolverType string = "http"
-
- disabledError = "cannot handle resolution request, enable-http-resolver feature flag not true"
-
- // httpResolverName The name of the resolver
- httpResolverName = "Http"
-
- // configMapName is the http resolver's config map
- configMapName = "http-resolver-config"
-
- // default Timeout value when fetching http resources in seconds
- defaultHttpTimeoutValue = "1m"
-
- // default key in the HTTP password secret
- defaultBasicAuthSecretKey = "password"
-)
-
-var _ framework.Resolver = &Resolver{}
-
-// Resolver implements a framework.Resolver that can fetch files from an HTTP URL
-type Resolver struct {
- kubeClient kubernetes.Interface
- logger *zap.SugaredLogger
-}
-
-func (r *Resolver) Initialize(ctx context.Context) error {
- r.kubeClient = kubeclient.Get(ctx)
- r.logger = logging.FromContext(ctx)
- return nil
-}
-
-// GetName returns a string name to refer to this resolver by.
-func (r *Resolver) GetName(context.Context) string {
- return httpResolverName
-}
-
-// GetConfigName returns the name of the http resolver's configmap.
-func (r *Resolver) GetConfigName(context.Context) string {
- return configMapName
-}
-
-// GetSelector returns a map of labels to match requests to this resolver.
-func (r *Resolver) GetSelector(context.Context) map[string]string {
- return map[string]string{
- common.LabelKeyResolverType: LabelValueHttpResolverType,
- }
-}
-
-// Validate ensures parameters from a request are as expected.
-func (r *Resolver) Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error {
- if len(req.Params) > 0 {
- return http.ValidateParams(ctx, req.Params)
- }
- // Remove this error once validate url has been implemented.
- return errors.New("cannot validate request. the Validate method has not been implemented.")
-}
-
-// Resolve uses the given params to resolve the requested file or resource.
-func (r *Resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error) {
- if len(req.Params) > 0 {
- oParams := req.Params
- if http.IsDisabled(ctx) {
- return nil, errors.New(disabledError)
- }
-
- params, err := http.PopulateDefaultParams(ctx, oParams)
- if err != nil {
- return nil, err
- }
-
- return http.FetchHttpResource(ctx, params, r.kubeClient, r.logger)
- }
- // Remove this error once resolution of url has been implemented.
- return nil, errors.New("the Resolve method has not been implemented.")
-}
diff --git a/upstream/pkg/remoteresolution/resolver/http/resolver_test.go b/upstream/pkg/remoteresolution/resolver/http/resolver_test.go
deleted file mode 100644
index f95fded82d4..00000000000
--- a/upstream/pkg/remoteresolution/resolver/http/resolver_test.go
+++ /dev/null
@@ -1,511 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package http
-
-import (
- "context"
- "crypto/sha256"
- "encoding/base64"
- "encoding/hex"
- "errors"
- "fmt"
- "net/http"
- "net/http/httptest"
- "regexp"
- "testing"
- "time"
-
- "github.com/google/go-cmp/cmp"
- resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
- pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- "github.com/tektoncd/pipeline/pkg/internal/resolution"
- ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
- frtesting "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework/testing"
- resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
- resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
- frameworktesting "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework/testing"
- httpresolution "github.com/tektoncd/pipeline/pkg/resolution/resolver/http"
- "github.com/tektoncd/pipeline/test"
- "github.com/tektoncd/pipeline/test/diff"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "knative.dev/pkg/system"
- _ "knative.dev/pkg/system/testing"
-)
-
-type params struct {
- url string
- authUsername string
- authSecret string
- authSecretKey string
- authSecretContent string
-}
-
-const sampleTask = `---
-kind: Task
-apiVersion: tekton.dev/v1
-metadata:
- name: foo
-spec:
- steps:
- - name: step1
- image: scratch`
-const emptyStr = "empty"
-
-func TestGetSelector(t *testing.T) {
- resolver := Resolver{}
- sel := resolver.GetSelector(context.Background())
- if typ, has := sel[resolutioncommon.LabelKeyResolverType]; !has {
- t.Fatalf("unexpected selector: %v", sel)
- } else if typ != LabelValueHttpResolverType {
- t.Fatalf("unexpected type: %q", typ)
- }
-}
-
-func TestValidate(t *testing.T) {
- testCases := []struct {
- name string
- url string
- expectedErr error
- }{
- {
- name: "valid/url",
- url: "https://raw.githubusercontent.com/tektoncd/catalog/main/task/git-clone/0.4/git-clone.yaml",
- }, {
- name: "invalid/url",
- url: "xttps:ufoo/bar/",
- expectedErr: errors.New(`url xttps:ufoo/bar/ is not a valid http(s) url`),
- }, {
- name: "invalid/url empty",
- url: "",
- expectedErr: errors.New(`cannot parse url : parse "": empty url`),
- }, {
- name: "missing/url",
- expectedErr: errors.New(`missing required http resolver params: url`),
- url: "nourl",
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- resolver := Resolver{}
- params := map[string]string{}
- if tc.url != "nourl" {
- params[httpresolution.UrlParam] = tc.url
- } else {
- // inject a fake param so that it can validate that the url is actually missing.
- params["foo"] = "bar"
- }
- req := v1beta1.ResolutionRequestSpec{
- Params: toParams(params),
- }
- err := resolver.Validate(contextWithConfig(defaultHttpTimeoutValue), &req)
- if tc.expectedErr != nil {
- checkExpectedErr(t, tc.expectedErr, err)
- } else if err != nil {
- t.Fatalf("unexpected error validating params: %v", err)
- }
- })
- }
-}
-
-func TestResolve(t *testing.T) {
- tests := []struct {
- name string
- expectedErr string
- input string
- paramSet bool
- expectedStatus int
- }{
- {
- name: "good/params set",
- input: "task",
- paramSet: true,
- }, {
- name: "bad/params not set",
- input: "task",
- expectedErr: `missing required http resolver params: url`,
- }, {
- name: "bad/not found",
- input: "task",
- paramSet: true,
- expectedStatus: http.StatusNotFound,
- expectedErr: `requested URL 'http://([^']*)' is not found`,
- },
- }
- for _, tc := range tests {
- t.Run(tc.name, func(t *testing.T) {
- svr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if tc.expectedStatus != 0 {
- w.WriteHeader(tc.expectedStatus)
- }
- fmt.Fprint(w, tc.input)
- }))
- params := []pipelinev1.Param{}
- if tc.paramSet {
- params = append(params, pipelinev1.Param{
- Name: httpresolution.UrlParam,
- Value: *pipelinev1.NewStructuredValues(svr.URL),
- })
- } else {
- params = append(params, pipelinev1.Param{
- Name: "foo",
- Value: *pipelinev1.NewStructuredValues("bar"),
- })
- }
- resolver := Resolver{}
- req := v1beta1.ResolutionRequestSpec{
- Params: params,
- }
- output, err := resolver.Resolve(contextWithConfig(defaultHttpTimeoutValue), &req)
- if tc.expectedErr != "" {
- re := regexp.MustCompile(tc.expectedErr)
- if !re.MatchString(err.Error()) {
- t.Fatalf("expected error '%v' but got '%v'", tc.expectedErr, err)
- }
- return
- } else if err != nil {
- t.Fatalf("unexpected error validating params: %v", err)
- }
- if o := cmp.Diff(tc.input, string(output.Data())); o != "" {
- t.Fatalf("expected output '%v' but got '%v'", tc.input, string(output.Data()))
- }
- if o := cmp.Diff(svr.URL, output.RefSource().URI); o != "" {
- t.Fatalf("expected url '%v' but got '%v'", svr.URL, output.RefSource().URI)
- }
-
- eSum := sha256.New()
- eSum.Write([]byte(tc.input))
- eSha256 := hex.EncodeToString(eSum.Sum(nil))
- if o := cmp.Diff(eSha256, output.RefSource().Digest["sha256"]); o != "" {
- t.Fatalf("expected sha256 '%v' but got '%v'", eSha256, output.RefSource().Digest["sha256"])
- }
-
- if output.Annotations() != nil {
- t.Fatalf("output annotations should be nil")
- }
- })
- }
-}
-
-func TestResolveNotEnabled(t *testing.T) {
- var err error
- resolver := Resolver{}
- someParams := map[string]string{"foo": "bar"}
- req := v1beta1.ResolutionRequestSpec{
- Params: toParams(someParams),
- }
- _, err = resolver.Resolve(resolverDisabledContext(), &req)
- if err == nil {
- t.Fatalf("expected disabled err")
- }
- if d := cmp.Diff(disabledError, err.Error()); d != "" {
- t.Errorf("unexpected error: %s", diff.PrintWantGot(d))
- }
- err = resolver.Validate(resolverDisabledContext(), &v1beta1.ResolutionRequestSpec{Params: toParams(someParams)})
- if err == nil {
- t.Fatalf("expected disabled err")
- }
- if d := cmp.Diff(disabledError, err.Error()); d != "" {
- t.Errorf("unexpected error: %s", diff.PrintWantGot(d))
- }
-}
-
-func TestResolverReconcileBasicAuth(t *testing.T) {
- var doNotCreate string = "notcreate"
- var wrongSecretKey string = "wrongsecretk"
-
- tests := []struct {
- name string
- params *params
- taskContent string
- expectedStatus *v1beta1.ResolutionRequestStatus
- expectedErr error
- }{
- {
- name: "good/URL Resolution",
- taskContent: sampleTask,
- expectedStatus: resolution.CreateResolutionRequestStatusWithData([]byte(sampleTask)),
- },
- {
- name: "good/URL Resolution with custom basic auth, and custom secret key",
- taskContent: sampleTask,
- expectedStatus: resolution.CreateResolutionRequestStatusWithData([]byte(sampleTask)),
- params: ¶ms{
- authSecret: "auth-secret",
- authUsername: "auth",
- authSecretKey: "token",
- authSecretContent: "untoken",
- },
- },
- {
- name: "good/URL Resolution with custom basic auth no custom secret key",
- taskContent: sampleTask,
- expectedStatus: resolution.CreateResolutionRequestStatusWithData([]byte(sampleTask)),
- params: ¶ms{
- authSecret: "auth-secret",
- authUsername: "auth",
- authSecretContent: "untoken",
- },
- },
- {
- name: "bad/no url found",
- params: ¶ms{},
- expectedErr: errors.New(`invalid resource request "foo/rr": cannot parse url : parse "": empty url`),
- },
- {
- name: "bad/no secret found",
- params: ¶ms{
- authSecret: doNotCreate,
- authUsername: "user",
- url: "https://blah/blah.com",
- },
- expectedErr: errors.New(`error getting "Http" "foo/rr": cannot get API token, secret notcreate not found in namespace foo`),
- },
- {
- name: "bad/no valid secret key",
- params: ¶ms{
- authSecret: "shhhhh",
- authUsername: "user",
- authSecretKey: wrongSecretKey,
- url: "https://blah/blah",
- },
- expectedErr: errors.New(`error getting "Http" "foo/rr": cannot get API token, key wrongsecretk not found in secret shhhhh in namespace foo`),
- },
- {
- name: "bad/missing username params for secret with params",
- params: ¶ms{
- authSecret: "shhhhh",
- url: "https://blah/blah",
- },
- expectedErr: errors.New(`invalid resource request "foo/rr": missing required param http-username when using http-password-secret`),
- },
- {
- name: "bad/missing password params for secret with username",
- params: ¶ms{
- authUsername: "failure",
- url: "https://blah/blah",
- },
- expectedErr: errors.New(`invalid resource request "foo/rr": missing required param http-password-secret when using http-username`),
- },
- {
- name: "bad/empty auth username",
- params: ¶ms{
- authUsername: emptyStr,
- authSecret: "asecret",
- url: "https://blah/blah",
- },
- expectedErr: errors.New(`invalid resource request "foo/rr": value http-username cannot be empty`),
- },
- {
- name: "bad/empty auth password",
- params: ¶ms{
- authUsername: "auser",
- authSecret: emptyStr,
- url: "https://blah/blah",
- },
- expectedErr: errors.New(`invalid resource request "foo/rr": value http-password-secret cannot be empty`),
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- resolver := &Resolver{}
- ctx, _ := ttesting.SetupFakeContext(t)
- svr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- fmt.Fprint(w, tt.taskContent)
- }))
- p := tt.params
- if p == nil {
- p = ¶ms{}
- }
- if p.url == "" && tt.taskContent != "" {
- p.url = svr.URL
- }
- request := createRequest(p)
- cfg := make(map[string]string)
- d := test.Data{
- ConfigMaps: []*corev1.ConfigMap{{
- ObjectMeta: metav1.ObjectMeta{
- Name: configMapName,
- Namespace: resolverconfig.ResolversNamespace(system.Namespace()),
- },
- Data: cfg,
- }, {
- ObjectMeta: metav1.ObjectMeta{
- Namespace: resolverconfig.ResolversNamespace(system.Namespace()),
- Name: resolverconfig.GetFeatureFlagsConfigName(),
- },
- Data: map[string]string{
- "enable-http-resolver": "true",
- },
- }},
- ResolutionRequests: []*v1beta1.ResolutionRequest{request},
- }
- var expectedStatus *v1beta1.ResolutionRequestStatus
- if tt.expectedStatus != nil {
- expectedStatus = tt.expectedStatus.DeepCopy()
- if tt.expectedErr == nil {
- if tt.taskContent != "" {
- h := sha256.New()
- h.Write([]byte(tt.taskContent))
- sha256CheckSum := hex.EncodeToString(h.Sum(nil))
- refsrc := &pipelinev1.RefSource{
- URI: svr.URL,
- Digest: map[string]string{
- "sha256": sha256CheckSum,
- },
- }
- expectedStatus.RefSource = refsrc
- expectedStatus.Source = refsrc
- }
- } else {
- expectedStatus.Status.Conditions[0].Message = tt.expectedErr.Error()
- }
- }
- frtesting.RunResolverReconcileTest(ctx, t, d, resolver, request, expectedStatus, tt.expectedErr, func(resolver framework.Resolver, testAssets test.Assets) {
- if err := resolver.Initialize(ctx); err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- if tt.params == nil {
- return
- }
- if tt.params.authSecret != "" && tt.params.authSecret != doNotCreate {
- secretKey := tt.params.authSecretKey
- if secretKey == wrongSecretKey {
- secretKey = "randomNotOund"
- }
- if secretKey == "" {
- secretKey = defaultBasicAuthSecretKey
- }
- tokenSecret := &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: tt.params.authSecret,
- Namespace: request.GetNamespace(),
- },
- Data: map[string][]byte{
- secretKey: []byte(base64.StdEncoding.Strict().EncodeToString([]byte(tt.params.authSecretContent))),
- },
- }
- if _, err := testAssets.Clients.Kube.CoreV1().Secrets(request.GetNamespace()).Create(ctx, tokenSecret, metav1.CreateOptions{}); err != nil {
- t.Fatalf("failed to create test token secret: %v", err)
- }
- }
- })
- })
- }
-}
-
-func TestGetName(t *testing.T) {
- resolver := Resolver{}
- ctx := context.Background()
-
- if d := cmp.Diff(httpResolverName, resolver.GetName(ctx)); d != "" {
- t.Errorf("invalid name: %s", diff.PrintWantGot(d))
- }
- if d := cmp.Diff(configMapName, resolver.GetConfigName(ctx)); d != "" {
- t.Errorf("invalid config map name: %s", diff.PrintWantGot(d))
- }
-}
-
-func resolverDisabledContext() context.Context {
- return frameworktesting.ContextWithHttpResolverDisabled(context.Background())
-}
-
-func toParams(m map[string]string) []pipelinev1.Param {
- var params []pipelinev1.Param
-
- for k, v := range m {
- params = append(params, pipelinev1.Param{
- Name: k,
- Value: *pipelinev1.NewStructuredValues(v),
- })
- }
-
- return params
-}
-
-func contextWithConfig(timeout string) context.Context {
- config := map[string]string{
- httpresolution.TimeoutKey: timeout,
- }
- return resolutionframework.InjectResolverConfigToContext(context.Background(), config)
-}
-
-func checkExpectedErr(t *testing.T, expectedErr, actualErr error) {
- t.Helper()
- if actualErr == nil {
- t.Fatalf("expected err '%v' but didn't get one", expectedErr)
- }
- if d := cmp.Diff(expectedErr.Error(), actualErr.Error()); d != "" {
- t.Fatalf("expected err '%v' but got '%v'", expectedErr, actualErr)
- }
-}
-
-func createRequest(params *params) *v1beta1.ResolutionRequest {
- rr := &v1beta1.ResolutionRequest{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "resolution.tekton.dev/v1beta1",
- Kind: "ResolutionRequest",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "rr",
- Namespace: "foo",
- CreationTimestamp: metav1.Time{Time: time.Now()},
- Labels: map[string]string{
- resolutioncommon.LabelKeyResolverType: LabelValueHttpResolverType,
- },
- },
- Spec: v1beta1.ResolutionRequestSpec{
- Params: []pipelinev1.Param{{
- Name: httpresolution.UrlParam,
- Value: *pipelinev1.NewStructuredValues(params.url),
- }},
- },
- }
- if params.authSecret != "" {
- s := params.authSecret
- if s == emptyStr {
- s = ""
- }
- rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: httpresolution.HttpBasicAuthSecret,
- Value: *pipelinev1.NewStructuredValues(s),
- })
- }
-
- if params.authUsername != "" {
- s := params.authUsername
- if s == emptyStr {
- s = ""
- }
- rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: httpresolution.HttpBasicAuthUsername,
- Value: *pipelinev1.NewStructuredValues(s),
- })
- }
-
- if params.authSecretKey != "" {
- rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: httpresolution.HttpBasicAuthSecretKey,
- Value: *pipelinev1.NewStructuredValues(params.authSecretKey),
- })
- }
-
- return rr
-}
diff --git a/upstream/pkg/remoteresolution/resolver/hub/resolver.go b/upstream/pkg/remoteresolution/resolver/hub/resolver.go
deleted file mode 100644
index 8c29b23e50d..00000000000
--- a/upstream/pkg/remoteresolution/resolver/hub/resolver.go
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package hub
-
-import (
- "context"
- "errors"
-
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework"
- "github.com/tektoncd/pipeline/pkg/resolution/common"
- resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
- "github.com/tektoncd/pipeline/pkg/resolution/resolver/hub"
-)
-
-const (
- // LabelValueHubResolverType is the value to use for the
- // resolution.tekton.dev/type label on resource requests
- LabelValueHubResolverType string = "hub"
-
- // ArtifactHubType is the value to use setting the type field to artifact
- ArtifactHubType string = "artifact"
-
- // TektonHubType is the value to use setting the type field to tekton
- TektonHubType string = "tekton"
-)
-
-var _ framework.Resolver = &Resolver{}
-
-// Resolver implements a framework.Resolver that can fetch files from OCI bundles.
-type Resolver struct {
- // TektonHubURL is the URL for hub resolver with type tekton
- TektonHubURL string
- // ArtifactHubURL is the URL for hub resolver with type artifact
- ArtifactHubURL string
-}
-
-// Initialize sets up any dependencies needed by the resolver. None atm.
-func (r *Resolver) Initialize(context.Context) error {
- return nil
-}
-
-// GetName returns a string name to refer to this resolver by.
-func (r *Resolver) GetName(context.Context) string {
- return "Hub"
-}
-
-// GetConfigName returns the name of the bundle resolver's configmap.
-func (r *Resolver) GetConfigName(context.Context) string {
- return "hubresolver-config"
-}
-
-// GetSelector returns a map of labels to match requests to this resolver.
-func (r *Resolver) GetSelector(context.Context) map[string]string {
- return map[string]string{
- common.LabelKeyResolverType: LabelValueHubResolverType,
- }
-}
-
-// Validate ensures parameters from a request are as expected.
-func (r *Resolver) Validate(ctx context.Context, req *v1beta1.ResolutionRequestSpec) error {
- if len(req.Params) > 0 {
- return hub.ValidateParams(ctx, req.Params, r.TektonHubURL)
- }
- // Remove this error once validate url has been implemented.
- return errors.New("cannot validate request. the Validate method has not been implemented.")
-}
-
-// Resolve uses the given params to resolve the requested file or resource.
-func (r *Resolver) Resolve(ctx context.Context, req *v1beta1.ResolutionRequestSpec) (resolutionframework.ResolvedResource, error) {
- if len(req.Params) > 0 {
- return hub.Resolve(ctx, req.Params, r.TektonHubURL, r.ArtifactHubURL)
- }
- // Remove this error once resolution of url has been implemented.
- return nil, errors.New("the Resolve method has not been implemented.")
-}
diff --git a/upstream/pkg/remoteresolution/resolver/hub/resolver_test.go b/upstream/pkg/remoteresolution/resolver/hub/resolver_test.go
deleted file mode 100644
index da8f4ea2021..00000000000
--- a/upstream/pkg/remoteresolution/resolver/hub/resolver_test.go
+++ /dev/null
@@ -1,313 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package hub
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
- resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
- hubresolver "github.com/tektoncd/pipeline/pkg/resolution/resolver/hub"
- "github.com/tektoncd/pipeline/test/diff"
-)
-
-func TestGetSelector(t *testing.T) {
- resolver := Resolver{}
- sel := resolver.GetSelector(context.Background())
- if typ, has := sel[resolutioncommon.LabelKeyResolverType]; !has {
- t.Fatalf("unexpected selector: %v", sel)
- } else if typ != LabelValueHubResolverType {
- t.Fatalf("unexpected type: %q", typ)
- }
-}
-
-func TestValidate(t *testing.T) {
- testCases := []struct {
- testName string
- kind string
- version string
- catalog string
- resourceName string
- hubType string
- expectedErr error
- }{
- {
- testName: "artifact type validation",
- kind: "task",
- resourceName: "foo",
- version: "bar",
- catalog: "baz",
- hubType: ArtifactHubType,
- }, {
- testName: "tekton type validation",
- kind: "task",
- resourceName: "foo",
- version: "bar",
- catalog: "baz",
- hubType: TektonHubType,
- expectedErr: errors.New("failed to validate params: please configure TEKTON_HUB_API env variable to use tekton type"),
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.testName, func(t *testing.T) {
- resolver := Resolver{}
- params := map[string]string{
- hubresolver.ParamKind: tc.kind,
- hubresolver.ParamName: tc.resourceName,
- hubresolver.ParamVersion: tc.version,
- hubresolver.ParamCatalog: tc.catalog,
- hubresolver.ParamType: tc.hubType,
- }
- req := v1beta1.ResolutionRequestSpec{
- Params: toParams(params),
- }
- err := resolver.Validate(contextWithConfig(), &req)
- if tc.expectedErr != nil {
- checkExpectedErr(t, tc.expectedErr, err)
- } else if err != nil {
- t.Fatalf("unexpected error validating params: %v", err)
- }
- })
- }
-}
-
-func TestValidateMissing(t *testing.T) {
- resolver := Resolver{}
-
- var err error
-
- paramsMissingName := map[string]string{
- hubresolver.ParamKind: "foo",
- hubresolver.ParamVersion: "bar",
- }
- req := v1beta1.ResolutionRequestSpec{
- Params: toParams(paramsMissingName),
- }
- err = resolver.Validate(contextWithConfig(), &req)
- if err == nil {
- t.Fatalf("expected missing name err")
- }
-
- paramsMissingVersion := map[string]string{
- hubresolver.ParamKind: "foo",
- hubresolver.ParamName: "bar",
- }
- req = v1beta1.ResolutionRequestSpec{
- Params: toParams(paramsMissingVersion),
- }
- err = resolver.Validate(contextWithConfig(), &req)
-
- if err == nil {
- t.Fatalf("expected missing version err")
- }
-}
-
-func TestValidateConflictingKindName(t *testing.T) {
- testCases := []struct {
- kind string
- name string
- version string
- catalog string
- hubType string
- }{
- {
- kind: "not-taskpipeline",
- name: "foo",
- version: "bar",
- catalog: "baz",
- hubType: TektonHubType,
- },
- {
- kind: "task",
- name: "foo",
- version: "bar",
- catalog: "baz",
- hubType: "not-tekton-artifact",
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- resolver := Resolver{}
- params := map[string]string{
- hubresolver.ParamKind: tc.kind,
- hubresolver.ParamName: tc.name,
- hubresolver.ParamVersion: tc.version,
- hubresolver.ParamCatalog: tc.catalog,
- hubresolver.ParamType: tc.hubType,
- }
- req := v1beta1.ResolutionRequestSpec{
- Params: toParams(params),
- }
- err := resolver.Validate(contextWithConfig(), &req)
- if err == nil {
- t.Fatalf("expected err due to conflicting param")
- }
- })
- }
-}
-
-func TestResolve(t *testing.T) {
- testCases := []struct {
- name string
- kind string
- imageName string
- version string
- catalog string
- hubType string
- input string
- expectedRes []byte
- expectedErr error
- }{
- {
- name: "valid response from Tekton Hub",
- kind: "task",
- imageName: "foo",
- version: "baz",
- catalog: "Tekton",
- hubType: TektonHubType,
- input: `{"data":{"yaml":"some content"}}`,
- expectedRes: []byte("some content"),
- },
- {
- name: "valid response from Artifact Hub",
- kind: "task",
- imageName: "foo",
- version: "baz",
- catalog: "Tekton",
- hubType: ArtifactHubType,
- input: `{"data":{"manifestRaw":"some content"}}`,
- expectedRes: []byte("some content"),
- },
- {
- name: "not-found response from hub",
- kind: "task",
- imageName: "foo",
- version: "baz",
- catalog: "Tekton",
- hubType: TektonHubType,
- input: `{"name":"not-found","id":"aaaaaaaa","message":"resource not found","temporary":false,"timeout":false,"fault":false}`,
- expectedRes: []byte(""),
- },
- {
- name: "response with bad formatting error",
- kind: "task",
- imageName: "foo",
- version: "baz",
- catalog: "Tekton",
- hubType: TektonHubType,
- input: `value`,
- expectedErr: errors.New("fail to fetch Tekton Hub resource: error unmarshalling json response: invalid character 'v' looking for beginning of value"),
- },
- {
- name: "response with empty body error from Tekton Hub",
- kind: "task",
- imageName: "foo",
- version: "baz",
- catalog: "Tekton",
- hubType: TektonHubType,
- expectedErr: errors.New("fail to fetch Tekton Hub resource: error unmarshalling json response: unexpected end of JSON input"),
- },
- {
- name: "response with empty body error from Artifact Hub",
- kind: "task",
- imageName: "foo",
- version: "baz",
- catalog: "Tekton",
- hubType: ArtifactHubType,
- expectedErr: errors.New("fail to fetch Artifact Hub resource: error unmarshalling json response: unexpected end of JSON input"),
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- svr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- fmt.Fprint(w, tc.input)
- }))
-
- resolver := &Resolver{
- TektonHubURL: svr.URL,
- ArtifactHubURL: svr.URL,
- }
-
- params := map[string]string{
- hubresolver.ParamKind: tc.kind,
- hubresolver.ParamName: tc.imageName,
- hubresolver.ParamVersion: tc.version,
- hubresolver.ParamCatalog: tc.catalog,
- hubresolver.ParamType: tc.hubType,
- }
- req := v1beta1.ResolutionRequestSpec{
- Params: toParams(params),
- }
- output, err := resolver.Resolve(contextWithConfig(), &req)
- if tc.expectedErr != nil {
- checkExpectedErr(t, tc.expectedErr, err)
- } else {
- if err != nil {
- t.Fatalf("unexpected error resolving: %v", err)
- }
- if d := cmp.Diff(tc.expectedRes, output.Data()); d != "" {
- t.Errorf("unexpected resource from Resolve: %s", diff.PrintWantGot(d))
- }
- }
- })
- }
-}
-
-func toParams(m map[string]string) []pipelinev1.Param {
- var params []pipelinev1.Param
-
- for k, v := range m {
- params = append(params, pipelinev1.Param{
- Name: k,
- Value: *pipelinev1.NewStructuredValues(v),
- })
- }
-
- return params
-}
-
-func contextWithConfig() context.Context {
- config := map[string]string{
- "default-tekton-hub-catalog": "Tekton",
- "default-artifact-hub-task-catalog": "tekton-catalog-tasks",
- "default-artifact-hub-pipeline-catalog": "tekton-catalog-pipelines",
- "default-type": "artifact",
- }
-
- return resolutionframework.InjectResolverConfigToContext(context.Background(), config)
-}
-
-func checkExpectedErr(t *testing.T, expectedErr, actualErr error) {
- t.Helper()
- if actualErr == nil {
- t.Fatalf("expected err '%v' but didn't get one", expectedErr)
- }
- if d := cmp.Diff(expectedErr.Error(), actualErr.Error()); d != "" {
- t.Fatalf("expected err '%v' but got '%v'", expectedErr, actualErr)
- }
-}
diff --git a/upstream/pkg/remoteresolution/resource/crd_resource.go b/upstream/pkg/remoteresolution/resource/crd_resource.go
deleted file mode 100644
index 55c619f7d48..00000000000
--- a/upstream/pkg/remoteresolution/resource/crd_resource.go
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package resource
-
-import (
- "context"
- "errors"
-
- rrclient "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
- rrlisters "github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1"
- resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
- resolutionresource "github.com/tektoncd/pipeline/pkg/resolution/resource"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "knative.dev/pkg/apis"
-)
-
-// CRDRequester implements the Requester interface using
-// ResolutionRequest CRDs.
-type CRDRequester struct {
- clientset rrclient.Interface
- lister rrlisters.ResolutionRequestLister
-}
-
-// NewCRDRequester returns an implementation of Requester that uses
-// ResolutionRequest CRD objects to mediate between the caller who wants a
-// resource (e.g. Tekton Pipelines) and the responder who can fetch
-// it (e.g. the gitresolver)
-func NewCRDRequester(clientset rrclient.Interface, lister rrlisters.ResolutionRequestLister) *CRDRequester {
- return &CRDRequester{clientset, lister}
-}
-
-var _ Requester = &CRDRequester{}
-
-// Submit constructs a ResolutionRequest object and submits it to the
-// kubernetes cluster, returning any errors experienced while doing so.
-// If ResolutionRequest is succeeded then it returns the resolved data.
-func (r *CRDRequester) Submit(ctx context.Context, resolver ResolverName, req Request) (ResolvedResource, error) {
- rr, _ := r.lister.ResolutionRequests(req.ResolverPayload().Namespace).Get(req.ResolverPayload().Name)
- if rr == nil {
- if err := r.createResolutionRequest(ctx, resolver, req); err != nil &&
- // When the request reconciles frequently, the creation may fail
- // because the list informer cache is not updated.
- // If the request already exists then we can assume that is in progress.
- // The next reconcile will handle it based on the actual situation.
- !apierrors.IsAlreadyExists(err) {
- return nil, err
- }
- return nil, resolutioncommon.ErrRequestInProgress
- }
-
- if rr.Status.GetCondition(apis.ConditionSucceeded).IsUnknown() {
- // TODO(sbwsg): This should be where an existing
- // resource is given an additional owner reference so
- // that it doesn't get deleted until the caller is done
- // with it. Use appendOwnerReference and then submit
- // update to ResolutionRequest.
- return nil, resolutioncommon.ErrRequestInProgress
- }
-
- if rr.Status.GetCondition(apis.ConditionSucceeded).IsTrue() {
- return resolutionresource.CrdIntoResource(rr), nil
- }
-
- message := rr.Status.GetCondition(apis.ConditionSucceeded).GetMessage()
- err := resolutioncommon.NewError(resolutioncommon.ReasonResolutionFailed, errors.New(message))
- return nil, err
-}
-
-func (r *CRDRequester) createResolutionRequest(ctx context.Context, resolver ResolverName, req Request) error {
- var owner metav1.OwnerReference
- if ownedReq, ok := req.(OwnedRequest); ok {
- owner = ownedReq.OwnerRef()
- }
- rr := resolutionresource.CreateResolutionRequest(ctx, resolver, req.ResolverPayload().Name, req.ResolverPayload().Namespace, req.ResolverPayload().ResolutionSpec.Params, owner)
- rr.Spec.URL = req.ResolverPayload().ResolutionSpec.URL
- _, err := r.clientset.ResolutionV1beta1().ResolutionRequests(rr.Namespace).Create(ctx, rr, metav1.CreateOptions{})
- return err
-}
diff --git a/upstream/pkg/remoteresolution/resource/crd_resource_test.go b/upstream/pkg/remoteresolution/resource/crd_resource_test.go
deleted file mode 100644
index 728a52b73b7..00000000000
--- a/upstream/pkg/remoteresolution/resource/crd_resource_test.go
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package resource_test
-
-import (
- "context"
- "encoding/base64"
- "errors"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
- resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
- "github.com/tektoncd/pipeline/test"
- "github.com/tektoncd/pipeline/test/diff"
- resolution "github.com/tektoncd/pipeline/test/remoteresolution"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "knative.dev/pkg/logging"
- _ "knative.dev/pkg/system/testing" // Setup system.Namespace()
- "sigs.k8s.io/yaml"
-)
-
-// getCRDRequester returns an instance of the CRDRequester that has been seeded with
-// d, where d represents the state of the system (existing resources) needed for the test.
-func getCRDRequester(t *testing.T, d test.Data) (test.Assets, func()) {
- t.Helper()
- return initializeCRDRequesterAssets(t, d)
-}
-
-func initializeCRDRequesterAssets(t *testing.T, d test.Data) (test.Assets, func()) {
- t.Helper()
- ctx, _ := ttesting.SetupFakeContext(t)
- ctx, cancel := context.WithCancel(ctx)
- c, informers := test.SeedTestData(t, ctx, d)
-
- return test.Assets{
- Logger: logging.FromContext(ctx),
- Clients: c,
- Informers: informers,
- Ctx: ctx,
- }, cancel
-}
-
-func TestCRDRequesterSubmit(t *testing.T) {
- ownerRef := mustParseOwnerReference(t, `
-apiVersion: tekton.dev/v1beta1
-blockOwnerDeletion: true
-controller: true
-kind: TaskRun
-name: git-clone
-uid: 727019c3-4066-4d8b-919e-90660dfd8b55
-`)
- request := mustParseRawRequest(t, `
-resolverPayload:
- name: git-ec247f5592afcaefa8485e34d2bd80c6
- namespace: namespace
- resolutionSpec:
- params:
- - name: url
- value: https://github.com/tektoncd/catalog
- - name: revision
- value: main
- - name: pathInRepo
- value: task/git-clone/0.6/git-clone.yaml
- url: "https://foo/bar"
-`)
- baseRR := mustParseResolutionRequest(t, `
-kind: "ResolutionRequest"
-apiVersion: "resolution.tekton.dev/v1beta1"
-metadata:
- name: "git-ec247f5592afcaefa8485e34d2bd80c6"
- namespace: "namespace"
- labels:
- resolution.tekton.dev/type: "git"
- ownerReferences:
- - apiVersion: tekton.dev/v1beta1
- blockOwnerDeletion: true
- controller: true
- kind: TaskRun
- name: git-clone
- uid: 727019c3-4066-4d8b-919e-90660dfd8b55
-spec:
- params:
- - name: "url"
- value: "https://github.com/tektoncd/catalog"
- - name: "revision"
- value: "main"
- - name: "pathInRepo"
- value: "task/git-clone/0.6/git-clone.yaml"
- url: "https://foo/bar"
-`)
- createdRR := baseRR.DeepCopy()
- //
- unknownRR := baseRR.DeepCopy()
- unknownRR.Status = *mustParseResolutionRequestStatus(t, `
-conditions:
- - lastTransitionTime: "2023-03-26T10:31:29Z"
- status: "Unknown"
- type: Succeeded
-`)
- //
- failedRR := baseRR.DeepCopy()
- failedRR.Status = *mustParseResolutionRequestStatus(t, `
-conditions:
- - lastTransitionTime: "2023-03-26T10:31:29Z"
- status: "Failed"
- type: Succeeded
- message: "error message"
-`)
- //
- successRR := baseRR.DeepCopy()
- successRR.Status = *mustParseResolutionRequestStatus(t, `
-annotations:
- resolution.tekton.dev/content-type: application/x-yaml
- resolution.tekton.dev/path: task/git-clone/0.6/git-clone.yaml
- resolution.tekton.dev/revision: main
- resolution.tekton.dev/url: https://github.com/tektoncd/catalog
-conditions:
- - lastTransitionTime: "2023-03-26T10:31:29Z"
- status: "True"
- type: Succeeded
- data: e30=
-`)
- //
- successWithoutAnnotationsRR := baseRR.DeepCopy()
- successWithoutAnnotationsRR.Status = *mustParseResolutionRequestStatus(t, `
-conditions:
- - lastTransitionTime: "2023-03-26T10:31:29Z"
- status: "True"
- type: Succeeded
- data: e30=
-`)
-
- testCases := []struct {
- name string
- inputRequest *resolution.RawRequest
- inputResolutionRequest *v1beta1.ResolutionRequest
- expectedResolutionRequest *v1beta1.ResolutionRequest
- expectedResolvedResource *v1beta1.ResolutionRequest
- expectedErr error
- }{
- {
- name: "resolution request does not exist and needs to be created",
- inputRequest: request,
- inputResolutionRequest: nil,
- expectedResolutionRequest: createdRR.DeepCopy(),
- expectedResolvedResource: nil,
- expectedErr: resolutioncommon.ErrRequestInProgress,
- },
- {
- name: "resolution request exist and status is unknown",
- inputRequest: request,
- inputResolutionRequest: unknownRR.DeepCopy(),
- expectedResolutionRequest: nil,
- expectedResolvedResource: nil,
- expectedErr: resolutioncommon.ErrRequestInProgress,
- },
- {
- name: "resolution request exist and status is succeeded",
- inputRequest: request,
- inputResolutionRequest: successRR.DeepCopy(),
- expectedResolutionRequest: nil,
- expectedResolvedResource: successRR.DeepCopy(),
- expectedErr: nil,
- },
- {
- name: "resolution request exist and status is succeeded but annotations is nil",
- inputRequest: request,
- inputResolutionRequest: successWithoutAnnotationsRR.DeepCopy(),
- expectedResolutionRequest: nil,
- expectedResolvedResource: successWithoutAnnotationsRR.DeepCopy(),
- expectedErr: nil,
- },
- {
- name: "resolution request exist and status is failed",
- inputRequest: request,
- inputResolutionRequest: failedRR.DeepCopy(),
- expectedResolutionRequest: nil,
- expectedResolvedResource: nil,
- expectedErr: resolutioncommon.NewError(resolutioncommon.ReasonResolutionFailed, errors.New("error message")),
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- d := test.Data{}
- if tc.inputResolutionRequest != nil {
- d.ResolutionRequests = []*v1beta1.ResolutionRequest{tc.inputResolutionRequest}
- }
-
- testAssets, cancel := getCRDRequester(t, d)
- defer cancel()
- ctx := testAssets.Ctx
- clients := testAssets.Clients
-
- resolver := resolutioncommon.ResolverName("git")
- crdRequester := resource.NewCRDRequester(clients.ResolutionRequests, testAssets.Informers.ResolutionRequest.Lister())
- requestWithOwner := &ownerRequest{
- Request: tc.inputRequest.Request(),
- ownerRef: *ownerRef,
- }
- resolvedResource, err := crdRequester.Submit(ctx, resolver, requestWithOwner)
-
- // check the error
- if err != nil || tc.expectedErr != nil {
- if err == nil || tc.expectedErr == nil {
- t.Errorf("expected error %v, but got %v", tc.expectedErr, err)
- } else if err.Error() != tc.expectedErr.Error() {
- t.Errorf("expected error %v, but got %v", tc.expectedErr, err)
- }
- }
-
- // check the resolved resource
- switch {
- case tc.expectedResolvedResource == nil:
- // skipping check of resolved resources.
- case tc.expectedResolvedResource != nil:
- if resolvedResource == nil {
- t.Errorf("expected resolved resource equal %v, but got %v", tc.expectedResolvedResource, resolvedResource)
- break
- }
- rr := tc.expectedResolvedResource
- data, err := base64.StdEncoding.Strict().DecodeString(rr.Status.Data)
- if err != nil {
- t.Errorf("unexpected error decoding expected resource data: %v", err)
- }
- expectedResolvedResource := resolution.NewResolvedResource(data, rr.Status.Annotations, rr.Status.RefSource, nil)
- assertResolvedResourceEqual(t, expectedResolvedResource, resolvedResource)
- }
-
- // check the resolution request
- if tc.expectedResolutionRequest != nil {
- resolutionrequest, err := clients.ResolutionRequests.ResolutionV1beta1().
- ResolutionRequests(tc.inputRequest.ResolverPayload.Namespace).Get(ctx, tc.inputRequest.ResolverPayload.Name, metav1.GetOptions{})
- if err != nil {
- t.Errorf("unexpected error getting resource requests: %v", err)
- }
- if d := cmp.Diff(tc.expectedResolutionRequest, resolutionrequest); d != "" {
- t.Errorf("expected resolution request to match %s", diff.PrintWantGot(d))
- }
- }
- })
- }
-}
-
-type ownerRequest struct {
- resource.Request
- ownerRef metav1.OwnerReference
-}
-
-func (r *ownerRequest) OwnerRef() metav1.OwnerReference {
- return r.ownerRef
-}
-
-func mustParseRawRequest(t *testing.T, yamlStr string) *resolution.RawRequest {
- t.Helper()
- output := &resolution.RawRequest{}
- if err := yaml.Unmarshal([]byte(yamlStr), output); err != nil {
- t.Errorf("parsing raw request %s: %v", yamlStr, err)
- }
- return output
-}
-
-func mustParseOwnerReference(t *testing.T, yamlStr string) *metav1.OwnerReference {
- t.Helper()
- output := &metav1.OwnerReference{}
- if err := yaml.Unmarshal([]byte(yamlStr), output); err != nil {
- t.Errorf("parsing owner reference %s: %v", yamlStr, err)
- }
- return output
-}
-
-func mustParseResolutionRequest(t *testing.T, yamlStr string) *v1beta1.ResolutionRequest {
- t.Helper()
- output := &v1beta1.ResolutionRequest{}
- if err := yaml.Unmarshal([]byte(yamlStr), output); err != nil {
- t.Errorf("parsing resolution request %s: %v", yamlStr, err)
- }
- return output
-}
-
-func mustParseResolutionRequestStatus(t *testing.T, yamlStr string) *v1beta1.ResolutionRequestStatus {
- t.Helper()
- output := &v1beta1.ResolutionRequestStatus{}
- if err := yaml.Unmarshal([]byte(yamlStr), output); err != nil {
- t.Errorf("parsing resolution request status %s: %v", yamlStr, err)
- }
- return output
-}
-
-func assertResolvedResourceEqual(t *testing.T, expected, actual resolutioncommon.ResolvedResource) {
- t.Helper()
- expectedBytes, err := expected.Data()
- if err != nil {
- t.Errorf("unexpected error getting expected resource data: %v", err)
- }
- actualBytes, err := actual.Data()
- if err != nil {
- t.Errorf("unexpected error getting acutal resource data: %v", err)
- }
- if d := cmp.Diff(expectedBytes, actualBytes); d != "" {
- t.Errorf("expected resolved resource Data to match %s", diff.PrintWantGot(d))
- }
- if d := cmp.Diff(expected.Annotations(), actual.Annotations()); d != "" {
- t.Errorf("expected resolved resource Annotations to match %s", diff.PrintWantGot(d))
- }
- if d := cmp.Diff(expected.RefSource(), actual.RefSource()); d != "" {
- t.Errorf("expected resolved resource Source to match %s", diff.PrintWantGot(d))
- }
-}
diff --git a/upstream/pkg/remoteresolution/resource/doc.go b/upstream/pkg/remoteresolution/resource/doc.go
deleted file mode 100644
index 4f8393963da..00000000000
--- a/upstream/pkg/remoteresolution/resource/doc.go
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Package resource contains the upgraded remote resolution framework.
-It is equivalent to `pkg/resolution/resource`.
-This was necessary to ensure backwards compatibility with the existing framework.
-
-This package is subject to further refactoring and changes.
-*/
-package resource
diff --git a/upstream/pkg/remoteresolution/resource/request.go b/upstream/pkg/remoteresolution/resource/request.go
deleted file mode 100644
index f5472b472fd..00000000000
--- a/upstream/pkg/remoteresolution/resource/request.go
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package resource
-
-import (
- "context"
-
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
-)
-
-type BasicRequest struct {
- resolverPayload ResolverPayload
-}
-
-var _ Request = &BasicRequest{}
-
-// NewRequest returns an instance of a BasicRequestV2 with the given resolverPayload.
-func NewRequest(resolverPayload ResolverPayload) Request {
- return &BasicRequest{resolverPayload}
-}
-
-var _ Request = &BasicRequest{}
-
-// Params are the map of parameters associated with this request
-func (req *BasicRequest) ResolverPayload() ResolverPayload {
- return req.resolverPayload
-}
-
-// Requester is the interface implemented by a type that knows how to
-// submit requests for remote resources.
-type Requester interface {
- // Submit accepts the name of a resolver to submit a request to
- // along with the request itself.
- Submit(ctx context.Context, name ResolverName, req Request) (ResolvedResource, error)
-}
-
-// Request is implemented by any type that represents a single request
-// for a remote resource. Implementing this interface gives the underlying
-// type an opportunity to control properties such as whether the name of
-// a request has particular properties, whether the request should be made
-// to a specific namespace, and precisely which parameters should be included.
-type Request interface {
- ResolverPayload() ResolverPayload
-}
-
-// ResolverPayload is the struct which holds the payload to create
-// the Resolution Request CRD.
-type ResolverPayload struct {
- Name string
- Namespace string
- ResolutionSpec *v1beta1.ResolutionRequestSpec
-}
-
-// ResolutionRequester is the interface implemented by a type that knows how to
-// submit requests for remote resources.
-type ResolutionRequester interface {
- // SubmitResolutionRequest accepts the name of a resolver to submit a request to
- // along with the request itself.
- SubmitResolutionRequest(ctx context.Context, name ResolverName, req RequestRemoteResource) (ResolvedResource, error)
-}
-
-// RequestRemoteResource is implemented by any type that represents a single request
-// for a remote resource. Implementing this interface gives the underlying
-// type an opportunity to control properties such as whether the name of
-// a request has particular properties, whether the request should be made
-// to a specific namespace, and precisely which parameters should be included.
-type RequestRemoteResource interface {
- ResolverPayload() ResolverPayload
-}
diff --git a/upstream/pkg/remoteresolution/resource/request_test.go b/upstream/pkg/remoteresolution/resource/request_test.go
deleted file mode 100644
index 664f66cc423..00000000000
--- a/upstream/pkg/remoteresolution/resource/request_test.go
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package resource_test
-
-import (
- "testing"
-
- "github.com/google/go-cmp/cmp"
- v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
- "github.com/tektoncd/pipeline/test/diff"
-)
-
-func TestNewRequest(t *testing.T) {
- type args struct {
- resolverPayload resource.ResolverPayload
- }
- type want = args
- golden := args{
- resolverPayload: resource.ResolverPayload{
- Name: "test-name",
- Namespace: "test-namespace",
- ResolutionSpec: &v1beta1.ResolutionRequestSpec{
- Params: v1.Params{
- {Name: "param1", Value: v1.ParamValue{Type: v1.ParamTypeString, StringVal: "value1"}},
- {Name: "param2", Value: v1.ParamValue{Type: v1.ParamTypeString, StringVal: "value2"}},
- },
- URL: "https://foo/bar",
- },
- },
- }
- tests := []struct {
- name string
- args args
- want want
- }{
- {
- name: "empty",
- args: args{},
- want: want{},
- },
- {
- name: "all",
- args: golden,
- want: golden,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- request := resource.NewRequest(tt.args.resolverPayload)
- if request == nil {
- t.Errorf("NewRequest() return nil")
- }
- if d := cmp.Diff(tt.want.resolverPayload, request.ResolverPayload()); d != "" {
- t.Errorf("expected params to match %s", diff.PrintWantGot(d))
- }
- })
- }
-}
diff --git a/upstream/pkg/remoteresolution/resource/resource.go b/upstream/pkg/remoteresolution/resource/resource.go
deleted file mode 100644
index ff61f65b4fc..00000000000
--- a/upstream/pkg/remoteresolution/resource/resource.go
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package resource
-
-import (
- "github.com/tektoncd/pipeline/pkg/resolution/common"
-)
-
-// This is an alias for avoiding cycle import
-
-// ResolverName is the type used for a resolver's name and is mostly
-// used to ensure the function signatures that accept it are clear on the
-// purpose for the given string.
-type ResolverName = common.ResolverName
-
-// OwnedRequest is implemented by any type implementing Request that also needs
-// to express a Kubernetes OwnerRef relationship as part of the request being
-// made.
-type OwnedRequest = common.OwnedRequest
-
-// ResolvedResource is implemented by any type that offers a read-only
-// view of the data and metadata of a resolved remote resource.
-type ResolvedResource = common.ResolvedResource
diff --git a/upstream/pkg/resolution/common/errors.go b/upstream/pkg/resolution/common/errors.go
index d304989b8b8..bb680f7fb56 100644
--- a/upstream/pkg/resolution/common/errors.go
+++ b/upstream/pkg/resolution/common/errors.go
@@ -17,21 +17,10 @@ limitations under the License.
package common
import (
- "context"
"errors"
"fmt"
- "slices"
- "strings"
-
- apierrors "k8s.io/apimachinery/pkg/api/errors"
)
-// This error is defined in etcd at
-// https://github.com/etcd-io/etcd/blob/5b226e0abf4100253c94bb71f47d6815877ed5a2/server/etcdserver/errors.go#L30
-// TODO: If/when https://github.com/kubernetes/kubernetes/issues/106491 is addressed,
-// we should stop relying on a hardcoded string.
-var errEtcdLeaderChange = "etcdserver: leader changed"
-
// Error embeds both a short machine-readable string reason for resolution
// problems alongside the original error generated during the resolution flow.
type Error struct {
@@ -176,19 +165,3 @@ func ReasonError(err error) (string, error) {
return reason, resolutionError
}
-
-// IsErrTransient returns true if an error returned by GetTask/GetStepAction is retryable.
-func IsErrTransient(err error) bool {
- switch {
- case apierrors.IsConflict(err):
- return true
- case apierrors.IsServerTimeout(err):
- return true
- case apierrors.IsTimeout(err):
- return true
- default:
- return slices.ContainsFunc([]string{errEtcdLeaderChange, context.DeadlineExceeded.Error()}, func(s string) bool {
- return strings.Contains(err.Error(), s)
- })
- }
-}
diff --git a/upstream/pkg/resolution/common/interface.go b/upstream/pkg/resolution/common/interface.go
index 82bfda68590..3a1968f3695 100644
--- a/upstream/pkg/resolution/common/interface.go
+++ b/upstream/pkg/resolution/common/interface.go
@@ -30,8 +30,6 @@ type ResolverName string
// Requester is the interface implemented by a type that knows how to
// submit requests for remote resources.
-//
-// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resource.Requester].
type Requester interface {
// Submit accepts the name of a resolver to submit a request to
// along with the request itself.
@@ -43,8 +41,6 @@ type Requester interface {
// type an opportunity to control properties such as whether the name of
// a request has particular properties, whether the request should be made
// to a specific namespace, and precisely which parameters should be included.
-//
-// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resource.Request].
type Request interface {
Name() string
Namespace() string
diff --git a/upstream/pkg/resolution/resolver/bundle/bundle.go b/upstream/pkg/resolution/resolver/bundle/bundle.go
index bc6a7fed07f..cb5a023de13 100644
--- a/upstream/pkg/resolution/resolver/bundle/bundle.go
+++ b/upstream/pkg/resolution/resolver/bundle/bundle.go
@@ -37,7 +37,6 @@ const (
// RequestOptions are the options used to request a resource from
// a remote bundle.
type RequestOptions struct {
- ServiceAccount string
ImagePullSecret string
Bundle string
EntryName string
diff --git a/upstream/pkg/resolution/resolver/bundle/config.go b/upstream/pkg/resolution/resolver/bundle/config.go
index e46a3151334..45a12846259 100644
--- a/upstream/pkg/resolution/resolver/bundle/config.go
+++ b/upstream/pkg/resolution/resolver/bundle/config.go
@@ -16,9 +16,6 @@ package bundle
const (
// ConfigMapName is the bundle resolver's config map
ConfigMapName = "bundleresolver-config"
- // ConfigServiceAccount is the configuration field name for controlling
- // the Service Account name to use for bundle requests.
- ConfigServiceAccount = "default-service-account"
// ConfigKind is the configuration field name for controlling
// what the layer name in the bundle image is.
ConfigKind = "default-kind"
diff --git a/upstream/pkg/resolution/resolver/bundle/params.go b/upstream/pkg/resolution/resolver/bundle/params.go
index 2712cbe4c09..d410a699f7f 100644
--- a/upstream/pkg/resolution/resolver/bundle/params.go
+++ b/upstream/pkg/resolution/resolver/bundle/params.go
@@ -21,13 +21,8 @@ import (
"github.com/google/go-containerregistry/pkg/name"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
- "github.com/tektoncd/pipeline/pkg/resolution/resource"
)
-// ParamServiceAccount is the parameter defining what service
-// account name to use for bundle requests.
-const ParamServiceAccount = "serviceAccount"
-
// ParamImagePullSecret is the parameter defining what secret
// name to use for bundle requests.
const ParamImagePullSecret = "secret"
@@ -37,7 +32,7 @@ const ParamBundle = "bundle"
// ParamName is the parameter defining what the layer name in the bundle
// image is.
-const ParamName = resource.ParamName
+const ParamName = "name"
// ParamKind is the parameter defining what the layer kind in the bundle
// image is.
@@ -54,18 +49,6 @@ func OptionsFromParams(ctx context.Context, params []pipelinev1.Param) (RequestO
paramsMap[p.Name] = p.Value
}
- saVal, ok := paramsMap[ParamServiceAccount]
- sa := ""
- if !ok || saVal.StringVal == "" {
- if saString, ok := conf[ConfigServiceAccount]; ok {
- sa = saString
- } else {
- return opts, errors.New("default Service Account was not set during installation of the bundle resolver")
- }
- } else {
- sa = saVal.StringVal
- }
-
bundleVal, ok := paramsMap[ParamBundle]
if !ok || bundleVal.StringVal == "" {
return opts, fmt.Errorf("parameter %q required", ParamBundle)
@@ -85,13 +68,12 @@ func OptionsFromParams(ctx context.Context, params []pipelinev1.Param) (RequestO
if kindString, ok := conf[ConfigKind]; ok {
kind = kindString
} else {
- return opts, errors.New("default resource Kind was not set during installation of the bundle resolver")
+ return opts, errors.New("default resource Kind was not set during installation of the bundle resolver")
}
} else {
kind = kindVal.StringVal
}
- opts.ServiceAccount = sa
opts.ImagePullSecret = paramsMap[ParamImagePullSecret].StringVal
opts.Bundle = bundleVal.StringVal
opts.EntryName = nameVal.StringVal
diff --git a/upstream/pkg/resolution/resolver/bundle/resolver.go b/upstream/pkg/resolution/resolver/bundle/resolver.go
index 95c18f21963..a5cd07ac5f3 100644
--- a/upstream/pkg/resolution/resolver/bundle/resolver.go
+++ b/upstream/pkg/resolution/resolver/bundle/resolver.go
@@ -22,10 +22,10 @@ import (
"time"
"github.com/google/go-containerregistry/pkg/authn/k8schain"
+ kauth "github.com/google/go-containerregistry/pkg/authn/kubernetes"
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
- v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- common "github.com/tektoncd/pipeline/pkg/resolution/common"
+ pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
+ "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
"k8s.io/client-go/kubernetes"
"knative.dev/pkg/client/injection/kube/client"
@@ -34,21 +34,19 @@ import (
const (
disabledError = "cannot handle resolution request, enable-bundles-resolver feature flag not true"
- // TODO(sbwsg): This should be exposed as a configurable option for
- // admins (e.g. via ConfigMap)
- timeoutDuration = time.Minute
-
// LabelValueBundleResolverType is the value to use for the
// resolution.tekton.dev/type label on resource requests
LabelValueBundleResolverType string = "bundles"
+ // TODO(sbwsg): This should be exposed as a configurable option for
+ // admins (e.g. via ConfigMap)
+ timeoutDuration = time.Minute
+
// BundleResolverName is the name that the bundle resolver should be associated with.
BundleResolverName = "bundleresolver"
)
// Resolver implements a framework.Resolver that can fetch files from OCI bundles.
-//
-// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/bundle.Resolver] instead.
type Resolver struct {
kubeClientSet kubernetes.Interface
}
@@ -77,21 +75,22 @@ func (r *Resolver) GetSelector(context.Context) map[string]string {
}
// ValidateParams ensures parameters from a request are as expected.
-func (r *Resolver) ValidateParams(ctx context.Context, params []v1.Param) error {
- return ValidateParams(ctx, params)
-}
-
-// Resolve uses the given params to resolve the requested file or resource.
-func (r *Resolver) Resolve(ctx context.Context, params []v1.Param) (framework.ResolvedResource, error) {
- return ResolveRequest(ctx, r.kubeClientSet, &v1beta1.ResolutionRequestSpec{Params: params})
+func (r *Resolver) ValidateParams(ctx context.Context, params []pipelinev1.Param) error {
+ if r.isDisabled(ctx) {
+ return errors.New(disabledError)
+ }
+ if _, err := OptionsFromParams(ctx, params); err != nil {
+ return err
+ }
+ return nil
}
// Resolve uses the given params to resolve the requested file or resource.
-func ResolveRequest(ctx context.Context, kubeClientSet kubernetes.Interface, req *v1beta1.ResolutionRequestSpec) (framework.ResolvedResource, error) {
- if isDisabled(ctx) {
+func (r *Resolver) Resolve(ctx context.Context, params []pipelinev1.Param) (framework.ResolvedResource, error) {
+ if r.isDisabled(ctx) {
return nil, errors.New(disabledError)
}
- opts, err := OptionsFromParams(ctx, req.Params)
+ opts, err := OptionsFromParams(ctx, params)
if err != nil {
return nil, err
}
@@ -100,10 +99,10 @@ func ResolveRequest(ctx context.Context, kubeClientSet kubernetes.Interface, req
imagePullSecrets = append(imagePullSecrets, opts.ImagePullSecret)
}
namespace := common.RequestNamespace(ctx)
- kc, err := k8schain.New(ctx, kubeClientSet, k8schain.Options{
+ kc, err := k8schain.New(ctx, r.kubeClientSet, k8schain.Options{
Namespace: namespace,
- ServiceAccountName: opts.ServiceAccount,
ImagePullSecrets: imagePullSecrets,
+ ServiceAccountName: kauth.NoServiceAccount,
})
if err != nil {
return nil, err
@@ -113,17 +112,7 @@ func ResolveRequest(ctx context.Context, kubeClientSet kubernetes.Interface, req
return GetEntry(ctx, kc, opts)
}
-func ValidateParams(ctx context.Context, params []v1.Param) error {
- if isDisabled(ctx) {
- return errors.New(disabledError)
- }
- if _, err := OptionsFromParams(ctx, params); err != nil {
- return err
- }
- return nil
-}
-
-func isDisabled(ctx context.Context) bool {
+func (r *Resolver) isDisabled(ctx context.Context) bool {
cfg := resolverconfig.FromContextOrDefaults(ctx)
return !cfg.FeatureFlags.EnableBundleResolver
}
diff --git a/upstream/pkg/resolution/resolver/bundle/resolver_test.go b/upstream/pkg/resolution/resolver/bundle/resolver_test.go
index d5e1ce4f84d..4f575d00b8f 100644
--- a/upstream/pkg/resolution/resolver/bundle/resolver_test.go
+++ b/upstream/pkg/resolution/resolver/bundle/resolver_test.go
@@ -30,13 +30,13 @@ import (
"github.com/google/go-containerregistry/pkg/registry"
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
+ pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- "github.com/tektoncd/pipeline/pkg/internal/resolution"
ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
- common "github.com/tektoncd/pipeline/pkg/resolution/common"
- "github.com/tektoncd/pipeline/pkg/resolution/resolver/bundle"
- "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
+ resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
+ bundle "github.com/tektoncd/pipeline/pkg/resolution/resolver/bundle"
frtesting "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework/testing"
+ "github.com/tektoncd/pipeline/pkg/resolution/resolver/internal"
"github.com/tektoncd/pipeline/test"
"github.com/tektoncd/pipeline/test/diff"
corev1 "k8s.io/api/core/v1"
@@ -56,19 +56,15 @@ const (
func TestGetSelector(t *testing.T) {
resolver := bundle.Resolver{}
sel := resolver.GetSelector(context.Background())
- if typ, has := sel[common.LabelKeyResolverType]; !has {
+ if typ, has := sel[resolutioncommon.LabelKeyResolverType]; !has {
t.Fatalf("unexpected selector: %v", sel)
} else if typ != bundle.LabelValueBundleResolverType {
t.Fatalf("unexpected type: %q", typ)
}
}
-func TestValidateParamsSecret(t *testing.T) {
+func TestValidateParams(t *testing.T) {
resolver := bundle.Resolver{}
- config := map[string]string{
- bundle.ConfigServiceAccount: "default",
- }
- ctx := framework.InjectResolverConfigToContext(context.Background(), config)
paramsWithTask := []pipelinev1.Param{{
Name: bundle.ParamKind,
@@ -84,7 +80,7 @@ func TestValidateParamsSecret(t *testing.T) {
Value: *pipelinev1.NewStructuredValues("baz"),
}}
- if err := resolver.ValidateParams(ctx, paramsWithTask); err != nil {
+ if err := resolver.ValidateParams(context.Background(), paramsWithTask); err != nil {
t.Fatalf("unexpected error validating params: %v", err)
}
@@ -101,47 +97,7 @@ func TestValidateParamsSecret(t *testing.T) {
Name: bundle.ParamImagePullSecret,
Value: *pipelinev1.NewStructuredValues("baz"),
}}
- if err := resolver.ValidateParams(ctx, paramsWithPipeline); err != nil {
- t.Fatalf("unexpected error validating params: %v", err)
- }
-}
-
-func TestValidateParamsServiceAccount(t *testing.T) {
- resolver := bundle.Resolver{}
- config := map[string]string{
- bundle.ConfigServiceAccount: "default",
- }
- ctx := framework.InjectResolverConfigToContext(context.Background(), config)
-
- paramsWithTask := []pipelinev1.Param{{
- Name: bundle.ParamKind,
- Value: *pipelinev1.NewStructuredValues("task"),
- }, {
- Name: bundle.ParamName,
- Value: *pipelinev1.NewStructuredValues("foo"),
- }, {
- Name: bundle.ParamBundle,
- Value: *pipelinev1.NewStructuredValues("bar"),
- }, {
- Name: bundle.ParamServiceAccount,
- Value: *pipelinev1.NewStructuredValues("baz"),
- }}
-
- if err := resolver.ValidateParams(context.Background(), paramsWithTask); err != nil {
- t.Fatalf("unexpected error validating params: %v", err)
- }
-
- paramsWithPipeline := []pipelinev1.Param{{
- Name: bundle.ParamKind,
- Value: *pipelinev1.NewStructuredValues("pipeline"),
- }, {
- Name: bundle.ParamName,
- Value: *pipelinev1.NewStructuredValues("foo"),
- }, {
- Name: bundle.ParamBundle,
- Value: *pipelinev1.NewStructuredValues("bar"),
- }}
- if err := resolver.ValidateParams(ctx, paramsWithPipeline); err != nil {
+ if err := resolver.ValidateParams(context.Background(), paramsWithPipeline); err != nil {
t.Fatalf("unexpected error validating params: %v", err)
}
}
@@ -151,7 +107,7 @@ func TestValidateParamsDisabled(t *testing.T) {
var err error
- paramsSecret := []pipelinev1.Param{{
+ params := []pipelinev1.Param{{
Name: bundle.ParamKind,
Value: *pipelinev1.NewStructuredValues("task"),
}, {
@@ -164,29 +120,7 @@ func TestValidateParamsDisabled(t *testing.T) {
Name: bundle.ParamImagePullSecret,
Value: *pipelinev1.NewStructuredValues("baz"),
}}
- err = resolver.ValidateParams(resolverDisabledContext(), paramsSecret)
- if err == nil {
- t.Fatalf("expected disabled err")
- }
-
- if d := cmp.Diff(disabledError, err.Error()); d != "" {
- t.Errorf("unexpected error: %s", diff.PrintWantGot(d))
- }
-
- paramsServiceAccount := []pipelinev1.Param{{
- Name: bundle.ParamKind,
- Value: *pipelinev1.NewStructuredValues("task"),
- }, {
- Name: bundle.ParamName,
- Value: *pipelinev1.NewStructuredValues("foo"),
- }, {
- Name: bundle.ParamBundle,
- Value: *pipelinev1.NewStructuredValues("bar"),
- }, {
- Name: bundle.ParamServiceAccount,
- Value: *pipelinev1.NewStructuredValues("baz"),
- }}
- err = resolver.ValidateParams(resolverDisabledContext(), paramsServiceAccount)
+ err = resolver.ValidateParams(resolverDisabledContext(), params)
if err == nil {
t.Fatalf("expected disabled err")
}
@@ -280,8 +214,7 @@ func TestResolve_KeyChainError(t *testing.T) {
Namespace: resolverconfig.ResolversNamespace(system.Namespace()),
},
Data: map[string]string{
- bundle.ConfigKind: "task",
- bundle.ConfigServiceAccount: "default",
+ bundle.ConfigKind: "task",
},
}},
}
@@ -306,27 +239,26 @@ func TestResolve_KeyChainError(t *testing.T) {
}
type params struct {
- serviceAccount string
- secret string
- bundle string
- name string
- kind string
+ secret string
+ bundle string
+ name string
+ kind string
}
func TestResolve(t *testing.T) {
// example task resource
- exampleTask := &pipelinev1.Task{
+ exampleTask := &pipelinev1beta1.Task{
ObjectMeta: metav1.ObjectMeta{
Name: "example-task",
Namespace: "task-ns",
ResourceVersion: "00002",
},
TypeMeta: metav1.TypeMeta{
- Kind: string(pipelinev1.NamespacedTaskKind),
- APIVersion: "tekton.dev/v1",
+ Kind: string(pipelinev1beta1.NamespacedTaskKind),
+ APIVersion: "tekton.dev/v1beta1",
},
- Spec: pipelinev1.TaskSpec{
- Steps: []pipelinev1.Step{{
+ Spec: pipelinev1beta1.TaskSpec{
+ Steps: []pipelinev1beta1.Step{{
Name: "some-step",
Image: "some-image",
Command: []string{"something"},
@@ -339,7 +271,7 @@ func TestResolve(t *testing.T) {
}
// example pipeline resource
- examplePipeline := &pipelinev1.Pipeline{
+ examplePipeline := &pipelinev1beta1.Pipeline{
ObjectMeta: metav1.ObjectMeta{
Name: "example-pipeline",
Namespace: "pipeline-ns",
@@ -347,14 +279,14 @@ func TestResolve(t *testing.T) {
},
TypeMeta: metav1.TypeMeta{
Kind: "Pipeline",
- APIVersion: "tekton.dev/v1",
+ APIVersion: "tekton.dev/v1beta1",
},
- Spec: pipelinev1.PipelineSpec{
- Tasks: []pipelinev1.PipelineTask{{
+ Spec: pipelinev1beta1.PipelineSpec{
+ Tasks: []pipelinev1beta1.PipelineTask{{
Name: "some-pipeline-task",
- TaskRef: &pipelinev1.TaskRef{
+ TaskRef: &pipelinev1beta1.TaskRef{
Name: "some-task",
- Kind: pipelinev1.NamespacedTaskKind,
+ Kind: pipelinev1beta1.NamespacedTaskKind,
},
}},
},
@@ -368,12 +300,12 @@ func TestResolve(t *testing.T) {
var tooManyObjs []runtime.Object
for i := 0; i <= bundle.MaximumBundleObjects; i++ {
name := fmt.Sprintf("%d-task", i)
- obj := pipelinev1.Task{
+ obj := pipelinev1beta1.Task{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
TypeMeta: metav1.TypeMeta{
- APIVersion: "tekton.dev/v1",
+ APIVersion: "tekton.dev/v1beta1",
Kind: "Task",
},
}
@@ -393,10 +325,10 @@ func TestResolve(t *testing.T) {
"single-pipeline": pushToRegistry(t, r, "single-pipeline", []runtime.Object{examplePipeline}, test.DefaultObjectAnnotationMapper),
"multiple-resources": pushToRegistry(t, r, "multiple-resources", []runtime.Object{exampleTask, examplePipeline}, test.DefaultObjectAnnotationMapper),
"too-many-objs": pushToRegistry(t, r, "too-many-objs", tooManyObjs, asIsMapper),
- "single-task-no-version": pushToRegistry(t, r, "single-task-no-version", []runtime.Object{&pipelinev1.Task{TypeMeta: metav1.TypeMeta{Kind: "task"}, ObjectMeta: metav1.ObjectMeta{Name: "foo"}}}, asIsMapper),
- "single-task-no-kind": pushToRegistry(t, r, "single-task-no-kind", []runtime.Object{&pipelinev1.Task{TypeMeta: metav1.TypeMeta{APIVersion: "tekton.dev/v1"}, ObjectMeta: metav1.ObjectMeta{Name: "foo"}}}, asIsMapper),
- "single-task-no-name": pushToRegistry(t, r, "single-task-no-name", []runtime.Object{&pipelinev1.Task{TypeMeta: metav1.TypeMeta{APIVersion: "tekton.dev/v1", Kind: "task"}}}, asIsMapper),
- "single-task-kind-incorrect-form": pushToRegistry(t, r, "single-task-kind-incorrect-form", []runtime.Object{&pipelinev1.Task{TypeMeta: metav1.TypeMeta{APIVersion: "tekton.dev/v1", Kind: "Task"}, ObjectMeta: metav1.ObjectMeta{Name: "foo"}}}, asIsMapper),
+ "single-task-no-version": pushToRegistry(t, r, "single-task-no-version", []runtime.Object{&pipelinev1beta1.Task{TypeMeta: metav1.TypeMeta{Kind: "task"}, ObjectMeta: metav1.ObjectMeta{Name: "foo"}}}, asIsMapper),
+ "single-task-no-kind": pushToRegistry(t, r, "single-task-no-kind", []runtime.Object{&pipelinev1beta1.Task{TypeMeta: metav1.TypeMeta{APIVersion: "tekton.dev/v1beta1"}, ObjectMeta: metav1.ObjectMeta{Name: "foo"}}}, asIsMapper),
+ "single-task-no-name": pushToRegistry(t, r, "single-task-no-name", []runtime.Object{&pipelinev1beta1.Task{TypeMeta: metav1.TypeMeta{APIVersion: "tekton.dev/v1beta1", Kind: "task"}}}, asIsMapper),
+ "single-task-kind-incorrect-form": pushToRegistry(t, r, "single-task-kind-incorrect-form", []runtime.Object{&pipelinev1beta1.Task{TypeMeta: metav1.TypeMeta{APIVersion: "tekton.dev/v1beta1", Kind: "Task"}, ObjectMeta: metav1.ObjectMeta{Name: "foo"}}}, asIsMapper),
}
testcases := []struct {
@@ -415,7 +347,7 @@ func TestResolve(t *testing.T) {
kind: "task",
},
imageName: "single-task",
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(taskAsYAML),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData(taskAsYAML),
}, {
name: "single task: param kind is capitalized, but kind in bundle is not",
args: ¶ms{
@@ -425,7 +357,7 @@ func TestResolve(t *testing.T) {
},
kindInBundle: "task",
imageName: "single-task",
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(taskAsYAML),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData(taskAsYAML),
}, {
name: "single task: tag is included in the bundle parameter",
args: ¶ms{
@@ -434,7 +366,7 @@ func TestResolve(t *testing.T) {
kind: "task",
},
imageName: "single-task",
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(taskAsYAML),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData(taskAsYAML),
}, {
name: "single task: using default kind value from configmap",
args: ¶ms{
@@ -442,25 +374,7 @@ func TestResolve(t *testing.T) {
name: "example-task",
},
imageName: "single-task",
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(taskAsYAML),
- }, {
- name: "single task: using secret from params",
- args: ¶ms{
- bundle: testImages["single-task"].uri + ":latest",
- name: "example-task",
- secret: "example-secret",
- },
- imageName: "single-task",
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(taskAsYAML),
- }, {
- name: "single task: using SA from params",
- args: ¶ms{
- bundle: testImages["single-task"].uri + ":latest",
- name: "example-task",
- serviceAccount: "example-sa",
- },
- imageName: "single-task",
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(taskAsYAML),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData(taskAsYAML),
}, {
name: "single pipeline",
args: ¶ms{
@@ -469,27 +383,7 @@ func TestResolve(t *testing.T) {
kind: "pipeline",
},
imageName: "single-pipeline",
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(pipelineAsYAML),
- }, {
- name: "single pipeline: with service account",
- args: ¶ms{
- bundle: testImages["single-pipeline"].uri + ":latest",
- name: "example-pipeline",
- kind: "pipeline",
- serviceAccount: "example-sa",
- },
- imageName: "single-pipeline",
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(pipelineAsYAML),
- }, {
- name: "single pipeline: with secret",
- args: ¶ms{
- bundle: testImages["single-pipeline"].uri + ":latest",
- name: "example-pipeline",
- kind: "pipeline",
- secret: "example-secret",
- },
- imageName: "single-pipeline",
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(pipelineAsYAML),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData(pipelineAsYAML),
}, {
name: "multiple resources: an image has both task and pipeline resource",
args: ¶ms{
@@ -498,7 +392,7 @@ func TestResolve(t *testing.T) {
kind: "pipeline",
},
imageName: "multiple-resources",
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(pipelineAsYAML),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData(pipelineAsYAML),
}, {
name: "too many objects in an image",
args: ¶ms{
@@ -506,7 +400,7 @@ func TestResolve(t *testing.T) {
name: "2-task",
kind: "task",
},
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
+ expectedStatus: internal.CreateResolutionRequestFailureStatus(),
expectedErrMessage: fmt.Sprintf("contained more than the maximum %d allow objects", bundle.MaximumBundleObjects),
}, {
name: "single task no version",
@@ -515,7 +409,7 @@ func TestResolve(t *testing.T) {
name: "foo",
kind: "task",
},
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
+ expectedStatus: internal.CreateResolutionRequestFailureStatus(),
expectedErrMessage: fmt.Sprintf("the layer 0 does not contain a %s annotation", bundle.BundleAnnotationAPIVersion),
}, {
name: "single task no kind",
@@ -524,7 +418,7 @@ func TestResolve(t *testing.T) {
name: "foo",
kind: "task",
},
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
+ expectedStatus: internal.CreateResolutionRequestFailureStatus(),
expectedErrMessage: fmt.Sprintf("the layer 0 does not contain a %s annotation", bundle.BundleAnnotationKind),
}, {
name: "single task no name",
@@ -533,7 +427,7 @@ func TestResolve(t *testing.T) {
name: "foo",
kind: "task",
},
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
+ expectedStatus: internal.CreateResolutionRequestFailureStatus(),
expectedErrMessage: fmt.Sprintf("the layer 0 does not contain a %s annotation", bundle.BundleAnnotationName),
}, {
name: "single task kind incorrect form",
@@ -542,15 +436,14 @@ func TestResolve(t *testing.T) {
name: "foo",
kind: "task",
},
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
+ expectedStatus: internal.CreateResolutionRequestFailureStatus(),
expectedErrMessage: fmt.Sprintf("the layer 0 the annotation %s must be lowercased and singular, found %s", bundle.BundleAnnotationKind, "Task"),
},
}
resolver := &bundle.Resolver{}
confMap := map[string]string{
- bundle.ConfigKind: "task",
- bundle.ConfigServiceAccount: "default",
+ bundle.ConfigKind: "task",
}
for _, tc := range testcases {
@@ -596,7 +489,7 @@ func TestResolve(t *testing.T) {
}
expectedStatus.Annotations[bundle.ResolverAnnotationName] = tc.args.name
- expectedStatus.Annotations[bundle.ResolverAnnotationAPIVersion] = "v1"
+ expectedStatus.Annotations[bundle.ResolverAnnotationAPIVersion] = "v1beta1"
expectedStatus.RefSource = &pipelinev1.RefSource{
URI: testImages[tc.imageName].uri,
@@ -628,7 +521,7 @@ func createRequest(p *params) *v1beta1.ResolutionRequest {
Namespace: "foo",
CreationTimestamp: metav1.Time{Time: time.Now()},
Labels: map[string]string{
- common.LabelKeyResolverType: bundle.LabelValueBundleResolverType,
+ resolutioncommon.LabelKeyResolverType: bundle.LabelValueBundleResolverType,
},
},
Spec: v1beta1.ResolutionRequestSpec{
@@ -644,9 +537,6 @@ func createRequest(p *params) *v1beta1.ResolutionRequest {
}, {
Name: bundle.ParamImagePullSecret,
Value: *pipelinev1.NewStructuredValues(p.secret),
- }, {
- Name: bundle.ParamServiceAccount,
- Value: *pipelinev1.NewStructuredValues(p.serviceAccount),
}},
},
}
@@ -654,7 +544,7 @@ func createRequest(p *params) *v1beta1.ResolutionRequest {
}
func createError(image, msg string) error {
- return &common.GetResourceError{
+ return &resolutioncommon.GetResourceError{
ResolverName: bundle.BundleResolverName,
Key: "foo/rr",
Original: fmt.Errorf("invalid tekton bundle %s, error: %s", image, msg),
diff --git a/upstream/pkg/resolution/resolver/cluster/resolver.go b/upstream/pkg/resolution/resolver/cluster/resolver.go
index bc9d5cd3e30..6483016b93b 100644
--- a/upstream/pkg/resolution/resolver/cluster/resolver.go
+++ b/upstream/pkg/resolution/resolver/cluster/resolver.go
@@ -21,15 +21,13 @@ import (
"encoding/hex"
"errors"
"fmt"
- "slices"
"strings"
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
pipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client"
- common "github.com/tektoncd/pipeline/pkg/resolution/common"
+ resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/logging"
@@ -52,11 +50,7 @@ const (
var _ framework.Resolver = &Resolver{}
-var supportedKinds = []string{"task", "pipeline", "stepaction"}
-
// Resolver implements a framework.Resolver that can fetch resources from other namespaces.
-//
-// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/cluster.Resolver] instead.
type Resolver struct {
pipelineClientSet clientset.Interface
}
@@ -77,24 +71,25 @@ func (r *Resolver) GetName(_ context.Context) string {
// the cluster resolver to process them.
func (r *Resolver) GetSelector(_ context.Context) map[string]string {
return map[string]string{
- common.LabelKeyResolverType: LabelValueClusterResolverType,
+ resolutioncommon.LabelKeyResolverType: LabelValueClusterResolverType,
}
}
// ValidateParams returns an error if the given parameter map is not
// valid for a resource request targeting the cluster resolver.
func (r *Resolver) ValidateParams(ctx context.Context, params []pipelinev1.Param) error {
- return ValidateParams(ctx, params)
+ if r.isDisabled(ctx) {
+ return errors.New(disabledError)
+ }
+
+ _, err := populateParamsWithDefaults(ctx, params)
+ return err
}
// Resolve performs the work of fetching a resource from a namespace with the given
// parameters.
func (r *Resolver) Resolve(ctx context.Context, origParams []pipelinev1.Param) (framework.ResolvedResource, error) {
- return ResolveFromParams(ctx, origParams, r.pipelineClientSet)
-}
-
-func ResolveFromParams(ctx context.Context, origParams []pipelinev1.Param, pipelineClientSet clientset.Interface) (framework.ResolvedResource, error) {
- if isDisabled(ctx) {
+ if r.isDisabled(ctx) {
return nil, errors.New(disabledError)
}
@@ -113,34 +108,53 @@ func ResolveFromParams(ctx context.Context, origParams []pipelinev1.Param, pipel
groupVersion := pipelinev1.SchemeGroupVersion.String()
switch params[KindParam] {
- case "stepaction":
- stepaction, err := pipelineClientSet.TektonV1beta1().StepActions(params[NamespaceParam]).Get(ctx, params[NameParam], metav1.GetOptions{})
+ case "task":
+ task, err := r.pipelineClientSet.TektonV1().Tasks(params[NamespaceParam]).Get(ctx, params[NameParam], metav1.GetOptions{})
if err != nil {
- logger.Infof("failed to load stepaction %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
+ logger.Infof("failed to load task %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
return nil, err
}
- uid, data, sha256Checksum, spec, err = fetchStepaction(ctx, pipelinev1beta1.SchemeGroupVersion.String(), stepaction, params)
+ uid = string(task.UID)
+ task.Kind = "Task"
+ task.APIVersion = groupVersion
+ data, err = yaml.Marshal(task)
if err != nil {
+ logger.Infof("failed to marshal task %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
return nil, err
}
- case "task":
- task, err := pipelineClientSet.TektonV1().Tasks(params[NamespaceParam]).Get(ctx, params[NameParam], metav1.GetOptions{})
+ sha256Checksum, err = task.Checksum()
if err != nil {
- logger.Infof("failed to load task %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
return nil, err
}
- uid, data, sha256Checksum, spec, err = fetchTask(ctx, groupVersion, task, params)
+
+ spec, err = yaml.Marshal(task.Spec)
if err != nil {
+ logger.Infof("failed to marshal the spec of the task %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
return nil, err
}
case "pipeline":
- pipeline, err := pipelineClientSet.TektonV1().Pipelines(params[NamespaceParam]).Get(ctx, params[NameParam], metav1.GetOptions{})
+ pipeline, err := r.pipelineClientSet.TektonV1().Pipelines(params[NamespaceParam]).Get(ctx, params[NameParam], metav1.GetOptions{})
if err != nil {
logger.Infof("failed to load pipeline %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
return nil, err
}
- uid, data, sha256Checksum, spec, err = fetchPipeline(ctx, groupVersion, pipeline, params)
+ uid = string(pipeline.UID)
+ pipeline.Kind = "Pipeline"
+ pipeline.APIVersion = groupVersion
+ data, err = yaml.Marshal(pipeline)
if err != nil {
+ logger.Infof("failed to marshal pipeline %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
+ return nil, err
+ }
+
+ sha256Checksum, err = pipeline.Checksum()
+ if err != nil {
+ return nil, err
+ }
+
+ spec, err = yaml.Marshal(pipeline.Spec)
+ if err != nil {
+ logger.Infof("failed to marshal the spec of the pipeline %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
return nil, err
}
default:
@@ -165,6 +179,11 @@ func (r *Resolver) GetConfigName(context.Context) string {
return configMapName
}
+func (r *Resolver) isDisabled(ctx context.Context) bool {
+ cfg := resolverconfig.FromContextOrDefaults(ctx)
+ return !cfg.FeatureFlags.EnableClusterResolver
+}
+
// ResolvedClusterResource implements framework.ResolvedResource and returns
// the resolved file []byte data and an annotation map for any metadata.
type ResolvedClusterResource struct {
@@ -232,7 +251,7 @@ func populateParamsWithDefaults(ctx context.Context, origParams []pipelinev1.Par
} else {
params[KindParam] = pKind.StringVal
}
- if kindVal, ok := params[KindParam]; ok && !isSupportedKind(kindVal) {
+ if kindVal, ok := params[KindParam]; ok && kindVal != "task" && kindVal != "pipeline" {
return nil, fmt.Errorf("unknown or unsupported resource kind '%s'", kindVal)
}
@@ -283,91 +302,3 @@ func isInCommaSeparatedList(checkVal string, commaList string) bool {
}
return false
}
-
-func isDisabled(ctx context.Context) bool {
- cfg := resolverconfig.FromContextOrDefaults(ctx)
- return !cfg.FeatureFlags.EnableClusterResolver
-}
-
-func ValidateParams(ctx context.Context, params []pipelinev1.Param) error {
- if isDisabled(ctx) {
- return errors.New(disabledError)
- }
-
- _, err := populateParamsWithDefaults(ctx, params)
- return err
-}
-
-func fetchStepaction(ctx context.Context, groupVersion string, stepaction *pipelinev1beta1.StepAction, params map[string]string) (string, []byte, []byte, []byte, error) {
- logger := logging.FromContext(ctx)
- uid := string(stepaction.UID)
- stepaction.Kind = "StepAction"
- stepaction.APIVersion = groupVersion
- data, err := yaml.Marshal(stepaction)
- if err != nil {
- logger.Infof("failed to marshal stepaction %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
- return "", nil, nil, nil, err
- }
- sha256Checksum, err := stepaction.Checksum()
- if err != nil {
- return "", nil, nil, nil, err
- }
-
- spec, err := yaml.Marshal(stepaction.Spec)
- if err != nil {
- logger.Infof("failed to marshal the spec of the task %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
- return "", nil, nil, nil, err
- }
- return uid, data, sha256Checksum, spec, nil
-}
-
-func fetchTask(ctx context.Context, groupVersion string, task *pipelinev1.Task, params map[string]string) (string, []byte, []byte, []byte, error) {
- logger := logging.FromContext(ctx)
- uid := string(task.UID)
- task.Kind = "Task"
- task.APIVersion = groupVersion
- data, err := yaml.Marshal(task)
- if err != nil {
- logger.Infof("failed to marshal task %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
- return "", nil, nil, nil, err
- }
- sha256Checksum, err := task.Checksum()
- if err != nil {
- return "", nil, nil, nil, err
- }
-
- spec, err := yaml.Marshal(task.Spec)
- if err != nil {
- logger.Infof("failed to marshal the spec of the task %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
- return "", nil, nil, nil, err
- }
- return uid, data, sha256Checksum, spec, nil
-}
-
-func fetchPipeline(ctx context.Context, groupVersion string, pipeline *pipelinev1.Pipeline, params map[string]string) (string, []byte, []byte, []byte, error) {
- logger := logging.FromContext(ctx)
- uid := string(pipeline.UID)
- pipeline.Kind = "Pipeline"
- pipeline.APIVersion = groupVersion
- data, err := yaml.Marshal(pipeline)
- if err != nil {
- logger.Infof("failed to marshal pipeline %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
- return "", nil, nil, nil, err
- }
-
- sha256Checksum, err := pipeline.Checksum()
- if err != nil {
- return "", nil, nil, nil, err
- }
-
- spec, err := yaml.Marshal(pipeline.Spec)
- if err != nil {
- logger.Infof("failed to marshal the spec of the pipeline %s from namespace %s: %v", params[NameParam], params[NamespaceParam], err)
- return "", nil, nil, nil, err
- }
- return uid, data, sha256Checksum, spec, nil
-}
-
-func isSupportedKind(kindValue string) bool {
- return slices.Contains[[]string, string](supportedKinds, kindValue)
-}
diff --git a/upstream/pkg/resolution/resolver/cluster/resolver_test.go b/upstream/pkg/resolution/resolver/cluster/resolver_test.go
index dd24ae7846b..e1051a7089d 100644
--- a/upstream/pkg/resolution/resolver/cluster/resolver_test.go
+++ b/upstream/pkg/resolution/resolver/cluster/resolver_test.go
@@ -30,12 +30,12 @@ import (
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- "github.com/tektoncd/pipeline/pkg/internal/resolution"
ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
- common "github.com/tektoncd/pipeline/pkg/resolution/common"
+ resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
cluster "github.com/tektoncd/pipeline/pkg/resolution/resolver/cluster"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
frtesting "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework/testing"
+ "github.com/tektoncd/pipeline/pkg/resolution/resolver/internal"
"github.com/tektoncd/pipeline/test"
"github.com/tektoncd/pipeline/test/diff"
corev1 "k8s.io/api/core/v1"
@@ -53,7 +53,7 @@ const (
func TestGetSelector(t *testing.T) {
resolver := cluster.Resolver{}
sel := resolver.GetSelector(context.Background())
- if typ, has := sel[common.LabelKeyResolverType]; !has {
+ if typ, has := sel[resolutioncommon.LabelKeyResolverType]; !has {
t.Fatalf("unexpected selector: %v", sel)
} else if typ != cluster.LabelValueClusterResolverType {
t.Fatalf("unexpected type: %q", typ)
@@ -279,28 +279,6 @@ func TestResolve(t *testing.T) {
t.Fatalf("couldn't marshal pipeline: %v", err)
}
- exampleStepAction := &pipelinev1beta1.StepAction{
- ObjectMeta: metav1.ObjectMeta{
- Name: "example-stepaction",
- Namespace: "stepaction-ns",
- ResourceVersion: "00003",
- UID: "c123",
- },
- TypeMeta: metav1.TypeMeta{
- Kind: "StepAction",
- APIVersion: "tekton.dev/v1beta1",
- },
- Spec: pipelinev1beta1.StepActionSpec{},
- }
- stepActionChecksum, err := exampleStepAction.Checksum()
- if err != nil {
- t.Fatalf("couldn't checksum stepaction: %v", err)
- }
- stepActionAsYAML, err := yaml.Marshal(exampleStepAction)
- if err != nil {
- t.Fatalf("couldn't marshal stepaction: %v", err)
- }
-
testCases := []struct {
name string
kind string
@@ -345,23 +323,6 @@ func TestResolve(t *testing.T) {
},
},
},
- }, {
- name: "successful stepaction",
- kind: "stepaction",
- resourceName: exampleStepAction.Name,
- namespace: exampleStepAction.Namespace,
- expectedStatus: &v1beta1.ResolutionRequestStatus{
- Status: duckv1.Status{},
- ResolutionRequestStatusFields: v1beta1.ResolutionRequestStatusFields{
- Data: base64.StdEncoding.Strict().EncodeToString(stepActionAsYAML),
- RefSource: &pipelinev1.RefSource{
- URI: "/apis/tekton.dev/v1/namespaces/stepaction-ns/stepaction/example-stepaction@c123",
- Digest: map[string]string{
- "sha256": hex.EncodeToString(stepActionChecksum),
- },
- },
- },
- },
}, {
name: "default namespace",
kind: "pipeline",
@@ -399,8 +360,8 @@ func TestResolve(t *testing.T) {
kind: "task",
resourceName: exampleTask.Name,
namespace: "other-ns",
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
- expectedErr: &common.GetResourceError{
+ expectedStatus: internal.CreateResolutionRequestFailureStatus(),
+ expectedErr: &resolutioncommon.GetResourceError{
ResolverName: cluster.ClusterResolverName,
Key: "foo/rr",
Original: errors.New(`tasks.tekton.dev "example-task" not found`),
@@ -411,8 +372,8 @@ func TestResolve(t *testing.T) {
resourceName: exampleTask.Name,
namespace: "other-ns",
allowedNamespaces: "foo,bar",
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
- expectedErr: &common.InvalidRequestError{
+ expectedStatus: internal.CreateResolutionRequestFailureStatus(),
+ expectedErr: &resolutioncommon.InvalidRequestError{
ResolutionRequestKey: "foo/rr",
Message: "access to specified namespace other-ns is not allowed",
},
@@ -422,8 +383,8 @@ func TestResolve(t *testing.T) {
resourceName: exampleTask.Name,
namespace: "other-ns",
blockedNamespaces: "foo,other-ns,bar",
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
- expectedErr: &common.InvalidRequestError{
+ expectedStatus: internal.CreateResolutionRequestFailureStatus(),
+ expectedErr: &resolutioncommon.InvalidRequestError{
ResolutionRequestKey: "foo/rr",
Message: "access to specified namespace other-ns is blocked",
},
@@ -466,7 +427,6 @@ func TestResolve(t *testing.T) {
Pipelines: []*pipelinev1.Pipeline{examplePipeline},
ResolutionRequests: []*v1beta1.ResolutionRequest{request},
Tasks: []*pipelinev1.Task{exampleTask},
- StepActions: []*pipelinev1beta1.StepAction{exampleStepAction},
}
resolver := &cluster.Resolver{}
@@ -511,7 +471,7 @@ func createRequest(kind, name, namespace string) *v1beta1.ResolutionRequest {
Namespace: "foo",
CreationTimestamp: metav1.Time{Time: time.Now()},
Labels: map[string]string{
- common.LabelKeyResolverType: cluster.LabelValueClusterResolverType,
+ resolutioncommon.LabelKeyResolverType: cluster.LabelValueClusterResolverType,
},
},
Spec: v1beta1.ResolutionRequestSpec{
diff --git a/upstream/pkg/resolution/resolver/framework/controller.go b/upstream/pkg/resolution/resolver/framework/controller.go
index 61bc3c04626..f1d270a398a 100644
--- a/upstream/pkg/resolution/resolver/framework/controller.go
+++ b/upstream/pkg/resolution/resolver/framework/controller.go
@@ -46,7 +46,7 @@ type ReconcilerModifier = func(reconciler *Reconciler)
// This sets up a lot of the boilerplate that individual resolvers
// shouldn't need to be concerned with since it's common to all of them.
func NewController(ctx context.Context, resolver Resolver, modifiers ...ReconcilerModifier) func(context.Context, configmap.Watcher) *controller.Impl {
- if err := ValidateResolver(ctx, resolver.GetSelector(ctx)); err != nil {
+ if err := validateResolver(ctx, resolver); err != nil {
panic(err.Error())
}
return func(ctx context.Context, cmw configmap.Watcher) *controller.Impl {
@@ -60,7 +60,7 @@ func NewController(ctx context.Context, resolver Resolver, modifiers ...Reconcil
}
r := &Reconciler{
- LeaderAwareFuncs: LeaderAwareFuncs(rrInformer.Lister()),
+ LeaderAwareFuncs: leaderAwareFuncs(rrInformer.Lister()),
kubeClientSet: kubeclientset,
resolutionRequestLister: rrInformer.Lister(),
resolutionRequestClientSet: rrclientset,
@@ -82,7 +82,7 @@ func NewController(ctx context.Context, resolver Resolver, modifiers ...Reconcil
})
_, err := rrInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
- FilterFunc: FilterResolutionRequestsBySelector(resolver.GetSelector(ctx)),
+ FilterFunc: filterResolutionRequestsBySelector(resolver.GetSelector(ctx)),
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: impl.Enqueue,
UpdateFunc: func(oldObj, newObj interface{}) {
@@ -101,35 +101,7 @@ func NewController(ctx context.Context, resolver Resolver, modifiers ...Reconcil
}
}
-// watchConfigChanges binds a framework.Resolver to updates on its
-// configmap, using knative's configmap helpers. This is only done if
-// the resolver implements the framework.ConfigWatcher interface.
-func watchConfigChanges(ctx context.Context, reconciler *Reconciler, cmw configmap.Watcher) {
- if configWatcher, ok := reconciler.resolver.(ConfigWatcher); ok {
- logger := logging.FromContext(ctx)
- resolverConfigName := configWatcher.GetConfigName(ctx)
- if resolverConfigName == "" {
- panic("resolver returned empty config name")
- }
- reconciler.configStore = NewConfigStore(resolverConfigName, logger)
- reconciler.configStore.WatchConfigs(cmw)
- }
-}
-
-// applyModifiersAndDefaults applies the given modifiers to
-// a reconciler and, after doing so, sets any default values for things
-// that weren't set by a modifier.
-func applyModifiersAndDefaults(ctx context.Context, r *Reconciler, modifiers []ReconcilerModifier) {
- for _, mod := range modifiers {
- mod(r)
- }
-
- if r.Clock == nil {
- r.Clock = clock.RealClock{}
- }
-}
-
-func FilterResolutionRequestsBySelector(selector map[string]string) func(obj interface{}) bool {
+func filterResolutionRequestsBySelector(selector map[string]string) func(obj interface{}) bool {
return func(obj interface{}) bool {
rr, ok := obj.(*v1beta1.ResolutionRequest)
if !ok {
@@ -155,7 +127,7 @@ func FilterResolutionRequestsBySelector(selector map[string]string) func(obj int
// fact that the controller crashes if they're missing. It looks
// like this is bucketing based on labels. Should we use the filter
// selector from above in the call to lister.List here?
-func LeaderAwareFuncs(lister rrlister.ResolutionRequestLister) reconciler.LeaderAwareFuncs {
+func leaderAwareFuncs(lister rrlister.ResolutionRequestLister) reconciler.LeaderAwareFuncs {
return reconciler.LeaderAwareFuncs{
PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
all, err := lister.List(labels.Everything())
@@ -184,7 +156,8 @@ var (
ErrorMissingTypeSelector = ErrMissingTypeSelector
)
-func ValidateResolver(ctx context.Context, sel map[string]string) error {
+func validateResolver(ctx context.Context, r Resolver) error {
+ sel := r.GetSelector(ctx)
if sel == nil {
return ErrMissingTypeSelector
}
@@ -193,3 +166,31 @@ func ValidateResolver(ctx context.Context, sel map[string]string) error {
}
return nil
}
+
+// watchConfigChanges binds a framework.Resolver to updates on its
+// configmap, using knative's configmap helpers. This is only done if
+// the resolver implements the framework.ConfigWatcher interface.
+func watchConfigChanges(ctx context.Context, reconciler *Reconciler, cmw configmap.Watcher) {
+ if configWatcher, ok := reconciler.resolver.(ConfigWatcher); ok {
+ logger := logging.FromContext(ctx)
+ resolverConfigName := configWatcher.GetConfigName(ctx)
+ if resolverConfigName == "" {
+ panic("resolver returned empty config name")
+ }
+ reconciler.configStore = NewConfigStore(resolverConfigName, logger)
+ reconciler.configStore.WatchConfigs(cmw)
+ }
+}
+
+// applyModifiersAndDefaults applies the given modifiers to
+// a reconciler and, after doing so, sets any default values for things
+// that weren't set by a modifier.
+func applyModifiersAndDefaults(ctx context.Context, r *Reconciler, modifiers []ReconcilerModifier) {
+ for _, mod := range modifiers {
+ mod(r)
+ }
+
+ if r.Clock == nil {
+ r.Clock = clock.RealClock{}
+ }
+}
diff --git a/upstream/pkg/resolution/resolver/framework/fakeresolver.go b/upstream/pkg/resolution/resolver/framework/fakeresolver.go
index b22349d6f56..0943199601b 100644
--- a/upstream/pkg/resolution/resolver/framework/fakeresolver.go
+++ b/upstream/pkg/resolution/resolver/framework/fakeresolver.go
@@ -103,10 +103,6 @@ func (r *FakeResolver) GetSelector(_ context.Context) map[string]string {
// ValidateParams returns an error if the given parameter map is not
// valid for a resource request targeting the fake resolver.
func (r *FakeResolver) ValidateParams(_ context.Context, params []pipelinev1.Param) error {
- return ValidateParams(params)
-}
-
-func ValidateParams(params []pipelinev1.Param) error {
paramsMap := make(map[string]pipelinev1.ParamValue)
for _, p := range params {
paramsMap[p.Name] = p.Value
@@ -136,10 +132,6 @@ func ValidateParams(params []pipelinev1.Param) error {
// Resolve performs the work of fetching a file from the fake resolver given a map of
// parameters.
func (r *FakeResolver) Resolve(_ context.Context, params []pipelinev1.Param) (ResolvedResource, error) {
- return Resolve(params, r.ForParam)
-}
-
-func Resolve(params []pipelinev1.Param, forParam map[string]*FakeResolvedResource) (ResolvedResource, error) {
paramsMap := make(map[string]pipelinev1.ParamValue)
for _, p := range params {
paramsMap[p.Name] = p.Value
@@ -147,7 +139,7 @@ func Resolve(params []pipelinev1.Param, forParam map[string]*FakeResolvedResourc
paramValue := paramsMap[FakeParamName].StringVal
- frr, ok := forParam[paramValue]
+ frr, ok := r.ForParam[paramValue]
if !ok {
return nil, fmt.Errorf("couldn't find resource for param value %s", paramValue)
}
@@ -166,14 +158,9 @@ func Resolve(params []pipelinev1.Param, forParam map[string]*FakeResolvedResourc
var _ TimedResolution = &FakeResolver{}
// GetResolutionTimeout returns the configured timeout for the reconciler, or the default time.Duration if not configured.
-func (r *FakeResolver) GetResolutionTimeout(ctx context.Context, defaultTimeout time.Duration, params map[string]string) (time.Duration, error) {
- return GetResolutionTimeout(r.Timeout, defaultTimeout), nil
-}
-
-// GetResolutionTimeout returns the input timeout if set to something greater than 0 or the default time.Duration if not configured.
-func GetResolutionTimeout(timeout, defaultTimeout time.Duration) time.Duration {
- if timeout > 0 {
- return timeout
+func (r *FakeResolver) GetResolutionTimeout(ctx context.Context, defaultTimeout time.Duration) time.Duration {
+ if r.Timeout > 0 {
+ return r.Timeout
}
return defaultTimeout
}
diff --git a/upstream/pkg/resolution/resolver/framework/interface.go b/upstream/pkg/resolution/resolver/framework/interface.go
index 89c1e897a77..33a9efab4d9 100644
--- a/upstream/pkg/resolution/resolver/framework/interface.go
+++ b/upstream/pkg/resolution/resolver/framework/interface.go
@@ -26,8 +26,6 @@ import (
// Resolver is the interface to implement for type-specific resource
// resolution. It fetches resources from a given type of remote location
// and returns their content along with any associated annotations.
-//
-// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework.Resolver] instead.
type Resolver interface {
// Initialize is called at the moment the resolver controller is
// instantiated and is a good place to setup things like
@@ -90,7 +88,7 @@ type TimedResolution interface {
// object, which includes any request-scoped data like
// resolver config and the request's originating namespace,
// along with a default.
- GetResolutionTimeout(ctx context.Context, timeout time.Duration, params map[string]string) (time.Duration, error)
+ GetResolutionTimeout(ctx context.Context, timeout time.Duration) time.Duration
}
// ResolvedResource returns the data and annotations of a successful
diff --git a/upstream/pkg/resolution/resolver/framework/reconciler.go b/upstream/pkg/resolution/resolver/framework/reconciler.go
index 314d801c2b1..b981ea6e02a 100644
--- a/upstream/pkg/resolution/resolver/framework/reconciler.go
+++ b/upstream/pkg/resolution/resolver/framework/reconciler.go
@@ -43,8 +43,6 @@ import (
// Reconciler handles ResolutionRequest objects, performs functionality
// common to all resolvers and delegates resolver-specific actions
// to its embedded type-specific Resolver object.
-//
-// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/framework.Reconciler] instead.
type Reconciler struct {
// Implements reconciler.LeaderAware
reconciler.LeaderAwareFuncs
@@ -109,18 +107,9 @@ func (r *Reconciler) resolve(ctx context.Context, key string, rr *v1beta1.Resolu
errChan := make(chan error)
resourceChan := make(chan ResolvedResource)
- paramsMap := make(map[string]string)
- for _, p := range rr.Spec.Params {
- paramsMap[p.Name] = p.Value.StringVal
- }
-
timeoutDuration := defaultMaximumResolutionDuration
if timed, ok := r.resolver.(TimedResolution); ok {
- var err error
- timeoutDuration, err = timed.GetResolutionTimeout(ctx, defaultMaximumResolutionDuration, paramsMap)
- if err != nil {
- return err
- }
+ timeoutDuration = timed.GetResolutionTimeout(ctx, defaultMaximumResolutionDuration)
}
// A new context is created for resolution so that timeouts can
diff --git a/upstream/pkg/resolution/resolver/framework/reconciler_test.go b/upstream/pkg/resolution/resolver/framework/reconciler_test.go
index e3164efed81..53b3bfb7a04 100644
--- a/upstream/pkg/resolution/resolver/framework/reconciler_test.go
+++ b/upstream/pkg/resolution/resolver/framework/reconciler_test.go
@@ -30,7 +30,7 @@ import (
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
- "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
+ framework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
"github.com/tektoncd/pipeline/test"
"github.com/tektoncd/pipeline/test/diff"
"github.com/tektoncd/pipeline/test/names"
diff --git a/upstream/pkg/resolution/resolver/git/config.go b/upstream/pkg/resolution/resolver/git/config.go
index 975166d637a..a085bdfac39 100644
--- a/upstream/pkg/resolution/resolver/git/config.go
+++ b/upstream/pkg/resolution/resolver/git/config.go
@@ -16,30 +16,21 @@ limitations under the License.
package git
-import (
- "context"
- "fmt"
- "reflect"
- "strings"
-
- "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
-)
-
const (
- // DefaultTimeoutKey is the configuration field name for controlling
+ // defaultTimeoutKey is the configuration field name for controlling
// the maximum duration of a resolution request for a file from git.
- DefaultTimeoutKey = "fetch-timeout"
+ defaultTimeoutKey = "fetch-timeout"
- // DefaultURLKey is the configuration field name for controlling
+ // defaultURLKey is the configuration field name for controlling
// the git url to fetch the remote resource from.
- DefaultURLKey = "default-url"
+ defaultURLKey = "default-url"
- // DefaultRevisionKey is the configuration field name for controlling
+ // defaultRevisionKey is the configuration field name for controlling
// the revision to fetch the remote resource from.
- DefaultRevisionKey = "default-revision"
+ defaultRevisionKey = "default-revision"
- // DefaultOrgKey is the configuration field name for setting a default organization when using the SCM API.
- DefaultOrgKey = "default-org"
+ // defaultOrgKey is the configuration field name for setting a default organization when using the SCM API.
+ defaultOrgKey = "default-org"
// ServerURLKey is the config map key for the SCM provider URL
ServerURLKey = "server-url"
@@ -52,55 +43,3 @@ const (
// APISecretNamespaceKey is the config map key for the token secret's namespace
APISecretNamespaceKey = "api-token-secret-namespace"
)
-
-type GitResolverConfig map[string]ScmConfig
-
-type ScmConfig struct {
- Timeout string `json:"fetch-timeout"`
- URL string `json:"default-url"`
- Revision string `json:"default-revision"`
- Org string `json:"default-org"`
- ServerURL string `json:"server-url"`
- SCMType string `json:"scm-type"`
- APISecretName string `json:"api-token-secret-name"`
- APISecretKey string `json:"api-token-secret-key"`
- APISecretNamespace string `json:"api-token-secret-namespace"`
-}
-
-func GetGitResolverConfig(ctx context.Context) (GitResolverConfig, error) {
- var scmConfig interface{} = &ScmConfig{}
- structType := reflect.TypeOf(scmConfig).Elem()
- gitResolverConfig := map[string]ScmConfig{}
- conf := framework.GetResolverConfigFromContext(ctx)
- for key, value := range conf {
- var configIdentifier, configKey string
- splittedKeyName := strings.Split(key, ".")
- switch len(splittedKeyName) {
- case 2:
- configKey = splittedKeyName[1]
- configIdentifier = splittedKeyName[0]
- case 1:
- configKey = key
- configIdentifier = "default"
- default:
- return nil, fmt.Errorf("key %s passed in git resolver configmap is invalid", key)
- }
- _, ok := gitResolverConfig[configIdentifier]
- if !ok {
- gitResolverConfig[configIdentifier] = ScmConfig{}
- }
- for i := range structType.NumField() {
- field := structType.Field(i)
- fieldName := field.Name
- jsonTag := field.Tag.Get("json")
- if configKey == jsonTag {
- tokenDetails := gitResolverConfig[configIdentifier]
- var scm interface{} = &tokenDetails
- structValue := reflect.ValueOf(scm).Elem()
- structValue.FieldByName(fieldName).SetString(value)
- gitResolverConfig[configIdentifier] = structValue.Interface().(ScmConfig)
- }
- }
- }
- return gitResolverConfig, nil
-}
diff --git a/upstream/pkg/resolution/resolver/git/config_test.go b/upstream/pkg/resolution/resolver/git/config_test.go
deleted file mode 100644
index b3ec9504afd..00000000000
--- a/upstream/pkg/resolution/resolver/git/config_test.go
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package git
-
-import (
- "context"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- resolutionframework "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
- "github.com/tektoncd/pipeline/test/diff"
-)
-
-func TestGetGitResolverConfig(t *testing.T) {
- tests := []struct {
- name string
- wantErr bool
- expectedErr string
- config map[string]string
- expectedConfig GitResolverConfig
- }{
- {
- name: "no config",
- config: map[string]string{},
- expectedConfig: GitResolverConfig{},
- },
- {
- name: "default config",
- config: map[string]string{
- DefaultURLKey: "https://github.com",
- DefaultRevisionKey: "main",
- DefaultOrgKey: "tektoncd",
- },
- expectedConfig: GitResolverConfig{
- "default": ScmConfig{
- URL: "https://github.com",
- Revision: "main",
- Org: "tektoncd",
- },
- },
- },
- {
- name: "default config with default key",
- config: map[string]string{
- "default." + DefaultURLKey: "https://github.com",
- "default." + DefaultRevisionKey: "main",
- },
- expectedConfig: GitResolverConfig{
- "default": ScmConfig{
- URL: "https://github.com",
- Revision: "main",
- },
- },
- },
- {
- name: "config with custom key",
- config: map[string]string{
- "test." + DefaultURLKey: "https://github.com",
- "test." + DefaultRevisionKey: "main",
- },
- expectedConfig: GitResolverConfig{
- "test": ScmConfig{
- URL: "https://github.com",
- Revision: "main",
- },
- },
- },
- {
- name: "config with custom key and no key",
- config: map[string]string{
- DefaultURLKey: "https://github.com",
- DefaultRevisionKey: "main",
- "test." + DefaultURLKey: "https://github.com",
- "test." + DefaultRevisionKey: "main",
- },
- expectedConfig: GitResolverConfig{
- "default": ScmConfig{
- URL: "https://github.com",
- Revision: "main",
- },
- "test": ScmConfig{
- URL: "https://github.com",
- Revision: "main",
- },
- },
- },
- {
- name: "config with both default and custom key",
- config: map[string]string{
- "default." + DefaultURLKey: "https://github.com",
- "default." + DefaultRevisionKey: "main",
- "test." + DefaultURLKey: "https://github.com",
- "test." + DefaultRevisionKey: "main",
- },
- expectedConfig: GitResolverConfig{
- "default": ScmConfig{
- URL: "https://github.com",
- Revision: "main",
- },
- "test": ScmConfig{
- URL: "https://github.com",
- Revision: "main",
- },
- },
- },
- {
- name: "config with invalid format",
- config: map[string]string{
- "default.." + DefaultURLKey: "https://github.com",
- },
- wantErr: true,
- expectedErr: "key default..default-url passed in git resolver configmap is invalid",
- expectedConfig: nil,
- },
- }
- for _, tc := range tests {
- t.Run(tc.name, func(t *testing.T) {
- ctx := resolutionframework.InjectResolverConfigToContext(context.Background(), tc.config)
- gitResolverConfig, err := GetGitResolverConfig(ctx)
- if tc.wantErr {
- if err == nil {
- t.Fatalf("unexpected error parsing git resolver config: %v", err)
- }
- if d := cmp.Diff(tc.expectedErr, err.Error()); d != "" {
- t.Errorf("unexpected error: %s", diff.PrintWantGot(d))
- }
- }
- if d := cmp.Diff(tc.expectedConfig, gitResolverConfig); d != "" {
- t.Errorf("expected config: %s", diff.PrintWantGot(d))
- }
- })
- }
-}
diff --git a/upstream/pkg/resolution/resolver/git/params.go b/upstream/pkg/resolution/resolver/git/params.go
index d7cd114d8d6..679d0b0e9f5 100644
--- a/upstream/pkg/resolution/resolver/git/params.go
+++ b/upstream/pkg/resolution/resolver/git/params.go
@@ -16,29 +16,25 @@ limitations under the License.
package git
-import "github.com/tektoncd/pipeline/pkg/resolution/resource"
-
const (
- // UrlParam is the git repo Url when using the anonymous/full clone approach
- UrlParam string = resource.ParamURL
- // OrgParam is the organization to find the repository in when using the SCM API approach
- OrgParam = "org"
- // RepoParam is the repository to use when using the SCM API approach
- RepoParam = "repo"
- // PathParam is the pathInRepo into the git repo where a file is located. This is used with both approaches.
- PathParam string = "pathInRepo"
- // RevisionParam is the git revision that a file should be fetched from. This is used with both approaches.
- RevisionParam string = "revision"
- // TokenParam is an optional reference to a secret name for SCM API authentication
- TokenParam string = "token"
- // TokenKeyParam is an optional reference to a key in the TokenParam secret for SCM API authentication
- TokenKeyParam string = "tokenKey"
- // DefaultTokenKeyParam is the default key in the TokenParam secret for SCM API authentication
- DefaultTokenKeyParam string = "token"
- // scmTypeParam is an optional string overriding the scm-type configuration (ie: github, gitea, gitlab etc..)
- ScmTypeParam string = "scmType"
- // serverURLParam is an optional string to the server URL for the SCM API to connect to
- ServerURLParam string = "serverURL"
- // ConfigKeyParam is an optional string to provid which scm configuration to use from git resolver configmap
- ConfigKeyParam string = "configKey"
+ // urlParam is the git repo url when using the anonymous/full clone approach
+ urlParam string = "url"
+ // orgParam is the organization to find the repository in when using the SCM API approach
+ orgParam = "org"
+ // repoParam is the repository to use when using the SCM API approach
+ repoParam = "repo"
+ // pathParam is the pathInRepo into the git repo where a file is located. This is used with both approaches.
+ pathParam string = "pathInRepo"
+ // revisionParam is the git revision that a file should be fetched from. This is used with both approaches.
+ revisionParam string = "revision"
+ // tokenParam is an optional reference to a secret name for SCM API authentication
+ tokenParam string = "token"
+ // tokenKeyParam is an optional reference to a key in the tokenParam secret for SCM API authentication
+ tokenKeyParam string = "tokenKey"
+ // defaultTokenKeyParam is the default key in the tokenParam secret for SCM API authentication
+ defaultTokenKeyParam string = "token"
+ // scmTypeParams is an optional string overriding the scm-type configuration (ie: github, gitea, gitlab etc..)
+ scmTypeParam string = "scmType"
+ // serverURLParams is an optional string to the server URL for the SCM API to connect to
+ serverURLParam string = "serverURL"
)
diff --git a/upstream/pkg/resolution/resolver/git/resolver.go b/upstream/pkg/resolution/resolver/git/resolver.go
index 73815e96bbb..34fcd6ad18f 100644
--- a/upstream/pkg/resolution/resolver/git/resolver.go
+++ b/upstream/pkg/resolution/resolver/git/resolver.go
@@ -36,7 +36,8 @@ import (
"github.com/jenkins-x/go-scm/scm/factory"
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- common "github.com/tektoncd/pipeline/pkg/resolution/common"
+ "github.com/tektoncd/pipeline/pkg/resolution/common"
+ resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
"go.uber.org/zap"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -73,8 +74,6 @@ const (
var _ framework.Resolver = &Resolver{}
// Resolver implements a framework.Resolver that can fetch files from git.
-//
-// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/git.Resolver] instead.
type Resolver struct {
kubeClient kubernetes.Interface
logger *zap.SugaredLogger
@@ -107,45 +106,41 @@ func (r *Resolver) GetName(_ context.Context) string {
// the gitresolver to process them.
func (r *Resolver) GetSelector(_ context.Context) map[string]string {
return map[string]string{
- common.LabelKeyResolverType: labelValueGitResolverType,
+ resolutioncommon.LabelKeyResolverType: labelValueGitResolverType,
}
}
// ValidateParams returns an error if the given parameter map is not
// valid for a resource request targeting the gitresolver.
func (r *Resolver) ValidateParams(ctx context.Context, params []pipelinev1.Param) error {
- return ValidateParams(ctx, params)
+ if r.isDisabled(ctx) {
+ return errors.New(disabledError)
+ }
+
+ _, err := populateDefaultParams(ctx, params)
+ if err != nil {
+ return err
+ }
+ return nil
}
// Resolve performs the work of fetching a file from git given a map of
// parameters.
func (r *Resolver) Resolve(ctx context.Context, origParams []pipelinev1.Param) (framework.ResolvedResource, error) {
- if IsDisabled(ctx) {
+ if r.isDisabled(ctx) {
return nil, errors.New(disabledError)
}
- params, err := PopulateDefaultParams(ctx, origParams)
+ params, err := populateDefaultParams(ctx, origParams)
if err != nil {
return nil, err
}
- if params[UrlParam] != "" {
- return ResolveAnonymousGit(ctx, params)
+ if params[urlParam] != "" {
+ return r.resolveAnonymousGit(ctx, params)
}
- return ResolveAPIGit(ctx, params, r.kubeClient, r.logger, r.cache, r.ttl, r.clientFunc)
-}
-
-func ValidateParams(ctx context.Context, params []pipelinev1.Param) error {
- if IsDisabled(ctx) {
- return errors.New(disabledError)
- }
-
- _, err := PopulateDefaultParams(ctx, params)
- if err != nil {
- return err
- }
- return nil
+ return r.resolveAPIGit(ctx, params)
}
// validateRepoURL validates if the given URL is a valid git, http, https URL or
@@ -157,22 +152,83 @@ func validateRepoURL(url string) bool {
return re.MatchString(url)
}
-func ResolveAnonymousGit(ctx context.Context, params map[string]string) (framework.ResolvedResource, error) {
- conf, err := GetScmConfigForParamConfigKey(ctx, params)
+func (r *Resolver) resolveAPIGit(ctx context.Context, params map[string]string) (framework.ResolvedResource, error) {
+ // If we got here, the "repo" param was specified, so use the API approach
+ scmType, serverURL, err := r.getSCMTypeAndServerURL(ctx, params)
if err != nil {
return nil, err
}
- repo := params[UrlParam]
+ secretRef := &secretCacheKey{
+ name: params[tokenParam],
+ key: params[tokenKeyParam],
+ }
+ if secretRef.name != "" {
+ if secretRef.key == "" {
+ secretRef.key = defaultTokenKeyParam
+ }
+ secretRef.ns = common.RequestNamespace(ctx)
+ } else {
+ secretRef = nil
+ }
+ apiToken, err := r.getAPIToken(ctx, secretRef)
+ if err != nil {
+ return nil, err
+ }
+ scmClient, err := r.clientFunc(scmType, serverURL, string(apiToken))
+ if err != nil {
+ return nil, fmt.Errorf("failed to create SCM client: %w", err)
+ }
+
+ orgRepo := fmt.Sprintf("%s/%s", params[orgParam], params[repoParam])
+ path := params[pathParam]
+ ref := params[revisionParam]
+
+ // fetch the actual content from a file in the repo
+ content, _, err := scmClient.Contents.Find(ctx, orgRepo, path, ref)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't fetch resource content: %w", err)
+ }
+ if content == nil || len(content.Data) == 0 {
+ return nil, fmt.Errorf("no content for resource in %s %s", orgRepo, path)
+ }
+
+ // find the actual git commit sha by the ref
+ commit, _, err := scmClient.Git.FindCommit(ctx, orgRepo, ref)
+ if err != nil || commit == nil {
+ return nil, fmt.Errorf("couldn't fetch the commit sha for the ref %s in the repo: %w", ref, err)
+ }
+
+ // fetch the repository URL
+ repo, _, err := scmClient.Repositories.Find(ctx, orgRepo)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't fetch repository: %w", err)
+ }
+
+ return &resolvedGitResource{
+ Content: content.Data,
+ Revision: commit.Sha,
+ Org: params[orgParam],
+ Repo: params[repoParam],
+ Path: content.Path,
+ URL: repo.Clone,
+ }, nil
+}
+
+func (r *Resolver) resolveAnonymousGit(ctx context.Context, params map[string]string) (framework.ResolvedResource, error) {
+ conf := framework.GetResolverConfigFromContext(ctx)
+ repo := params[urlParam]
if repo == "" {
- urlString := conf.URL
- if urlString == "" {
+ if urlString, ok := conf[defaultURLKey]; ok {
+ repo = urlString
+ } else {
return nil, errors.New("default Git Repo Url was not set during installation of the git resolver")
}
}
- revision := params[RevisionParam]
+ revision := params[revisionParam]
if revision == "" {
- revisionString := conf.Revision
- if revisionString == "" {
+ if revisionString, ok := conf[defaultRevisionKey]; ok {
+ revision = revisionString
+ } else {
return nil, errors.New("default Git Revision was not set during installation of the git resolver")
}
}
@@ -215,7 +271,7 @@ func ResolveAnonymousGit(ctx context.Context, params map[string]string) (framewo
return nil, fmt.Errorf("checkout error: %w", err)
}
- path := params[PathParam]
+ path := params[pathParam]
f, err := filesystem.Open(path)
if err != nil {
@@ -231,8 +287,8 @@ func ResolveAnonymousGit(ctx context.Context, params map[string]string) (framewo
return &resolvedGitResource{
Revision: h.String(),
Content: buf.Bytes(),
- URL: params[UrlParam],
- Path: params[PathParam],
+ URL: params[urlParam],
+ Path: params[pathParam],
}, nil
}
@@ -248,87 +304,20 @@ var _ framework.TimedResolution = &Resolver{}
// GetResolutionTimeout returns a time.Duration for the amount of time a
// single git fetch may take. This can be configured with the
// fetch-timeout field in the git-resolver-config configmap.
-func (r *Resolver) GetResolutionTimeout(ctx context.Context, defaultTimeout time.Duration, params map[string]string) (time.Duration, error) {
- conf, err := GetScmConfigForParamConfigKey(ctx, params)
- if err != nil {
- return time.Duration(0), err
- }
- if timeoutString := conf.Timeout; timeoutString != "" {
+func (r *Resolver) GetResolutionTimeout(ctx context.Context, defaultTimeout time.Duration) time.Duration {
+ conf := framework.GetResolverConfigFromContext(ctx)
+ if timeoutString, ok := conf[defaultTimeoutKey]; ok {
timeout, err := time.ParseDuration(timeoutString)
- if err != nil {
- return time.Duration(0), err
+ if err == nil {
+ return timeout
}
- return timeout, nil
}
- return defaultTimeout, nil
+ return defaultTimeout
}
-func PopulateDefaultParams(ctx context.Context, params []pipelinev1.Param) (map[string]string, error) {
- paramsMap := make(map[string]string)
- for _, p := range params {
- paramsMap[p.Name] = p.Value.StringVal
- }
-
- conf, err := GetScmConfigForParamConfigKey(ctx, paramsMap)
- if err != nil {
- return nil, err
- }
-
- var missingParams []string
-
- if _, ok := paramsMap[RevisionParam]; !ok {
- defaultRevision := conf.Revision
- if defaultRevision != "" {
- paramsMap[RevisionParam] = defaultRevision
- } else {
- missingParams = append(missingParams, RevisionParam)
- }
- }
- if _, ok := paramsMap[PathParam]; !ok {
- missingParams = append(missingParams, PathParam)
- }
-
- if paramsMap[UrlParam] != "" && paramsMap[RepoParam] != "" {
- return nil, fmt.Errorf("cannot specify both '%s' and '%s'", UrlParam, RepoParam)
- }
-
- if paramsMap[UrlParam] == "" && paramsMap[RepoParam] == "" {
- urlString := conf.URL
- if urlString != "" {
- paramsMap[UrlParam] = urlString
- } else {
- return nil, fmt.Errorf("must specify one of '%s' or '%s'", UrlParam, RepoParam)
- }
- }
-
- if paramsMap[RepoParam] != "" {
- if _, ok := paramsMap[OrgParam]; !ok {
- defaultOrg := conf.Org
- if defaultOrg != "" {
- paramsMap[OrgParam] = defaultOrg
- } else {
- return nil, fmt.Errorf("'%s' is required when '%s' is specified", OrgParam, RepoParam)
- }
- }
- }
- if len(missingParams) > 0 {
- return nil, fmt.Errorf("missing required git resolver params: %s", strings.Join(missingParams, ", "))
- }
-
- // validate the url params if we are not using the SCM API
- if paramsMap[RepoParam] == "" && paramsMap[OrgParam] == "" && !validateRepoURL(paramsMap[UrlParam]) {
- return nil, fmt.Errorf("invalid git repository url: %s", paramsMap[UrlParam])
- }
-
- // TODO(sbwsg): validate pathInRepo is valid relative pathInRepo
- return paramsMap, nil
-}
-
-// supports the SPDX format which is recommended by in-toto
-// ref: https://spdx.dev/spdx-specification-21-web-version/#h.49x2ik5
-// ref: https://github.com/in-toto/attestation/blob/main/spec/field_types.md
-func spdxGit(url string) string {
- return "git+" + url
+func (r *Resolver) isDisabled(ctx context.Context) bool {
+ cfg := resolverconfig.FromContextOrDefaults(ctx)
+ return !cfg.FeatureFlags.EnableGitResolver
}
// resolvedGitResource implements framework.ResolvedResource and returns
@@ -353,10 +342,10 @@ func (r *resolvedGitResource) Data() []byte {
// from git.
func (r *resolvedGitResource) Annotations() map[string]string {
m := map[string]string{
- AnnotationKeyRevision: r.Revision,
- AnnotationKeyPath: r.Path,
- AnnotationKeyURL: r.URL,
- common.AnnotationKeyContentType: yamlContentType,
+ AnnotationKeyRevision: r.Revision,
+ AnnotationKeyPath: r.Path,
+ AnnotationKeyURL: r.URL,
+ resolutioncommon.AnnotationKeyContentType: yamlContentType,
}
if r.Org != "" {
@@ -387,73 +376,35 @@ type secretCacheKey struct {
key string
}
-func ResolveAPIGit(ctx context.Context, params map[string]string, kubeclient kubernetes.Interface, logger *zap.SugaredLogger, cache *cache.LRUExpireCache, ttl time.Duration, clientFunc func(string, string, string, ...factory.ClientOptionFunc) (*scm.Client, error)) (framework.ResolvedResource, error) {
- // If we got here, the "repo" param was specified, so use the API approach
- scmType, serverURL, err := getSCMTypeAndServerURL(ctx, params)
- if err != nil {
- return nil, err
- }
- secretRef := &secretCacheKey{
- name: params[TokenParam],
- key: params[TokenKeyParam],
- }
- if secretRef.name != "" {
- if secretRef.key == "" {
- secretRef.key = DefaultTokenKeyParam
- }
- secretRef.ns = common.RequestNamespace(ctx)
- } else {
- secretRef = nil
- }
- apiToken, err := getAPIToken(ctx, secretRef, kubeclient, logger, cache, ttl, params)
- if err != nil {
- return nil, err
- }
- scmClient, err := clientFunc(scmType, serverURL, string(apiToken))
- if err != nil {
- return nil, fmt.Errorf("failed to create SCM client: %w", err)
- }
+func (r *Resolver) getSCMTypeAndServerURL(ctx context.Context, params map[string]string) (string, string, error) {
+ conf := framework.GetResolverConfigFromContext(ctx)
- orgRepo := fmt.Sprintf("%s/%s", params[OrgParam], params[RepoParam])
- path := params[PathParam]
- ref := params[RevisionParam]
-
- // fetch the actual content from a file in the repo
- content, _, err := scmClient.Contents.Find(ctx, orgRepo, path, ref)
- if err != nil {
- return nil, fmt.Errorf("couldn't fetch resource content: %w", err)
+ var scmType, serverURL string
+ if key, ok := params[scmTypeParam]; ok {
+ scmType = key
}
- if content == nil || len(content.Data) == 0 {
- return nil, fmt.Errorf("no content for resource in %s %s", orgRepo, path)
+ if scmType == "" {
+ if key, ok := conf[SCMTypeKey]; ok && scmType == "" {
+ scmType = key
+ } else {
+ return "", "", fmt.Errorf("missing or empty %s value in configmap", SCMTypeKey)
+ }
}
-
- // find the actual git commit sha by the ref
- commit, _, err := scmClient.Git.FindCommit(ctx, orgRepo, ref)
- if err != nil || commit == nil {
- return nil, fmt.Errorf("couldn't fetch the commit sha for the ref %s in the repo: %w", ref, err)
+ if key, ok := params[serverURLParam]; ok {
+ serverURL = key
}
-
- // fetch the repository URL
- repo, _, err := scmClient.Repositories.Find(ctx, orgRepo)
- if err != nil {
- return nil, fmt.Errorf("couldn't fetch repository: %w", err)
+ if serverURL == "" {
+ if key, ok := conf[ServerURLKey]; ok && serverURL == "" {
+ serverURL = key
+ } else {
+ return "", "", fmt.Errorf("missing or empty %s value in configmap", ServerURLKey)
+ }
}
-
- return &resolvedGitResource{
- Content: content.Data,
- Revision: commit.Sha,
- Org: params[OrgParam],
- Repo: params[RepoParam],
- Path: content.Path,
- URL: repo.Clone,
- }, nil
+ return scmType, serverURL, nil
}
-func getAPIToken(ctx context.Context, apiSecret *secretCacheKey, kubeclient kubernetes.Interface, logger *zap.SugaredLogger, cache *cache.LRUExpireCache, ttl time.Duration, params map[string]string) ([]byte, error) {
- conf, err := GetScmConfigForParamConfigKey(ctx, params)
- if err != nil {
- return nil, err
- }
+func (r *Resolver) getAPIToken(ctx context.Context, apiSecret *secretCacheKey) ([]byte, error) {
+ conf := framework.GetResolverConfigFromContext(ctx)
ok := false
@@ -465,96 +416,114 @@ func getAPIToken(ctx context.Context, apiSecret *secretCacheKey, kubeclient kube
}
if apiSecret.name == "" {
- apiSecret.name = conf.APISecretName
- if apiSecret.name == "" {
- err := fmt.Errorf("cannot get API token, required when specifying '%s' param, '%s' not specified in config", RepoParam, APISecretNameKey)
- logger.Info(err)
+ if apiSecret.name, ok = conf[APISecretNameKey]; !ok || apiSecret.name == "" {
+ err := fmt.Errorf("cannot get API token, required when specifying '%s' param, '%s' not specified in config", repoParam, APISecretNameKey)
+ r.logger.Info(err)
return nil, err
}
}
if apiSecret.key == "" {
- apiSecret.key = conf.APISecretKey
- if apiSecret.key == "" {
- err := fmt.Errorf("cannot get API token, required when specifying '%s' param, '%s' not specified in config", RepoParam, APISecretKeyKey)
- logger.Info(err)
+ if apiSecret.key, ok = conf[APISecretKeyKey]; !ok || apiSecret.key == "" {
+ err := fmt.Errorf("cannot get API token, required when specifying '%s' param, '%s' not specified in config", repoParam, APISecretKeyKey)
+ r.logger.Info(err)
return nil, err
}
}
if apiSecret.ns == "" {
- apiSecret.ns = conf.APISecretNamespace
- if apiSecret.ns == "" {
+ if apiSecret.ns, ok = conf[APISecretNamespaceKey]; !ok {
apiSecret.ns = os.Getenv("SYSTEM_NAMESPACE")
}
}
if cacheSecret {
- val, ok := cache.Get(apiSecret)
+ val, ok := r.cache.Get(apiSecret)
if ok {
return val.([]byte), nil
}
}
- secret, err := kubeclient.CoreV1().Secrets(apiSecret.ns).Get(ctx, apiSecret.name, metav1.GetOptions{})
+ secret, err := r.kubeClient.CoreV1().Secrets(apiSecret.ns).Get(ctx, apiSecret.name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
notFoundErr := fmt.Errorf("cannot get API token, secret %s not found in namespace %s", apiSecret.name, apiSecret.ns)
- logger.Info(notFoundErr)
+ r.logger.Info(notFoundErr)
return nil, notFoundErr
}
wrappedErr := fmt.Errorf("error reading API token from secret %s in namespace %s: %w", apiSecret.name, apiSecret.ns, err)
- logger.Info(wrappedErr)
+ r.logger.Info(wrappedErr)
return nil, wrappedErr
}
secretVal, ok := secret.Data[apiSecret.key]
if !ok {
err := fmt.Errorf("cannot get API token, key %s not found in secret %s in namespace %s", apiSecret.key, apiSecret.name, apiSecret.ns)
- logger.Info(err)
+ r.logger.Info(err)
return nil, err
}
if cacheSecret {
- cache.Add(apiSecret, secretVal, ttl)
+ r.cache.Add(apiSecret, secretVal, r.ttl)
}
return secretVal, nil
}
-func getSCMTypeAndServerURL(ctx context.Context, params map[string]string) (string, string, error) {
- conf, err := GetScmConfigForParamConfigKey(ctx, params)
- if err != nil {
- return "", "", err
- }
+func populateDefaultParams(ctx context.Context, params []pipelinev1.Param) (map[string]string, error) {
+ conf := framework.GetResolverConfigFromContext(ctx)
- var scmType, serverURL string
- if key, ok := params[ScmTypeParam]; ok {
- scmType = key
- }
- if scmType == "" {
- scmType = conf.SCMType
+ paramsMap := make(map[string]string)
+ for _, p := range params {
+ paramsMap[p.Name] = p.Value.StringVal
}
- if key, ok := params[ServerURLParam]; ok {
- serverURL = key
+
+ var missingParams []string
+
+ if _, ok := paramsMap[revisionParam]; !ok {
+ if defaultRevision, ok := conf[defaultRevisionKey]; ok {
+ paramsMap[revisionParam] = defaultRevision
+ } else {
+ missingParams = append(missingParams, revisionParam)
+ }
}
- if serverURL == "" {
- serverURL = conf.ServerURL
+ if _, ok := paramsMap[pathParam]; !ok {
+ missingParams = append(missingParams, pathParam)
}
- return scmType, serverURL, nil
-}
-func IsDisabled(ctx context.Context) bool {
- cfg := resolverconfig.FromContextOrDefaults(ctx)
- return !cfg.FeatureFlags.EnableGitResolver
-}
+ if paramsMap[urlParam] != "" && paramsMap[repoParam] != "" {
+ return nil, fmt.Errorf("cannot specify both '%s' and '%s'", urlParam, repoParam)
+ }
-func GetScmConfigForParamConfigKey(ctx context.Context, params map[string]string) (ScmConfig, error) {
- gitResolverConfig, err := GetGitResolverConfig(ctx)
- if err != nil {
- return ScmConfig{}, err
+ if paramsMap[urlParam] == "" && paramsMap[repoParam] == "" {
+ if urlString, ok := conf[defaultURLKey]; ok {
+ paramsMap[urlParam] = urlString
+ } else {
+ return nil, fmt.Errorf("must specify one of '%s' or '%s'", urlParam, repoParam)
+ }
}
- if configKeyToUse, ok := params[ConfigKeyParam]; ok {
- if config, exist := gitResolverConfig[configKeyToUse]; exist {
- return config, nil
+
+ if paramsMap[repoParam] != "" {
+ if _, ok := paramsMap[orgParam]; !ok {
+ if defaultOrg, ok := conf[defaultOrgKey]; ok {
+ paramsMap[orgParam] = defaultOrg
+ } else {
+ return nil, fmt.Errorf("'%s' is required when '%s' is specified", orgParam, repoParam)
+ }
}
- return ScmConfig{}, fmt.Errorf("no git resolver configuration found for configKey %s", configKeyToUse)
}
- return gitResolverConfig["default"], nil
+ if len(missingParams) > 0 {
+ return nil, fmt.Errorf("missing required git resolver params: %s", strings.Join(missingParams, ", "))
+ }
+
+ // validate the url params if we are not using the SCM API
+ if paramsMap[repoParam] == "" && paramsMap[orgParam] == "" && !validateRepoURL(paramsMap[urlParam]) {
+ return nil, fmt.Errorf("invalid git repository url: %s", paramsMap[urlParam])
+ }
+
+ // TODO(sbwsg): validate pathInRepo is valid relative pathInRepo
+ return paramsMap, nil
+}
+
+// supports the SPDX format which is recommended by in-toto
+// ref: https://spdx.dev/spdx-specification-21-web-version/#h.49x2ik5
+// ref: https://github.com/in-toto/attestation/blob/main/spec/field_types.md
+func spdxGit(url string) string {
+ return "git+" + url
}
diff --git a/upstream/pkg/resolution/resolver/git/resolver_test.go b/upstream/pkg/resolution/resolver/git/resolver_test.go
index 9001ab27cbe..a9f4c0490d9 100644
--- a/upstream/pkg/resolution/resolver/git/resolver_test.go
+++ b/upstream/pkg/resolution/resolver/git/resolver_test.go
@@ -36,11 +36,11 @@ import (
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- "github.com/tektoncd/pipeline/pkg/internal/resolution"
ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
- "github.com/tektoncd/pipeline/pkg/resolution/common"
+ resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
frtesting "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework/testing"
+ "github.com/tektoncd/pipeline/pkg/resolution/resolver/internal"
"github.com/tektoncd/pipeline/test"
"github.com/tektoncd/pipeline/test/diff"
corev1 "k8s.io/api/core/v1"
@@ -52,7 +52,7 @@ import (
func TestGetSelector(t *testing.T) {
resolver := Resolver{}
sel := resolver.GetSelector(context.Background())
- if typ, has := sel[common.LabelKeyResolverType]; !has {
+ if typ, has := sel[resolutioncommon.LabelKeyResolverType]; !has {
t.Fatalf("unexpected selector: %v", sel)
} else if typ != labelValueGitResolverType {
t.Fatalf("unexpected type: %q", typ)
@@ -68,57 +68,57 @@ func TestValidateParams(t *testing.T) {
{
name: "params with revision",
params: map[string]string{
- UrlParam: "http://foo/bar/hello/moto",
- PathParam: "bar",
- RevisionParam: "baz",
+ urlParam: "http://foo/bar/hello/moto",
+ pathParam: "bar",
+ revisionParam: "baz",
},
},
{
name: "https url",
params: map[string]string{
- UrlParam: "https://foo/bar/hello/moto",
- PathParam: "bar",
- RevisionParam: "baz",
+ urlParam: "https://foo/bar/hello/moto",
+ pathParam: "bar",
+ revisionParam: "baz",
},
},
{
name: "https url with username password",
params: map[string]string{
- UrlParam: "https://user:pass@foo/bar/hello/moto",
- PathParam: "bar",
- RevisionParam: "baz",
+ urlParam: "https://user:pass@foo/bar/hello/moto",
+ pathParam: "bar",
+ revisionParam: "baz",
},
},
{
name: "git server url",
params: map[string]string{
- UrlParam: "git://repo/hello/moto",
- PathParam: "bar",
- RevisionParam: "baz",
+ urlParam: "git://repo/hello/moto",
+ pathParam: "bar",
+ revisionParam: "baz",
},
},
{
name: "git url from a local repository",
params: map[string]string{
- UrlParam: "/tmp/repo",
- PathParam: "bar",
- RevisionParam: "baz",
+ urlParam: "/tmp/repo",
+ pathParam: "bar",
+ revisionParam: "baz",
},
},
{
name: "git url from a git ssh repository",
params: map[string]string{
- UrlParam: "git@host.com:foo/bar",
- PathParam: "bar",
- RevisionParam: "baz",
+ urlParam: "git@host.com:foo/bar",
+ pathParam: "bar",
+ revisionParam: "baz",
},
},
{
name: "bad url",
params: map[string]string{
- UrlParam: "foo://bar",
- PathParam: "path",
- RevisionParam: "revision",
+ urlParam: "foo://bar",
+ pathParam: "path",
+ revisionParam: "revision",
},
wantErr: "invalid git repository url: foo://bar",
},
@@ -147,8 +147,8 @@ func TestValidateParamsNotEnabled(t *testing.T) {
var err error
someParams := map[string]string{
- PathParam: "bar",
- RevisionParam: "baz",
+ pathParam: "bar",
+ revisionParam: "baz",
}
err = resolver.ValidateParams(resolverDisabledContext(), toParams(someParams))
if err == nil {
@@ -168,32 +168,32 @@ func TestValidateParams_Failure(t *testing.T) {
{
name: "missing multiple",
params: map[string]string{
- OrgParam: "abcd1234",
- RepoParam: "foo",
+ orgParam: "abcd1234",
+ repoParam: "foo",
},
- expectedErr: fmt.Sprintf("missing required git resolver params: %s, %s", RevisionParam, PathParam),
+ expectedErr: fmt.Sprintf("missing required git resolver params: %s, %s", revisionParam, pathParam),
}, {
name: "no repo or url",
params: map[string]string{
- RevisionParam: "abcd1234",
- PathParam: "/foo/bar",
+ revisionParam: "abcd1234",
+ pathParam: "/foo/bar",
},
expectedErr: "must specify one of 'url' or 'repo'",
}, {
name: "both repo and url",
params: map[string]string{
- RevisionParam: "abcd1234",
- PathParam: "/foo/bar",
- UrlParam: "http://foo",
- RepoParam: "foo",
+ revisionParam: "abcd1234",
+ pathParam: "/foo/bar",
+ urlParam: "http://foo",
+ repoParam: "foo",
},
expectedErr: "cannot specify both 'url' and 'repo'",
}, {
name: "no org with repo",
params: map[string]string{
- RevisionParam: "abcd1234",
- PathParam: "/foo/bar",
- RepoParam: "foo",
+ revisionParam: "abcd1234",
+ pathParam: "/foo/bar",
+ repoParam: "foo",
},
expectedErr: "'org' is required when 'repo' is specified",
},
@@ -216,10 +216,7 @@ func TestValidateParams_Failure(t *testing.T) {
func TestGetResolutionTimeoutDefault(t *testing.T) {
resolver := Resolver{}
defaultTimeout := 30 * time.Minute
- timeout, err := resolver.GetResolutionTimeout(context.Background(), defaultTimeout, map[string]string{})
- if err != nil {
- t.Fatalf("couldn't get default-timeout: %v", err)
- }
+ timeout := resolver.GetResolutionTimeout(context.Background(), defaultTimeout)
if timeout != defaultTimeout {
t.Fatalf("expected default timeout to be returned")
}
@@ -230,45 +227,23 @@ func TestGetResolutionTimeoutCustom(t *testing.T) {
defaultTimeout := 30 * time.Minute
configTimeout := 5 * time.Second
config := map[string]string{
- DefaultTimeoutKey: configTimeout.String(),
+ defaultTimeoutKey: configTimeout.String(),
}
ctx := framework.InjectResolverConfigToContext(context.Background(), config)
- timeout, err := resolver.GetResolutionTimeout(ctx, defaultTimeout, map[string]string{})
- if err != nil {
- t.Fatalf("couldn't get default-timeout: %v", err)
- }
+ timeout := resolver.GetResolutionTimeout(ctx, defaultTimeout)
if timeout != configTimeout {
t.Fatalf("expected timeout from config to be returned")
}
}
-func TestGetResolutionTimeoutCustomIdentifier(t *testing.T) {
- resolver := Resolver{}
- defaultTimeout := 30 * time.Minute
- configTimeout := 5 * time.Second
- identifierConfigTImeout := 10 * time.Second
- config := map[string]string{
- DefaultTimeoutKey: configTimeout.String(),
- "foo." + DefaultTimeoutKey: identifierConfigTImeout.String(),
- }
- ctx := framework.InjectResolverConfigToContext(context.Background(), config)
- timeout, err := resolver.GetResolutionTimeout(ctx, defaultTimeout, map[string]string{"configKey": "foo"})
- if err != nil {
- t.Fatalf("couldn't get default-timeout: %v", err)
- }
- if timeout != identifierConfigTImeout {
- t.Fatalf("expected timeout from config to be returned")
- }
-}
-
func TestResolveNotEnabled(t *testing.T) {
resolver := Resolver{}
var err error
someParams := map[string]string{
- PathParam: "bar",
- RevisionParam: "baz",
+ pathParam: "bar",
+ revisionParam: "baz",
}
_, err = resolver.Resolve(resolverDisabledContext(), toParams(someParams))
if err == nil {
@@ -290,7 +265,6 @@ type params struct {
namespace string
serverURL string
scmType string
- configKey string
}
func TestResolve(t *testing.T) {
@@ -367,7 +341,6 @@ func TestResolve(t *testing.T) {
expectedCommitSHA string
expectedStatus *v1beta1.ResolutionRequestStatus
expectedErr error
- configIdentifer string
}{{
name: "clone: default revision main",
args: ¶ms{
@@ -375,7 +348,7 @@ func TestResolve(t *testing.T) {
url: anonFakeRepoURL,
},
expectedCommitSHA: commitSHAsInAnonRepo[2],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData([]byte("released content in main branch and in tag v1")),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData([]byte("released content in main branch and in tag v1")),
}, {
name: "clone: revision is tag name",
args: ¶ms{
@@ -384,7 +357,7 @@ func TestResolve(t *testing.T) {
url: anonFakeRepoURL,
},
expectedCommitSHA: commitSHAsInAnonRepo[2],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData([]byte("released content in main branch and in tag v1")),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData([]byte("released content in main branch and in tag v1")),
}, {
name: "clone: revision is the full tag name i.e. refs/tags/v1",
args: ¶ms{
@@ -393,7 +366,7 @@ func TestResolve(t *testing.T) {
url: anonFakeRepoURL,
},
expectedCommitSHA: commitSHAsInAnonRepo[2],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData([]byte("released content in main branch and in tag v1")),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData([]byte("released content in main branch and in tag v1")),
}, {
name: "clone: revision is a branch name",
args: ¶ms{
@@ -402,7 +375,7 @@ func TestResolve(t *testing.T) {
url: anonFakeRepoURL,
},
expectedCommitSHA: commitSHAsInAnonRepo[1],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData([]byte("new content in test branch")),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData([]byte("new content in test branch")),
}, {
name: "clone: revision is a specific commit sha",
args: ¶ms{
@@ -411,7 +384,7 @@ func TestResolve(t *testing.T) {
url: anonFakeRepoURL,
},
expectedCommitSHA: commitSHAsInAnonRepo[0],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData([]byte("old content in test branch")),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData([]byte("old content in test branch")),
}, {
name: "clone: file does not exist",
args: ¶ms{
@@ -444,7 +417,7 @@ func TestResolve(t *testing.T) {
},
apiToken: "some-token",
expectedCommitSHA: commitSHAsInSCMRepo[0],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(mainTaskYAML),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData(mainTaskYAML),
}, {
name: "api: successful task",
args: ¶ms{
@@ -462,47 +435,7 @@ func TestResolve(t *testing.T) {
},
apiToken: "some-token",
expectedCommitSHA: commitSHAsInSCMRepo[0],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(mainTaskYAML),
- }, {
- name: "api: successful task from params api information with identifier",
- args: ¶ms{
- revision: "main",
- pathInRepo: "tasks/example-task.yaml",
- org: testOrg,
- repo: testRepo,
- token: "token-secret",
- tokenKey: "token",
- namespace: "foo",
- configKey: "test",
- },
- config: map[string]string{
- "test." + ServerURLKey: "fake",
- "test." + SCMTypeKey: "fake",
- },
- configIdentifer: "test.",
- apiToken: "some-token",
- expectedCommitSHA: commitSHAsInSCMRepo[0],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(mainTaskYAML),
- }, {
- name: "api: successful task with identifier",
- args: ¶ms{
- revision: "main",
- pathInRepo: "tasks/example-task.yaml",
- org: testOrg,
- repo: testRepo,
- configKey: "test",
- },
- config: map[string]string{
- "test." + ServerURLKey: "fake",
- "test." + SCMTypeKey: "fake",
- "test." + APISecretNameKey: "token-secret",
- "test." + APISecretKeyKey: "token",
- "test." + APISecretNamespaceKey: system.Namespace(),
- },
- configIdentifer: "test.",
- apiToken: "some-token",
- expectedCommitSHA: commitSHAsInSCMRepo[0],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(mainTaskYAML),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData(mainTaskYAML),
}, {
name: "api: successful pipeline",
args: ¶ms{
@@ -520,7 +453,7 @@ func TestResolve(t *testing.T) {
},
apiToken: "some-token",
expectedCommitSHA: commitSHAsInSCMRepo[0],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(mainPipelineYAML),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData(mainPipelineYAML),
}, {
name: "api: successful pipeline with default revision",
args: ¶ms{
@@ -534,11 +467,11 @@ func TestResolve(t *testing.T) {
APISecretNameKey: "token-secret",
APISecretKeyKey: "token",
APISecretNamespaceKey: system.Namespace(),
- DefaultRevisionKey: "other",
+ defaultRevisionKey: "other",
},
apiToken: "some-token",
expectedCommitSHA: commitSHAsInSCMRepo[1],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(otherPipelineYAML),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData(otherPipelineYAML),
}, {
name: "api: successful override scm type and server URL from user params",
@@ -559,7 +492,7 @@ func TestResolve(t *testing.T) {
},
apiToken: "some-token",
expectedCommitSHA: commitSHAsInSCMRepo[0],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(mainTaskYAML),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData(mainTaskYAML),
}, {
name: "api: file does not exist",
args: ¶ms{
@@ -576,7 +509,7 @@ func TestResolve(t *testing.T) {
APISecretNamespaceKey: system.Namespace(),
},
apiToken: "some-token",
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
+ expectedStatus: internal.CreateResolutionRequestFailureStatus(),
expectedErr: createError("couldn't fetch resource content: file testdata/test-org/test-repo/refs/main/pipelines/other-pipeline.yaml does not exist: stat testdata/test-org/test-repo/refs/main/pipelines/other-pipeline.yaml: no such file or directory"),
}, {
name: "api: token not found",
@@ -593,7 +526,7 @@ func TestResolve(t *testing.T) {
APISecretKeyKey: "token",
APISecretNamespaceKey: system.Namespace(),
},
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
+ expectedStatus: internal.CreateResolutionRequestFailureStatus(),
expectedErr: createError("cannot get API token, secret token-secret not found in namespace " + system.Namespace()),
}, {
name: "api: token secret name not specified",
@@ -610,7 +543,7 @@ func TestResolve(t *testing.T) {
APISecretNamespaceKey: system.Namespace(),
},
apiToken: "some-token",
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
+ expectedStatus: internal.CreateResolutionRequestFailureStatus(),
expectedErr: createError("cannot get API token, required when specifying 'repo' param, 'api-token-secret-name' not specified in config"),
}, {
name: "api: token secret key not specified",
@@ -627,7 +560,7 @@ func TestResolve(t *testing.T) {
APISecretNamespaceKey: system.Namespace(),
},
apiToken: "some-token",
- expectedStatus: resolution.CreateResolutionRequestFailureStatus(),
+ expectedStatus: internal.CreateResolutionRequestFailureStatus(),
expectedErr: createError("cannot get API token, required when specifying 'repo' param, 'api-token-secret-key' not specified in config"),
}, {
name: "api: SCM type not specified",
@@ -642,9 +575,9 @@ func TestResolve(t *testing.T) {
APISecretKeyKey: "token",
APISecretNamespaceKey: system.Namespace(),
},
- apiToken: "some-token",
- expectedCommitSHA: commitSHAsInSCMRepo[0],
- expectedStatus: resolution.CreateResolutionRequestStatusWithData(mainPipelineYAML),
+ apiToken: "some-token",
+ expectedStatus: internal.CreateResolutionRequestFailureStatus(),
+ expectedErr: createError("missing or empty scm-type value in configmap"),
}}
for _, tc := range testCases {
@@ -655,9 +588,9 @@ func TestResolve(t *testing.T) {
if cfg == nil {
cfg = make(map[string]string)
}
- cfg[tc.configIdentifer+DefaultTimeoutKey] = "1m"
- if cfg[tc.configIdentifer+DefaultRevisionKey] == "" {
- cfg[tc.configIdentifer+DefaultRevisionKey] = plumbing.Master.Short()
+ cfg[defaultTimeoutKey] = "1m"
+ if cfg[defaultRevisionKey] == "" {
+ cfg[defaultRevisionKey] = plumbing.Master.Short()
}
request := createRequest(tc.args)
@@ -690,7 +623,7 @@ func TestResolve(t *testing.T) {
if expectedStatus.Annotations == nil {
expectedStatus.Annotations = make(map[string]string)
}
- expectedStatus.Annotations[common.AnnotationKeyContentType] = "application/x-yaml"
+ expectedStatus.Annotations[resolutioncommon.AnnotationKeyContentType] = "application/x-yaml"
expectedStatus.Annotations[AnnotationKeyRevision] = tc.expectedCommitSHA
expectedStatus.Annotations[AnnotationKeyPath] = tc.args.pathInRepo
@@ -718,8 +651,8 @@ func TestResolve(t *testing.T) {
frtesting.RunResolverReconcileTest(ctx, t, d, resolver, request, expectedStatus, tc.expectedErr, func(resolver framework.Resolver, testAssets test.Assets) {
var secretName, secretNameKey, secretNamespace string
- if tc.config[tc.configIdentifer+APISecretNameKey] != "" && tc.config[tc.configIdentifer+APISecretNamespaceKey] != "" && tc.config[tc.configIdentifer+APISecretKeyKey] != "" && tc.apiToken != "" {
- secretName, secretNameKey, secretNamespace = tc.config[tc.configIdentifer+APISecretNameKey], tc.config[tc.configIdentifer+APISecretKeyKey], tc.config[tc.configIdentifer+APISecretNamespaceKey]
+ if tc.config[APISecretNameKey] != "" && tc.config[APISecretNamespaceKey] != "" && tc.config[APISecretKeyKey] != "" && tc.apiToken != "" {
+ secretName, secretNameKey, secretNamespace = tc.config[APISecretNameKey], tc.config[APISecretKeyKey], tc.config[APISecretNamespaceKey]
}
if tc.args.token != "" && tc.args.namespace != "" && tc.args.tokenKey != "" {
secretName, secretNameKey, secretNamespace = tc.args.token, tc.args.tokenKey, tc.args.namespace
@@ -886,12 +819,12 @@ func createRequest(args *params) *v1beta1.ResolutionRequest {
Namespace: "foo",
CreationTimestamp: metav1.Time{Time: time.Now()},
Labels: map[string]string{
- common.LabelKeyResolverType: labelValueGitResolverType,
+ resolutioncommon.LabelKeyResolverType: labelValueGitResolverType,
},
},
Spec: v1beta1.ResolutionRequestSpec{
Params: []pipelinev1.Param{{
- Name: PathParam,
+ Name: pathParam,
Value: *pipelinev1.NewStructuredValues(args.pathInRepo),
}},
},
@@ -899,57 +832,50 @@ func createRequest(args *params) *v1beta1.ResolutionRequest {
if args.revision != "" {
rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: RevisionParam,
+ Name: revisionParam,
Value: *pipelinev1.NewStructuredValues(args.revision),
})
}
if args.serverURL != "" {
rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: ServerURLParam,
+ Name: serverURLParam,
Value: *pipelinev1.NewStructuredValues(args.serverURL),
})
}
if args.scmType != "" {
rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: ScmTypeParam,
+ Name: scmTypeParam,
Value: *pipelinev1.NewStructuredValues(args.scmType),
})
}
if args.url != "" {
rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: UrlParam,
+ Name: urlParam,
Value: *pipelinev1.NewStructuredValues(args.url),
})
} else {
rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: RepoParam,
+ Name: repoParam,
Value: *pipelinev1.NewStructuredValues(args.repo),
})
rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: OrgParam,
+ Name: orgParam,
Value: *pipelinev1.NewStructuredValues(args.org),
})
if args.token != "" {
rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: TokenParam,
+ Name: tokenParam,
Value: *pipelinev1.NewStructuredValues(args.token),
})
rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: TokenKeyParam,
+ Name: tokenKeyParam,
Value: *pipelinev1.NewStructuredValues(args.tokenKey),
})
}
}
- if args.configKey != "" {
- rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: ConfigKeyParam,
- Value: *pipelinev1.NewStructuredValues(args.configKey),
- })
- }
-
return rr
}
@@ -958,7 +884,7 @@ func resolverDisabledContext() context.Context {
}
func createError(msg string) error {
- return &common.GetResourceError{
+ return &resolutioncommon.GetResourceError{
ResolverName: gitResolverName,
Key: "foo/rr",
Original: errors.New(msg),
@@ -977,185 +903,3 @@ func toParams(m map[string]string) []pipelinev1.Param {
return params
}
-
-func TestGetScmConfigForParamConfigKey(t *testing.T) {
- tests := []struct {
- name string
- wantErr bool
- expectedErr string
- config map[string]string
- expectedConfig ScmConfig
- params map[string]string
- }{
- {
- name: "no config",
- config: map[string]string{},
- expectedConfig: ScmConfig{},
- },
- {
- name: "default config",
- config: map[string]string{
- DefaultURLKey: "https://github.com",
- DefaultRevisionKey: "main",
- DefaultOrgKey: "tektoncd",
- },
- expectedConfig: ScmConfig{
- URL: "https://github.com",
- Revision: "main",
- Org: "tektoncd",
- },
- },
- {
- name: "default config with default key",
- config: map[string]string{
- "default." + DefaultURLKey: "https://github.com",
- "default." + DefaultRevisionKey: "main",
- },
- expectedConfig: ScmConfig{
- URL: "https://github.com",
- Revision: "main",
- },
- },
- {
- name: "default config with default key and default param",
- config: map[string]string{
- "default." + DefaultURLKey: "https://github.com",
- "default." + DefaultRevisionKey: "main",
- },
- expectedConfig: ScmConfig{
- URL: "https://github.com",
- Revision: "main",
- },
- params: map[string]string{
- ConfigKeyParam: "default",
- },
- },
- {
- name: "config with custom key",
- config: map[string]string{
- "test." + DefaultURLKey: "https://github.com",
- "test." + DefaultRevisionKey: "main",
- },
- expectedConfig: ScmConfig{
- URL: "https://github.com",
- Revision: "main",
- },
- params: map[string]string{
- ConfigKeyParam: "test",
- },
- },
- {
- name: "config with custom key and no param",
- config: map[string]string{
- "test." + DefaultURLKey: "https://github.com",
- "test." + DefaultRevisionKey: "main",
- },
- expectedConfig: ScmConfig{},
- },
- {
- name: "config with custom key and no key and param default",
- config: map[string]string{
- DefaultURLKey: "https://github.com",
- DefaultRevisionKey: "main",
- "test." + DefaultURLKey: "https://github1.com",
- "test." + DefaultRevisionKey: "main1",
- },
- expectedConfig: ScmConfig{
- URL: "https://github.com",
- Revision: "main",
- },
- params: map[string]string{
- ConfigKeyParam: "default",
- },
- },
- {
- name: "config with custom key and no key and param test",
- config: map[string]string{
- DefaultURLKey: "https://github.com",
- DefaultRevisionKey: "main",
- "test." + DefaultURLKey: "https://github1.com",
- "test." + DefaultRevisionKey: "main1",
- },
- expectedConfig: ScmConfig{
- URL: "https://github1.com",
- Revision: "main1",
- },
- params: map[string]string{
- ConfigKeyParam: "test",
- },
- },
- {
- name: "config with both default and custom key and param default",
- config: map[string]string{
- DefaultURLKey: "https://github.com",
- DefaultRevisionKey: "main",
- "test." + DefaultURLKey: "https://github1.com",
- "test." + DefaultRevisionKey: "main1",
- },
- expectedConfig: ScmConfig{
- URL: "https://github.com",
- Revision: "main",
- },
- params: map[string]string{
- ConfigKeyParam: "default",
- },
- },
- {
- name: "config with both default and custom key and param test",
- config: map[string]string{
- DefaultURLKey: "https://github.com",
- DefaultRevisionKey: "main",
- "test." + DefaultURLKey: "https://github1.com",
- "test." + DefaultRevisionKey: "main1",
- },
- expectedConfig: ScmConfig{
- URL: "https://github1.com",
- Revision: "main1",
- },
- params: map[string]string{
- ConfigKeyParam: "test",
- },
- },
- {
- name: "config with both default and custom key and param test2",
- config: map[string]string{
- DefaultURLKey: "https://github.com",
- DefaultRevisionKey: "main",
- "test." + DefaultURLKey: "https://github1.com",
- "test." + DefaultRevisionKey: "main1",
- },
- expectedConfig: ScmConfig{},
- params: map[string]string{
- ConfigKeyParam: "test2",
- },
- wantErr: true,
- expectedErr: "no git resolver configuration found for configKey test2",
- },
- {
- name: "config with invalid format",
- config: map[string]string{
- "default.." + DefaultURLKey: "https://github.com",
- },
- wantErr: true,
- expectedErr: "key default..default-url passed in git resolver configmap is invalid",
- expectedConfig: ScmConfig{},
- },
- }
- for _, tc := range tests {
- t.Run(tc.name, func(t *testing.T) {
- ctx := framework.InjectResolverConfigToContext(context.Background(), tc.config)
- gitResolverConfig, err := GetScmConfigForParamConfigKey(ctx, tc.params)
- if tc.wantErr {
- if err == nil {
- t.Fatalf("unexpected error parsing git resolver config: %v", err)
- }
- if d := cmp.Diff(tc.expectedErr, err.Error()); d != "" {
- t.Errorf("unexpected error: %s", diff.PrintWantGot(d))
- }
- }
- if d := cmp.Diff(tc.expectedConfig, gitResolverConfig); d != "" {
- t.Errorf("expected config: %s", diff.PrintWantGot(d))
- }
- })
- }
-}
diff --git a/upstream/pkg/resolution/resolver/http/config.go b/upstream/pkg/resolution/resolver/http/config.go
index c8ffd8ed545..0685fdb07ba 100644
--- a/upstream/pkg/resolution/resolver/http/config.go
+++ b/upstream/pkg/resolution/resolver/http/config.go
@@ -17,7 +17,7 @@ limitations under the License.
package http
const (
- // TimeoutKey is the configuration field name for controlling
+ // timeoutKey is the configuration field name for controlling
// the maximum duration of a resolution request for a file from http.
- TimeoutKey = "fetch-timeout"
+ timeoutKey = "fetch-timeout"
)
diff --git a/upstream/pkg/resolution/resolver/http/params.go b/upstream/pkg/resolution/resolver/http/params.go
index 768832f65d8..b2e8c9a9c6c 100644
--- a/upstream/pkg/resolution/resolver/http/params.go
+++ b/upstream/pkg/resolution/resolver/http/params.go
@@ -13,18 +13,16 @@ limitations under the License.
package http
-import "github.com/tektoncd/pipeline/pkg/resolution/resource"
-
const (
- // UrlParam is the URL to fetch the task from
- UrlParam string = resource.ParamURL
+ // urlParam is the URL to fetch the task from
+ urlParam string = "url"
- // HttpBasicAuthUsername is the user name to use for basic auth
- HttpBasicAuthUsername string = "http-username"
+ // httpBasicAuthUsername is the user name to use for basic auth
+ httpBasicAuthUsername string = "http-username"
- // HttpBasicAuthSecret is the reference to a secret in the PipelineRun or TaskRun namespace to use for basic auth
- HttpBasicAuthSecret string = "http-password-secret"
+ // httpBasicAuthSecret is the reference to a secret in the PipelineRun or TaskRun namespace to use for basic auth
+ httpBasicAuthSecret string = "http-password-secret"
- // HttpBasicAuthSecretKey is the key in the httpBasicAuthSecret secret to use for basic auth
- HttpBasicAuthSecretKey string = "http-password-secret-key"
+ // httpBasicAuthSecretKey is the key in the httpBasicAuthSecret secret to use for basic auth
+ httpBasicAuthSecretKey string = "http-password-secret-key"
)
diff --git a/upstream/pkg/resolution/resolver/http/resolver.go b/upstream/pkg/resolution/resolver/http/resolver.go
index 88447f43ba1..49d75bd2dd1 100644
--- a/upstream/pkg/resolution/resolver/http/resolver.go
+++ b/upstream/pkg/resolution/resolver/http/resolver.go
@@ -28,7 +28,7 @@ import (
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- common "github.com/tektoncd/pipeline/pkg/resolution/common"
+ "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
"go.uber.org/zap"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -59,8 +59,6 @@ const (
)
// Resolver implements a framework.Resolver that can fetch files from an HTTP URL
-//
-// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/http.Resolver] instead.
type Resolver struct {
kubeClient kubernetes.Interface
logger *zap.SugaredLogger
@@ -91,24 +89,31 @@ func (r *Resolver) GetSelector(context.Context) map[string]string {
// ValidateParams ensures parameters from a request are as expected.
func (r *Resolver) ValidateParams(ctx context.Context, params []pipelinev1.Param) error {
- return ValidateParams(ctx, params)
+ if r.isDisabled(ctx) {
+ return errors.New(disabledError)
+ }
+ _, err := populateDefaultParams(ctx, params)
+ if err != nil {
+ return err
+ }
+ return nil
}
// Resolve uses the given params to resolve the requested file or resource.
func (r *Resolver) Resolve(ctx context.Context, oParams []pipelinev1.Param) (framework.ResolvedResource, error) {
- if IsDisabled(ctx) {
+ if r.isDisabled(ctx) {
return nil, errors.New(disabledError)
}
- params, err := PopulateDefaultParams(ctx, oParams)
+ params, err := populateDefaultParams(ctx, oParams)
if err != nil {
return nil, err
}
- return FetchHttpResource(ctx, params, r.kubeClient, r.logger)
+ return r.fetchHttpResource(ctx, params)
}
-func IsDisabled(ctx context.Context) bool {
+func (r *Resolver) isDisabled(ctx context.Context) bool {
cfg := resolverconfig.FromContextOrDefaults(ctx)
return !cfg.FeatureFlags.EnableHttpResolver
}
@@ -146,7 +151,7 @@ func (rr *resolvedHttpResource) RefSource() *pipelinev1.RefSource {
}
}
-func PopulateDefaultParams(ctx context.Context, params []pipelinev1.Param) (map[string]string, error) {
+func populateDefaultParams(ctx context.Context, params []pipelinev1.Param) (map[string]string, error) {
paramsMap := make(map[string]string)
for _, p := range params {
paramsMap[p.Name] = p.Value.StringVal
@@ -154,33 +159,33 @@ func PopulateDefaultParams(ctx context.Context, params []pipelinev1.Param) (map[
var missingParams []string
- if _, ok := paramsMap[UrlParam]; !ok {
- missingParams = append(missingParams, UrlParam)
+ if _, ok := paramsMap[urlParam]; !ok {
+ missingParams = append(missingParams, urlParam)
} else {
- u, err := url.ParseRequestURI(paramsMap[UrlParam])
+ u, err := url.ParseRequestURI(paramsMap[urlParam])
if err != nil {
- return nil, fmt.Errorf("cannot parse url %s: %w", paramsMap[UrlParam], err)
+ return nil, fmt.Errorf("cannot parse url %s: %w", paramsMap[urlParam], err)
}
if u.Scheme != "http" && u.Scheme != "https" {
- return nil, fmt.Errorf("url %s is not a valid http(s) url", paramsMap[UrlParam])
+ return nil, fmt.Errorf("url %s is not a valid http(s) url", paramsMap[urlParam])
}
}
- if username, ok := paramsMap[HttpBasicAuthUsername]; ok {
- if _, ok := paramsMap[HttpBasicAuthSecret]; !ok {
- return nil, fmt.Errorf("missing required param %s when using %s", HttpBasicAuthSecret, HttpBasicAuthUsername)
+ if username, ok := paramsMap[httpBasicAuthUsername]; ok {
+ if _, ok := paramsMap[httpBasicAuthSecret]; !ok {
+ return nil, fmt.Errorf("missing required param %s when using %s", httpBasicAuthSecret, httpBasicAuthUsername)
}
if username == "" {
- return nil, fmt.Errorf("value %s cannot be empty", HttpBasicAuthUsername)
+ return nil, fmt.Errorf("value %s cannot be empty", httpBasicAuthUsername)
}
}
- if secret, ok := paramsMap[HttpBasicAuthSecret]; ok {
- if _, ok := paramsMap[HttpBasicAuthUsername]; !ok {
- return nil, fmt.Errorf("missing required param %s when using %s", HttpBasicAuthUsername, HttpBasicAuthSecret)
+ if secret, ok := paramsMap[httpBasicAuthSecret]; ok {
+ if _, ok := paramsMap[httpBasicAuthUsername]; !ok {
+ return nil, fmt.Errorf("missing required param %s when using %s", httpBasicAuthUsername, httpBasicAuthSecret)
}
if secret == "" {
- return nil, fmt.Errorf("value %s cannot be empty", HttpBasicAuthSecret)
+ return nil, fmt.Errorf("value %s cannot be empty", httpBasicAuthSecret)
}
}
@@ -194,7 +199,7 @@ func PopulateDefaultParams(ctx context.Context, params []pipelinev1.Param) (map[
func makeHttpClient(ctx context.Context) (*http.Client, error) {
conf := framework.GetResolverConfigFromContext(ctx)
timeout, _ := time.ParseDuration(defaultHttpTimeoutValue)
- if v, ok := conf[TimeoutKey]; ok {
+ if v, ok := conf[timeoutKey]; ok {
var err error
timeout, err = time.ParseDuration(v)
if err != nil {
@@ -206,7 +211,7 @@ func makeHttpClient(ctx context.Context) (*http.Client, error) {
}, nil
}
-func FetchHttpResource(ctx context.Context, params map[string]string, kubeclient kubernetes.Interface, logger *zap.SugaredLogger) (framework.ResolvedResource, error) {
+func (r *Resolver) fetchHttpResource(ctx context.Context, params map[string]string) (framework.ResolvedResource, error) {
var targetURL string
var ok bool
@@ -215,8 +220,8 @@ func FetchHttpResource(ctx context.Context, params map[string]string, kubeclient
return nil, err
}
- if targetURL, ok = params[UrlParam]; !ok {
- return nil, fmt.Errorf("missing required params: %s", UrlParam)
+ if targetURL, ok = params[urlParam]; !ok {
+ return nil, fmt.Errorf("missing required params: %s", urlParam)
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil)
@@ -225,8 +230,8 @@ func FetchHttpResource(ctx context.Context, params map[string]string, kubeclient
}
// NOTE(chmouel): We already made sure that username and secret was specified by the user
- if secret, ok := params[HttpBasicAuthSecret]; ok && secret != "" {
- if encodedSecret, err := getBasicAuthSecret(ctx, params, kubeclient, logger); err != nil {
+ if secret, ok := params[httpBasicAuthSecret]; ok && secret != "" {
+ if encodedSecret, err := r.getBasicAuthSecret(ctx, params); err != nil {
return nil, err
} else {
req.Header.Set("Authorization", encodedSecret)
@@ -254,44 +259,33 @@ func FetchHttpResource(ctx context.Context, params map[string]string, kubeclient
}, nil
}
-func getBasicAuthSecret(ctx context.Context, params map[string]string, kubeclient kubernetes.Interface, logger *zap.SugaredLogger) (string, error) {
- secretName := params[HttpBasicAuthSecret]
- userName := params[HttpBasicAuthUsername]
+func (r *Resolver) getBasicAuthSecret(ctx context.Context, params map[string]string) (string, error) {
+ secretName := params[httpBasicAuthSecret]
+ userName := params[httpBasicAuthUsername]
tokenSecretKey := defaultBasicAuthSecretKey
- if v, ok := params[HttpBasicAuthSecretKey]; ok {
+ if v, ok := params[httpBasicAuthSecretKey]; ok {
if v != "" {
tokenSecretKey = v
}
}
secretNS := common.RequestNamespace(ctx)
- secret, err := kubeclient.CoreV1().Secrets(secretNS).Get(ctx, secretName, metav1.GetOptions{})
+ secret, err := r.kubeClient.CoreV1().Secrets(secretNS).Get(ctx, secretName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
notFoundErr := fmt.Errorf("cannot get API token, secret %s not found in namespace %s", secretName, secretNS)
- logger.Info(notFoundErr)
+ r.logger.Info(notFoundErr)
return "", notFoundErr
}
wrappedErr := fmt.Errorf("error reading API token from secret %s in namespace %s: %w", secretName, secretNS, err)
- logger.Info(wrappedErr)
+ r.logger.Info(wrappedErr)
return "", wrappedErr
}
secretVal, ok := secret.Data[tokenSecretKey]
if !ok {
err := fmt.Errorf("cannot get API token, key %s not found in secret %s in namespace %s", tokenSecretKey, secretName, secretNS)
- logger.Info(err)
+ r.logger.Info(err)
return "", err
}
return "Basic " + base64.StdEncoding.EncodeToString(
[]byte(fmt.Sprintf("%s:%s", userName, secretVal))), nil
}
-
-func ValidateParams(ctx context.Context, params []pipelinev1.Param) error {
- if IsDisabled(ctx) {
- return errors.New(disabledError)
- }
- _, err := PopulateDefaultParams(ctx, params)
- if err != nil {
- return err
- }
- return nil
-}
diff --git a/upstream/pkg/resolution/resolver/http/resolver_test.go b/upstream/pkg/resolution/resolver/http/resolver_test.go
index 960fd8614fc..630b3882d6e 100644
--- a/upstream/pkg/resolution/resolver/http/resolver_test.go
+++ b/upstream/pkg/resolution/resolver/http/resolver_test.go
@@ -33,11 +33,11 @@ import (
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- "github.com/tektoncd/pipeline/pkg/internal/resolution"
ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
- common "github.com/tektoncd/pipeline/pkg/resolution/common"
+ resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
frtesting "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework/testing"
+ "github.com/tektoncd/pipeline/pkg/resolution/resolver/internal"
"github.com/tektoncd/pipeline/test"
"github.com/tektoncd/pipeline/test/diff"
corev1 "k8s.io/api/core/v1"
@@ -68,7 +68,7 @@ const emptyStr = "empty"
func TestGetSelector(t *testing.T) {
resolver := Resolver{}
sel := resolver.GetSelector(context.Background())
- if typ, has := sel[common.LabelKeyResolverType]; !has {
+ if typ, has := sel[resolutioncommon.LabelKeyResolverType]; !has {
t.Fatalf("unexpected selector: %v", sel)
} else if typ != LabelValueHttpResolverType {
t.Fatalf("unexpected type: %q", typ)
@@ -104,7 +104,7 @@ func TestValidateParams(t *testing.T) {
resolver := Resolver{}
params := map[string]string{}
if tc.url != "nourl" {
- params[UrlParam] = tc.url
+ params[urlParam] = tc.url
}
err := resolver.ValidateParams(contextWithConfig(defaultHttpTimeoutValue), toParams(params))
if tc.expectedErr != nil {
@@ -181,12 +181,12 @@ func TestResolve(t *testing.T) {
if tc.expectedStatus != 0 {
w.WriteHeader(tc.expectedStatus)
}
- fmt.Fprint(w, tc.input)
+ fmt.Fprintf(w, tc.input)
}))
params := []pipelinev1.Param{}
if tc.paramSet {
params = append(params, pipelinev1.Param{
- Name: UrlParam,
+ Name: urlParam,
Value: *pipelinev1.NewStructuredValues(svr.URL),
})
}
@@ -253,12 +253,12 @@ func createRequest(params *params) *v1beta1.ResolutionRequest {
Namespace: "foo",
CreationTimestamp: metav1.Time{Time: time.Now()},
Labels: map[string]string{
- common.LabelKeyResolverType: LabelValueHttpResolverType,
+ resolutioncommon.LabelKeyResolverType: LabelValueHttpResolverType,
},
},
Spec: v1beta1.ResolutionRequestSpec{
Params: []pipelinev1.Param{{
- Name: UrlParam,
+ Name: urlParam,
Value: *pipelinev1.NewStructuredValues(params.url),
}},
},
@@ -269,7 +269,7 @@ func createRequest(params *params) *v1beta1.ResolutionRequest {
s = ""
}
rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: HttpBasicAuthSecret,
+ Name: httpBasicAuthSecret,
Value: *pipelinev1.NewStructuredValues(s),
})
}
@@ -280,14 +280,14 @@ func createRequest(params *params) *v1beta1.ResolutionRequest {
s = ""
}
rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: HttpBasicAuthUsername,
+ Name: httpBasicAuthUsername,
Value: *pipelinev1.NewStructuredValues(s),
})
}
if params.authSecretKey != "" {
rr.Spec.Params = append(rr.Spec.Params, pipelinev1.Param{
- Name: HttpBasicAuthSecretKey,
+ Name: httpBasicAuthSecretKey,
Value: *pipelinev1.NewStructuredValues(params.authSecretKey),
})
}
@@ -309,12 +309,12 @@ func TestResolverReconcileBasicAuth(t *testing.T) {
{
name: "good/URL Resolution",
taskContent: sampleTask,
- expectedStatus: resolution.CreateResolutionRequestStatusWithData([]byte(sampleTask)),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData([]byte(sampleTask)),
},
{
name: "good/URL Resolution with custom basic auth, and custom secret key",
taskContent: sampleTask,
- expectedStatus: resolution.CreateResolutionRequestStatusWithData([]byte(sampleTask)),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData([]byte(sampleTask)),
params: ¶ms{
authSecret: "auth-secret",
authUsername: "auth",
@@ -325,7 +325,7 @@ func TestResolverReconcileBasicAuth(t *testing.T) {
{
name: "good/URL Resolution with custom basic auth no custom secret key",
taskContent: sampleTask,
- expectedStatus: resolution.CreateResolutionRequestStatusWithData([]byte(sampleTask)),
+ expectedStatus: internal.CreateResolutionRequestStatusWithData([]byte(sampleTask)),
params: ¶ms{
authSecret: "auth-secret",
authUsername: "auth",
@@ -396,7 +396,7 @@ func TestResolverReconcileBasicAuth(t *testing.T) {
resolver := &Resolver{}
ctx, _ := ttesting.SetupFakeContext(t)
svr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- fmt.Fprint(w, tt.taskContent)
+ fmt.Fprintf(w, tt.taskContent)
}))
p := tt.params
if p == nil {
@@ -510,7 +510,7 @@ func toParams(m map[string]string) []pipelinev1.Param {
func contextWithConfig(timeout string) context.Context {
config := map[string]string{
- TimeoutKey: timeout,
+ timeoutKey: timeout,
}
return framework.InjectResolverConfigToContext(context.Background(), config)
}
diff --git a/upstream/pkg/resolution/resolver/hub/params.go b/upstream/pkg/resolution/resolver/hub/params.go
index 211ad7cda9a..6c77736b48f 100644
--- a/upstream/pkg/resolution/resolver/hub/params.go
+++ b/upstream/pkg/resolution/resolver/hub/params.go
@@ -13,8 +13,6 @@ limitations under the License.
package hub
-import "github.com/tektoncd/pipeline/pkg/resolution/resource"
-
// DefaultArtifactHubURL is the default url for the Artifact hub api
const DefaultArtifactHubURL = "https://artifacthub.io"
@@ -32,7 +30,7 @@ const ArtifactHubListTasksEndpoint = "api/v1/packages/tekton-%s/%s/%s"
// ParamName is the parameter defining what the layer name in the bundle
// image is.
-const ParamName = resource.ParamName
+const ParamName = "name"
// ParamKind is the parameter defining what the layer kind in the bundle
// image is.
diff --git a/upstream/pkg/resolution/resolver/hub/resolver.go b/upstream/pkg/resolution/resolver/hub/resolver.go
index db2d9d29413..e94aa390fa5 100644
--- a/upstream/pkg/resolution/resolver/hub/resolver.go
+++ b/upstream/pkg/resolution/resolver/hub/resolver.go
@@ -27,7 +27,7 @@ import (
goversion "github.com/hashicorp/go-version"
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- common "github.com/tektoncd/pipeline/pkg/resolution/common"
+ "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
)
@@ -46,8 +46,6 @@ const (
)
// Resolver implements a framework.Resolver that can fetch files from OCI bundles.
-//
-// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resolver/hub.Resolver] instead.
type Resolver struct {
// TektonHubURL is the URL for hub resolver with type tekton
TektonHubURL string
@@ -79,11 +77,7 @@ func (r *Resolver) GetSelector(context.Context) map[string]string {
// ValidateParams ensures parameters from a request are as expected.
func (r *Resolver) ValidateParams(ctx context.Context, params []pipelinev1.Param) error {
- return ValidateParams(ctx, params, r.TektonHubURL)
-}
-
-func ValidateParams(ctx context.Context, params []pipelinev1.Param, tektonHubUrl string) error {
- if isDisabled(ctx) {
+ if r.isDisabled(ctx) {
return errors.New(disabledError)
}
@@ -91,7 +85,7 @@ func ValidateParams(ctx context.Context, params []pipelinev1.Param, tektonHubUrl
if err != nil {
return fmt.Errorf("failed to populate default params: %w", err)
}
- if err := validateParams(ctx, paramsMap, tektonHubUrl); err != nil {
+ if err := r.validateParams(ctx, paramsMap); err != nil {
return fmt.Errorf("failed to validate params: %w", err)
}
@@ -116,11 +110,7 @@ type artifactHubResponse struct {
// Resolve uses the given params to resolve the requested file or resource.
func (r *Resolver) Resolve(ctx context.Context, params []pipelinev1.Param) (framework.ResolvedResource, error) {
- return Resolve(ctx, params, r.TektonHubURL, r.ArtifactHubURL)
-}
-
-func Resolve(ctx context.Context, params []pipelinev1.Param, tektonHubURL, artifactHubURL string) (framework.ResolvedResource, error) {
- if isDisabled(ctx) {
+ if r.isDisabled(ctx) {
return nil, errors.New(disabledError)
}
@@ -128,12 +118,12 @@ func Resolve(ctx context.Context, params []pipelinev1.Param, tektonHubURL, artif
if err != nil {
return nil, fmt.Errorf("failed to populate default params: %w", err)
}
- if err := validateParams(ctx, paramsMap, tektonHubURL); err != nil {
+ if err := r.validateParams(ctx, paramsMap); err != nil {
return nil, fmt.Errorf("failed to validate params: %w", err)
}
if constraint, err := goversion.NewConstraint(paramsMap[ParamVersion]); err == nil {
- chosen, err := resolveVersionConstraint(ctx, paramsMap, constraint, artifactHubURL, tektonHubURL)
+ chosen, err := r.resolveVersionConstraint(ctx, paramsMap, constraint)
if err != nil {
return nil, err
}
@@ -149,7 +139,7 @@ func Resolve(ctx context.Context, params []pipelinev1.Param, tektonHubURL, artif
// call hub API
switch paramsMap[ParamType] {
case ArtifactHubType:
- url := fmt.Sprintf(fmt.Sprintf("%s/%s", artifactHubURL, ArtifactHubYamlEndpoint),
+ url := fmt.Sprintf(fmt.Sprintf("%s/%s", r.ArtifactHubURL, ArtifactHubYamlEndpoint),
paramsMap[ParamKind], paramsMap[ParamCatalog], paramsMap[ParamName], paramsMap[ParamVersion])
resp := artifactHubResponse{}
if err := fetchHubResource(ctx, url, &resp); err != nil {
@@ -160,7 +150,7 @@ func Resolve(ctx context.Context, params []pipelinev1.Param, tektonHubURL, artif
Content: []byte(resp.Data.YAML),
}, nil
case TektonHubType:
- url := fmt.Sprintf(fmt.Sprintf("%s/%s", tektonHubURL, TektonHubYamlEndpoint),
+ url := fmt.Sprintf(fmt.Sprintf("%s/%s", r.TektonHubURL, TektonHubYamlEndpoint),
paramsMap[ParamCatalog], paramsMap[ParamKind], paramsMap[ParamName], paramsMap[ParamVersion])
resp := tektonHubResponse{}
if err := fetchHubResource(ctx, url, &resp); err != nil {
@@ -208,7 +198,7 @@ func (rr *ResolvedHubResource) RefSource() *pipelinev1.RefSource {
}
}
-func isDisabled(ctx context.Context) bool {
+func (r *Resolver) isDisabled(ctx context.Context) bool {
cfg := resolverconfig.FromContextOrDefaults(ctx)
return !cfg.FeatureFlags.EnableHubResolver
}
@@ -298,92 +288,10 @@ type tektonHubListResult struct {
Data tektonHubListDataResult `json:"data"`
}
-// the Artifact Hub follows the semVer (i.e. ..0)
-// the Tekton Hub follows the simplified semVer (i.e. .)
-// for resolution request with "artifact" type, we append ".0" suffix if the input version is simplified semVer
-// for resolution request with "tekton" type, we only use . part of the input if it is semVer
-func resolveVersion(version, hubType string) (string, error) {
- semVer := strings.Split(version, ".")
- resVer := version
-
- if hubType == ArtifactHubType && len(semVer) == 2 {
- resVer = version + ".0"
- } else if hubType == TektonHubType && len(semVer) > 2 {
- resVer = strings.Join(semVer[0:2], ".")
- }
-
- return resVer, nil
-}
-
-func populateDefaultParams(ctx context.Context, params []pipelinev1.Param) (map[string]string, error) {
- conf := framework.GetResolverConfigFromContext(ctx)
- paramsMap := make(map[string]string)
- for _, p := range params {
- paramsMap[p.Name] = p.Value.StringVal
- }
-
- // type
- if _, ok := paramsMap[ParamType]; !ok {
- if typeString, ok := conf[ConfigType]; ok {
- paramsMap[ParamType] = typeString
- } else {
- return nil, errors.New("default type was not set during installation of the hub resolver")
- }
- }
-
- // kind
- if _, ok := paramsMap[ParamKind]; !ok {
- if kindString, ok := conf[ConfigKind]; ok {
- paramsMap[ParamKind] = kindString
- } else {
- return nil, errors.New("default resource kind was not set during installation of the hub resolver")
- }
- }
-
- // catalog
- resCatName, err := resolveCatalogName(paramsMap, conf)
- if err != nil {
- return nil, err
- }
- paramsMap[ParamCatalog] = resCatName
-
- return paramsMap, nil
-}
-
-func validateParams(ctx context.Context, paramsMap map[string]string, tektonHubURL string) error {
- var missingParams []string
- if _, ok := paramsMap[ParamName]; !ok {
- missingParams = append(missingParams, ParamName)
- }
- if _, ok := paramsMap[ParamVersion]; !ok {
- missingParams = append(missingParams, ParamVersion)
- }
- if kind, ok := paramsMap[ParamKind]; ok {
- if kind != "task" && kind != "pipeline" {
- return errors.New("kind param must be task or pipeline")
- }
- }
- if hubType, ok := paramsMap[ParamType]; ok {
- if hubType != ArtifactHubType && hubType != TektonHubType {
- return fmt.Errorf("type param must be %s or %s", ArtifactHubType, TektonHubType)
- }
-
- if hubType == TektonHubType && tektonHubURL == "" {
- return errors.New("please configure TEKTON_HUB_API env variable to use tekton type")
- }
- }
-
- if len(missingParams) > 0 {
- return fmt.Errorf("missing required hub resolver params: %s", strings.Join(missingParams, ", "))
- }
-
- return nil
-}
-
-func resolveVersionConstraint(ctx context.Context, paramsMap map[string]string, constraint goversion.Constraints, artifactHubURL, tektonHubURL string) (*goversion.Version, error) {
+func (r *Resolver) resolveVersionConstraint(ctx context.Context, paramsMap map[string]string, constraint goversion.Constraints) (*goversion.Version, error) {
var ret *goversion.Version
if paramsMap[ParamType] == ArtifactHubType {
- allVersionsURL := fmt.Sprintf("%s/%s", artifactHubURL, fmt.Sprintf(
+ allVersionsURL := fmt.Sprintf("%s/%s", r.ArtifactHubURL, fmt.Sprintf(
ArtifactHubListTasksEndpoint,
paramsMap[ParamKind], paramsMap[ParamCatalog], paramsMap[ParamName]))
resp := artifactHubListResult{}
@@ -410,7 +318,7 @@ func resolveVersionConstraint(ctx context.Context, paramsMap map[string]string,
}
}
} else if paramsMap[ParamType] == TektonHubType {
- allVersionsURL := fmt.Sprintf("%s/%s", tektonHubURL,
+ allVersionsURL := fmt.Sprintf("%s/%s", r.TektonHubURL,
fmt.Sprintf(TektonHubListTasksEndpoint,
paramsMap[ParamCatalog], paramsMap[ParamKind], paramsMap[ParamName]))
resp := tektonHubListResult{}
@@ -439,3 +347,85 @@ func resolveVersionConstraint(ctx context.Context, paramsMap map[string]string,
}
return ret, nil
}
+
+// the Artifact Hub follows the semVer (i.e. ..0)
+// the Tekton Hub follows the simplified semVer (i.e. .)
+// for resolution request with "artifact" type, we append ".0" suffix if the input version is simplified semVer
+// for resolution request with "tekton" type, we only use . part of the input if it is semVer
+func resolveVersion(version, hubType string) (string, error) {
+ semVer := strings.Split(version, ".")
+ resVer := version
+
+ if hubType == ArtifactHubType && len(semVer) == 2 {
+ resVer = version + ".0"
+ } else if hubType == TektonHubType && len(semVer) > 2 {
+ resVer = strings.Join(semVer[0:2], ".")
+ }
+
+ return resVer, nil
+}
+
+func (r *Resolver) validateParams(ctx context.Context, paramsMap map[string]string) error {
+ var missingParams []string
+ if _, ok := paramsMap[ParamName]; !ok {
+ missingParams = append(missingParams, ParamName)
+ }
+ if _, ok := paramsMap[ParamVersion]; !ok {
+ missingParams = append(missingParams, ParamVersion)
+ }
+ if kind, ok := paramsMap[ParamKind]; ok {
+ if kind != "task" && kind != "pipeline" {
+ return errors.New("kind param must be task or pipeline")
+ }
+ }
+ if hubType, ok := paramsMap[ParamType]; ok {
+ if hubType != ArtifactHubType && hubType != TektonHubType {
+ return fmt.Errorf("type param must be %s or %s", ArtifactHubType, TektonHubType)
+ }
+
+ if hubType == TektonHubType && r.TektonHubURL == "" {
+ return errors.New("please configure TEKTON_HUB_API env variable to use tekton type")
+ }
+ }
+
+ if len(missingParams) > 0 {
+ return fmt.Errorf("missing required hub resolver params: %s", strings.Join(missingParams, ", "))
+ }
+
+ return nil
+}
+
+func populateDefaultParams(ctx context.Context, params []pipelinev1.Param) (map[string]string, error) {
+ conf := framework.GetResolverConfigFromContext(ctx)
+ paramsMap := make(map[string]string)
+ for _, p := range params {
+ paramsMap[p.Name] = p.Value.StringVal
+ }
+
+ // type
+ if _, ok := paramsMap[ParamType]; !ok {
+ if typeString, ok := conf[ConfigType]; ok {
+ paramsMap[ParamType] = typeString
+ } else {
+ return nil, errors.New("default type was not set during installation of the hub resolver")
+ }
+ }
+
+ // kind
+ if _, ok := paramsMap[ParamKind]; !ok {
+ if kindString, ok := conf[ConfigKind]; ok {
+ paramsMap[ParamKind] = kindString
+ } else {
+ return nil, errors.New("default resource kind was not set during installation of the hub resolver")
+ }
+ }
+
+ // catalog
+ resCatName, err := resolveCatalogName(paramsMap, conf)
+ if err != nil {
+ return nil, err
+ }
+ paramsMap[ParamCatalog] = resCatName
+
+ return paramsMap, nil
+}
diff --git a/upstream/pkg/resolution/resolver/hub/resolver_test.go b/upstream/pkg/resolution/resolver/hub/resolver_test.go
index 82335875d7d..474838c1a7f 100644
--- a/upstream/pkg/resolution/resolver/hub/resolver_test.go
+++ b/upstream/pkg/resolution/resolver/hub/resolver_test.go
@@ -29,7 +29,7 @@ import (
"github.com/google/go-cmp/cmp"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- common "github.com/tektoncd/pipeline/pkg/resolution/common"
+ resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/resolution/resolver/framework"
frtesting "github.com/tektoncd/pipeline/pkg/resolution/resolver/framework/testing"
"github.com/tektoncd/pipeline/test/diff"
@@ -38,7 +38,7 @@ import (
func TestGetSelector(t *testing.T) {
resolver := Resolver{}
sel := resolver.GetSelector(context.Background())
- if typ, has := sel[common.LabelKeyResolverType]; !has {
+ if typ, has := sel[resolutioncommon.LabelKeyResolverType]; !has {
t.Fatalf("unexpected selector: %v", sel)
} else if typ != LabelValueHubResolverType {
t.Fatalf("unexpected type: %q", typ)
@@ -346,7 +346,7 @@ func TestResolveConstraint(t *testing.T) {
ret = tt.resultTask
}
output, _ := json.Marshal(ret)
- fmt.Fprint(w, string(output))
+ fmt.Fprintf(w, string(output))
}))
resolver := &Resolver{
@@ -582,7 +582,7 @@ func TestResolve(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
svr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- fmt.Fprint(w, tc.input)
+ fmt.Fprintf(w, tc.input)
}))
resolver := &Resolver{
diff --git a/upstream/pkg/internal/resolution/resolutionrequest.go b/upstream/pkg/resolution/resolver/internal/resolutionrequest.go
similarity index 91%
rename from upstream/pkg/internal/resolution/resolutionrequest.go
rename to upstream/pkg/resolution/resolver/internal/resolutionrequest.go
index a7cf0909ae0..be7f78f9a89 100644
--- a/upstream/pkg/internal/resolution/resolutionrequest.go
+++ b/upstream/pkg/resolution/resolver/internal/resolutionrequest.go
@@ -14,13 +14,13 @@
limitations under the License.
*/
-package resolution
+package internal
import (
"encoding/base64"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- common "github.com/tektoncd/pipeline/pkg/resolution/common"
+ resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
corev1 "k8s.io/api/core/v1"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
@@ -43,7 +43,7 @@ func CreateResolutionRequestFailureStatus() *v1beta1.ResolutionRequestStatus {
Conditions: duckv1.Conditions{{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionFalse,
- Reason: common.ReasonResolutionFailed,
+ Reason: resolutioncommon.ReasonResolutionFailed,
}},
},
}
diff --git a/upstream/pkg/resolution/resource/crd_resource.go b/upstream/pkg/resolution/resource/crd_resource.go
index 9e1281a63a7..90fd7653303 100644
--- a/upstream/pkg/resolution/resource/crd_resource.go
+++ b/upstream/pkg/resolution/resource/crd_resource.go
@@ -22,11 +22,11 @@ import (
"errors"
"fmt"
- v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
+ pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
rrclient "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
rrlisters "github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1"
- common "github.com/tektoncd/pipeline/pkg/resolution/common"
+ resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
@@ -34,8 +34,6 @@ import (
// CRDRequester implements the Requester interface using
// ResolutionRequest CRDs.
-//
-// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resource.CRDRequester] instead.
type CRDRequester struct {
clientset rrclient.Interface
lister rrlisters.ResolutionRequestLister
@@ -45,8 +43,6 @@ type CRDRequester struct {
// ResolutionRequest CRD objects to mediate between the caller who wants a
// resource (e.g. Tekton Pipelines) and the responder who can fetch
// it (e.g. the gitresolver)
-//
-// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resource.NewCRDRequester] instead.
func NewCRDRequester(clientset rrclient.Interface, lister rrlisters.ResolutionRequestLister) *CRDRequester {
return &CRDRequester{clientset, lister}
}
@@ -67,7 +63,7 @@ func (r *CRDRequester) Submit(ctx context.Context, resolver ResolverName, req Re
!apierrors.IsAlreadyExists(err) {
return nil, err
}
- return nil, common.ErrRequestInProgress
+ return nil, resolutioncommon.ErrRequestInProgress
}
if rr.Status.GetCondition(apis.ConditionSucceeded).IsUnknown() {
@@ -76,58 +72,52 @@ func (r *CRDRequester) Submit(ctx context.Context, resolver ResolverName, req Re
// that it doesn't get deleted until the caller is done
// with it. Use appendOwnerReference and then submit
// update to ResolutionRequest.
- return nil, common.ErrRequestInProgress
+ return nil, resolutioncommon.ErrRequestInProgress
}
if rr.Status.GetCondition(apis.ConditionSucceeded).IsTrue() {
- return CrdIntoResource(rr), nil
+ return crdIntoResource(rr), nil
}
message := rr.Status.GetCondition(apis.ConditionSucceeded).GetMessage()
- err := common.NewError(common.ReasonResolutionFailed, errors.New(message))
+ err := resolutioncommon.NewError(resolutioncommon.ReasonResolutionFailed, errors.New(message))
return nil, err
}
func (r *CRDRequester) createResolutionRequest(ctx context.Context, resolver ResolverName, req Request) error {
- var owner metav1.OwnerReference
- if ownedReq, ok := req.(OwnedRequest); ok {
- owner = ownedReq.OwnerRef()
- }
- rr := CreateResolutionRequest(ctx, resolver, req.Name(), req.Namespace(), req.Params(), owner)
- _, err := r.clientset.ResolutionV1beta1().ResolutionRequests(rr.Namespace).Create(ctx, rr, metav1.CreateOptions{})
- return err
-}
-
-func CreateResolutionRequest(ctx context.Context, resolver common.ResolverName, name, namespace string, params []v1.Param, ownerRef metav1.OwnerReference) *v1beta1.ResolutionRequest {
rr := &v1beta1.ResolutionRequest{
TypeMeta: metav1.TypeMeta{
APIVersion: "resolution.tekton.dev/v1beta1",
Kind: "ResolutionRequest",
},
ObjectMeta: metav1.ObjectMeta{
- Name: name,
- Namespace: namespace,
+ Name: req.Name(),
+ Namespace: req.Namespace(),
Labels: map[string]string{
- common.LabelKeyResolverType: string(resolver),
+ resolutioncommon.LabelKeyResolverType: string(resolver),
},
},
Spec: v1beta1.ResolutionRequestSpec{
- Params: params,
+ Params: req.Params(),
},
}
- appendOwnerReference(rr, ownerRef)
- return rr
+ appendOwnerReference(rr, req)
+ _, err := r.clientset.ResolutionV1beta1().ResolutionRequests(rr.Namespace).Create(ctx, rr, metav1.CreateOptions{})
+ return err
}
-func appendOwnerReference(rr *v1beta1.ResolutionRequest, ownerRef metav1.OwnerReference) {
- isOwner := false
- for _, ref := range rr.ObjectMeta.OwnerReferences {
- if ownerRefsAreEqual(ref, ownerRef) {
- isOwner = true
+func appendOwnerReference(rr *v1beta1.ResolutionRequest, req Request) {
+ if ownedReq, ok := req.(OwnedRequest); ok {
+ newOwnerRef := ownedReq.OwnerRef()
+ isOwner := false
+ for _, ref := range rr.ObjectMeta.OwnerReferences {
+ if ownerRefsAreEqual(ref, newOwnerRef) {
+ isOwner = true
+ }
+ }
+ if !isOwner {
+ rr.ObjectMeta.OwnerReferences = append(rr.ObjectMeta.OwnerReferences, newOwnerRef)
}
- }
- if !isOwner {
- rr.ObjectMeta.OwnerReferences = append(rr.ObjectMeta.OwnerReferences, ownerRef)
}
}
@@ -141,21 +131,21 @@ func ownerRefsAreEqual(a, b metav1.OwnerReference) bool {
return a.APIVersion == b.APIVersion && a.Kind == b.Kind && a.Name == b.Name && a.UID == b.UID
}
-// ReadOnlyResolutionRequest is an opaque wrapper around ResolutionRequest
+// readOnlyResolutionRequest is an opaque wrapper around ResolutionRequest
// that provides the methods needed to read data from it using the
// Resource interface without exposing the underlying API
// object.
-type ReadOnlyResolutionRequest struct {
+type readOnlyResolutionRequest struct {
req *v1beta1.ResolutionRequest
}
-var _ common.ResolvedResource = ReadOnlyResolutionRequest{}
+var _ ResolvedResource = readOnlyResolutionRequest{}
-func CrdIntoResource(rr *v1beta1.ResolutionRequest) ReadOnlyResolutionRequest {
- return ReadOnlyResolutionRequest{req: rr}
+func crdIntoResource(rr *v1beta1.ResolutionRequest) readOnlyResolutionRequest {
+ return readOnlyResolutionRequest{req: rr}
}
-func (r ReadOnlyResolutionRequest) Annotations() map[string]string {
+func (r readOnlyResolutionRequest) Annotations() map[string]string {
status := r.req.GetStatus()
if status != nil && status.Annotations != nil {
annotationsCopy := map[string]string{}
@@ -167,7 +157,7 @@ func (r ReadOnlyResolutionRequest) Annotations() map[string]string {
return nil
}
-func (r ReadOnlyResolutionRequest) Data() ([]byte, error) {
+func (r readOnlyResolutionRequest) Data() ([]byte, error) {
encodedData := r.req.Status.ResolutionRequestStatusFields.Data
decodedBytes, err := base64.StdEncoding.Strict().DecodeString(encodedData)
if err != nil {
@@ -176,6 +166,6 @@ func (r ReadOnlyResolutionRequest) Data() ([]byte, error) {
return decodedBytes, nil
}
-func (r ReadOnlyResolutionRequest) RefSource() *v1.RefSource {
+func (r readOnlyResolutionRequest) RefSource() *pipelinev1.RefSource {
return r.req.Status.RefSource
}
diff --git a/upstream/pkg/resolution/resource/crd_resource_test.go b/upstream/pkg/resolution/resource/crd_resource_test.go
index c63a3564977..da5a06fac38 100644
--- a/upstream/pkg/resolution/resource/crd_resource_test.go
+++ b/upstream/pkg/resolution/resource/crd_resource_test.go
@@ -25,11 +25,10 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
- "github.com/tektoncd/pipeline/pkg/resolution/common"
+ resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/pkg/resolution/resource"
"github.com/tektoncd/pipeline/test"
"github.com/tektoncd/pipeline/test/diff"
- resolution "github.com/tektoncd/pipeline/test/resolution"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/logging"
_ "knative.dev/pkg/system/testing" // Setup system.Namespace()
@@ -145,7 +144,7 @@ conditions:
testCases := []struct {
name string
- inputRequest *resolution.RawRequest
+ inputRequest *test.RawRequest
inputResolutionRequest *v1beta1.ResolutionRequest
expectedResolutionRequest *v1beta1.ResolutionRequest
expectedResolvedResource *v1beta1.ResolutionRequest
@@ -157,7 +156,7 @@ conditions:
inputResolutionRequest: nil,
expectedResolutionRequest: createdRR.DeepCopy(),
expectedResolvedResource: nil,
- expectedErr: common.ErrRequestInProgress,
+ expectedErr: resolutioncommon.ErrRequestInProgress,
},
{
name: "resolution request exist and status is unknown",
@@ -165,7 +164,7 @@ conditions:
inputResolutionRequest: unknownRR.DeepCopy(),
expectedResolutionRequest: nil,
expectedResolvedResource: nil,
- expectedErr: common.ErrRequestInProgress,
+ expectedErr: resolutioncommon.ErrRequestInProgress,
},
{
name: "resolution request exist and status is succeeded",
@@ -189,7 +188,7 @@ conditions:
inputResolutionRequest: failedRR.DeepCopy(),
expectedResolutionRequest: nil,
expectedResolvedResource: nil,
- expectedErr: common.NewError(common.ReasonResolutionFailed, errors.New("error message")),
+ expectedErr: resolutioncommon.NewError(resolutioncommon.ReasonResolutionFailed, errors.New("error message")),
},
}
@@ -205,7 +204,7 @@ conditions:
ctx := testAssets.Ctx
clients := testAssets.Clients
- resolver := common.ResolverName("git")
+ resolver := resolutioncommon.ResolverName("git")
crdRequester := resource.NewCRDRequester(clients.ResolutionRequests, testAssets.Informers.ResolutionRequest.Lister())
requestWithOwner := &ownerRequest{
Request: tc.inputRequest.Request(),
@@ -236,7 +235,7 @@ conditions:
if err != nil {
t.Errorf("unexpected error decoding expected resource data: %v", err)
}
- expectedResolvedResource := resolution.NewResolvedResource(data, rr.Status.Annotations, rr.Status.RefSource, nil)
+ expectedResolvedResource := test.NewResolvedResource(data, rr.Status.Annotations, rr.Status.RefSource, nil)
assertResolvedResourceEqual(t, expectedResolvedResource, resolvedResource)
}
@@ -256,7 +255,7 @@ conditions:
}
type ownerRequest struct {
- common.Request
+ resolutioncommon.Request
ownerRef metav1.OwnerReference
}
@@ -264,9 +263,9 @@ func (r *ownerRequest) OwnerRef() metav1.OwnerReference {
return r.ownerRef
}
-func mustParseRawRequest(t *testing.T, yamlStr string) *resolution.RawRequest {
+func mustParseRawRequest(t *testing.T, yamlStr string) *test.RawRequest {
t.Helper()
- output := &resolution.RawRequest{}
+ output := &test.RawRequest{}
if err := yaml.Unmarshal([]byte(yamlStr), output); err != nil {
t.Errorf("parsing raw request %s: %v", yamlStr, err)
}
@@ -300,7 +299,7 @@ func mustParseResolutionRequestStatus(t *testing.T, yamlStr string) *v1beta1.Res
return output
}
-func assertResolvedResourceEqual(t *testing.T, expected, actual common.ResolvedResource) {
+func assertResolvedResourceEqual(t *testing.T, expected, actual resolutioncommon.ResolvedResource) {
t.Helper()
expectedBytes, err := expected.Data()
if err != nil {
diff --git a/upstream/pkg/resolution/resource/name.go b/upstream/pkg/resolution/resource/name.go
index 8557d824622..051eabc89d0 100644
--- a/upstream/pkg/resolution/resource/name.go
+++ b/upstream/pkg/resolution/resource/name.go
@@ -21,77 +21,26 @@ import (
"hash"
"hash/fnv"
"sort"
- "strings"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
- "k8s.io/apimachinery/pkg/util/validation"
- "knative.dev/pkg/kmeta"
)
-const (
- // ParamName is a param that explicitly assigns a name to the remote object
- ParamName = "name"
-
- // ParamURL is a param that hold the URL used for accesing the remote object
- ParamURL = "url"
-)
-
-//
-
-const maxLength = validation.DNS1123LabelMaxLength
-
-// GenerateDeterministicName makes a best-effort attempt to create a
-// unique but reproducible name for use in a Request. The returned value
-// will have the format {prefix}-{hash} where {prefix} is
-// given and {hash} is nameHasher(base) + nameHasher(param1) +
-// nameHasher(param2) + ...
-func GenerateDeterministicName(prefix, base string, params v1.Params) (string, error) {
- return GenerateDeterministicNameFromSpec(prefix, base, &v1beta1.ResolutionRequestSpec{Params: params})
-}
-
-// GetNameAndNamespace determines the name and namespace for a resource request.
-// It prioritizes explicit values, falling back to the owning object and "default" namespace.
-// If needed, it generates a deterministic name to prevent duplicate requests within a context.
-func GetNameAndNamespace(resolverName string, owner kmeta.OwnerRefable, name string, namespace string, req *v1beta1.ResolutionRequestSpec) (string, string, error) {
- if name == "" {
- name = owner.GetObjectMeta().GetName()
- namespace = owner.GetObjectMeta().GetNamespace()
- }
- if namespace == "" {
- namespace = "default"
- }
- // Generating a deterministic name for the resource request
- // prevents multiple requests being issued for the same
- // pipelinerun's pipelineRef or taskrun's taskRef.
- remoteResourceBaseName := namespace + "/" + name
- name, err := GenerateDeterministicNameFromSpec(resolverName, remoteResourceBaseName, req)
- if err != nil {
- return "", "", fmt.Errorf("error generating name for taskrun %s/%s: %w", namespace, name, err)
- }
- return name, namespace, nil
-}
-
// nameHasher returns the hash.Hash to use when generating names.
func nameHasher() hash.Hash {
return fnv.New128a()
}
-// GenerateDeterministicNameFromSpec makes a best-effort attempt to create a
+// GenerateDeterministicName makes a best-effort attempt to create a
// unique but reproducible name for use in a Request. The returned value
// will have the format {prefix}-{hash} where {prefix} is
// given and {hash} is nameHasher(base) + nameHasher(param1) +
// nameHasher(param2) + ...
-func GenerateDeterministicNameFromSpec(prefix, base string, resolutionSpec *v1beta1.ResolutionRequestSpec) (string, error) {
+func GenerateDeterministicName(prefix, base string, params v1.Params) (string, error) {
hasher := nameHasher()
if _, err := hasher.Write([]byte(base)); err != nil {
return "", err
}
- if resolutionSpec == nil {
- return fmt.Sprintf("%s-%x", prefix, hasher.Sum(nil)), nil
- }
- params := resolutionSpec.Params
sortedParams := make(v1.Params, len(params))
for i := range params {
sortedParams[i] = *params[i].DeepCopy()
@@ -118,39 +67,5 @@ func GenerateDeterministicNameFromSpec(prefix, base string, resolutionSpec *v1be
}
}
}
- if len(resolutionSpec.URL) > 0 {
- if _, err := hasher.Write([]byte(resolutionSpec.URL)); err != nil {
- return "", err
- }
- }
- name := fmt.Sprintf("%s-%x", prefix, hasher.Sum(nil))
- if maxLength > len(name) {
- return name, nil
- }
- return name[:strings.LastIndex(name[:maxLength], " ")], nil
-}
-
-// GenerateErrorLogString makes a best effort attempt to get the name of the task
-// when a resolver error occurred. The TaskRef name does not have to be set, where
-// the specific resolver gets the name from the parameters.
-func GenerateErrorLogString(resolverType string, params v1.Params) string {
- paramString := fmt.Sprintf("resolver type %s\n", resolverType)
- for _, p := range params {
- if p.Name == ParamName {
- name := p.Value.StringVal
- if p.Value.Type != v1.ParamTypeString {
- asJSON, err := p.Value.MarshalJSON()
- if err != nil {
- paramString += fmt.Sprintf("name could not be marshalled: %s\n", err.Error())
- continue
- }
- name = string(asJSON)
- }
- paramString += fmt.Sprintf("name = %s\n", name)
- }
- if p.Name == ParamURL {
- paramString += fmt.Sprintf("url = %s\n", p.Value.StringVal)
- }
- }
- return paramString
+ return fmt.Sprintf("%s-%x", prefix, hasher.Sum(nil)), nil
}
diff --git a/upstream/pkg/resolution/resource/name_test.go b/upstream/pkg/resolution/resource/name_test.go
index 738d84cc47d..1a90a0aaf18 100644
--- a/upstream/pkg/resolution/resource/name_test.go
+++ b/upstream/pkg/resolution/resource/name_test.go
@@ -17,11 +17,9 @@ limitations under the License.
package resource_test
import (
- "strings"
"testing"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
"github.com/tektoncd/pipeline/pkg/resolution/resource"
)
@@ -115,215 +113,3 @@ func TestGenerateDeterministicName(t *testing.T) {
})
}
}
-
-func TestGenerateDeterministicNameFromSpec(t *testing.T) {
- type args struct {
- prefix string
- base string
- params []v1.Param
- url string
- }
- golden := args{
- prefix: "prefix",
- base: "base",
- params: []v1.Param{
- {
- Name: "string-param",
- Value: v1.ParamValue{
- Type: v1.ParamTypeString,
- StringVal: "value1",
- },
- },
- {
- Name: "array-param",
- Value: v1.ParamValue{
- Type: v1.ParamTypeArray,
- ArrayVal: []string{"value1", "value2"},
- },
- },
- {
- Name: "object-param",
- Value: v1.ParamValue{
- Type: v1.ParamTypeObject,
- ObjectVal: map[string]string{"key": "value"},
- },
- },
- },
- url: "https://foo/bar",
- }
- tests := []struct {
- name string
- args args
- want string
- wantErr bool
- }{
- {
- name: "only contains prefix",
- args: args{
- prefix: golden.prefix,
- },
- want: "prefix-6c62272e07bb014262b821756295c58d",
- },
- {
- name: "only contains base",
- args: args{
- base: golden.base,
- },
- want: "-6989337ae0757277b806e97e86444ef0",
- },
- {
- name: "only contains url",
- args: args{
- url: golden.url,
- },
- want: "-dcfaf53735f4a84a3e319e17352940b4",
- },
- {
- name: "only contains params",
- args: args{
- params: golden.params,
- },
- want: "-52921b17d3c2930a34419c618d6af0e9",
- },
- {
- name: "params with different order should generate same hash",
- args: args{
- params: []v1.Param{
- golden.params[2],
- golden.params[1],
- golden.params[0],
- },
- },
- want: "-52921b17d3c2930a34419c618d6af0e9",
- },
- {
- name: "contain all fields",
- args: golden,
- want: "prefix-ff25bd24688ab610bdc530a5ab3aabbd",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- resolutionSpec := &v1beta1.ResolutionRequestSpec{
- Params: tt.args.params,
- URL: tt.args.url,
- }
- got, err := resource.GenerateDeterministicNameFromSpec(tt.args.prefix, tt.args.base, resolutionSpec)
- if (err != nil) != tt.wantErr {
- t.Errorf("GenerateDeterministicNameFromSpec() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if got != tt.want {
- t.Errorf("GenerateDeterministicNameFromSpec() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestGenerateErrorLogString(t *testing.T) {
- tests := []struct {
- resolverType string
- name string
- url string
- err string
- params []v1.Param
- isPresent bool
- }{
- {
- name: "foo",
- url: "https://bar",
- resolverType: "git",
- isPresent: true,
- params: []v1.Param{
- {
- Name: resource.ParamName,
- Value: v1.ParamValue{
- Type: v1.ParamTypeString,
- StringVal: "foo",
- },
- },
- {
- Name: resource.ParamURL,
- Value: v1.ParamValue{
- Type: v1.ParamTypeString,
- StringVal: "https://bar",
- },
- },
- },
- },
- {
- name: "foo",
- url: "https://bar",
- resolverType: "",
- err: "name could not be marshalled",
- params: []v1.Param{},
- },
- {
- name: "goo",
- resolverType: "bundle",
- isPresent: true,
- params: []v1.Param{
- {
- Name: resource.ParamName,
- Value: v1.ParamValue{
- Type: v1.ParamTypeString,
- StringVal: "goo",
- },
- },
- },
- },
- {
- name: "goo-array",
- resolverType: "bundle",
- isPresent: true,
- params: []v1.Param{
- {
- Name: resource.ParamName,
- Value: v1.ParamValue{
- Type: v1.ParamTypeArray,
- ArrayVal: []string{resource.ParamName, "goo-array"},
- },
- },
- },
- },
- {
- name: "hoo",
- resolverType: "cluster",
- err: "name could not be marshalled",
- isPresent: true,
- params: []v1.Param{
- {
- Name: resource.ParamName,
- Value: v1.ParamValue{
- Type: v1.ParamTypeString,
- StringVal: "hoo",
- },
- },
- {
- Name: resource.ParamName,
- Value: v1.ParamValue{
- Type: v1.ParamType("foo"),
- },
- },
- },
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got := resource.GenerateErrorLogString(tt.resolverType, tt.params)
- if strings.Contains(got, tt.name) != tt.isPresent {
- t.Errorf("name %s presence in %s should be %v", tt.name, got, tt.isPresent)
- }
- if strings.Contains(got, tt.url) != tt.isPresent {
- t.Errorf("url %s presence in %s should be %v", tt.url, got, tt.isPresent)
- }
- if strings.Contains(got, tt.err) != tt.isPresent {
- t.Errorf("err %s presence in %s should be %v", tt.err, got, tt.isPresent)
- }
- // should always have resolver type
- if !strings.Contains(got, tt.resolverType) {
- t.Errorf("type %s not in %s", tt.resolverType, got)
- }
- })
- }
-}
diff --git a/upstream/pkg/resolution/resource/request.go b/upstream/pkg/resolution/resource/request.go
index a3d8a967be5..9e0f3e194e7 100644
--- a/upstream/pkg/resolution/resource/request.go
+++ b/upstream/pkg/resolution/resource/request.go
@@ -21,8 +21,6 @@ import v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
var _ Request = &BasicRequest{}
// BasicRequest holds the fields needed to submit a new resource request.
-//
-// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resource.BasicRequest] instead.
type BasicRequest struct {
name string
namespace string
@@ -31,12 +29,12 @@ type BasicRequest struct {
// NewRequest returns an instance of a BasicRequest with the given name,
// namespace and params.
-//
-// Deprecated: Use [github.com/tektoncd/pipeline/pkg/remoteresolution/resource.NewRequest] instead.
func NewRequest(name, namespace string, params v1.Params) Request {
return &BasicRequest{name, namespace, params}
}
+var _ Request = &BasicRequest{}
+
// Name returns the name attached to the request
func (req *BasicRequest) Name() string {
return req.name
diff --git a/upstream/pkg/result/result.go b/upstream/pkg/result/result.go
index 6506074fd1d..e3d66b596ff 100644
--- a/upstream/pkg/result/result.go
+++ b/upstream/pkg/result/result.go
@@ -36,11 +36,8 @@ const (
// StepResultType default step result value
StepResultType ResultType = 4
- // StepArtifactsResultType default step artifacts result value
- StepArtifactsResultType ResultType = 5
-
- // TaskRunArtifactsResultType default taskRun artifacts result value
- TaskRunArtifactsResultType ResultType = 6
+ // ArtifactsResultType default artifacts result value
+ ArtifactsResultType ResultType = 5
)
// RunResult is used to write key/value pairs to TaskRun pod termination messages.
@@ -94,10 +91,8 @@ func (r *ResultType) UnmarshalJSON(data []byte) error {
*r = TaskRunResultType
case "InternalTektonResult":
*r = InternalTektonResultType
- case "StepArtifactsResult":
- *r = StepArtifactsResultType
- case "TaskRunArtifactsResult":
- *r = TaskRunArtifactsResultType
+ case "ArtifactsResult":
+ *r = ArtifactsResultType
default:
*r = UnknownResultType
}
diff --git a/upstream/pkg/result/result_test.go b/upstream/pkg/result/result_test.go
index fbd26b3712e..5b96acb2041 100644
--- a/upstream/pkg/result/result_test.go
+++ b/upstream/pkg/result/result_test.go
@@ -29,38 +29,28 @@ func TestRunResult_UnmarshalJSON(t *testing.T) {
name string
data string
pr RunResult
- }{
- {
- name: "type defined as string - TaskRunResult",
- data: "{\"key\":\"resultName\",\"value\":\"resultValue\", \"type\": \"TaskRunResult\"}",
- pr: RunResult{Key: "resultName", Value: "resultValue", ResultType: TaskRunResultType},
- },
- {
- name: "type defined as string - StepResult",
- data: "{\"key\":\"resultName\",\"value\":\"resultValue\", \"type\": \"StepResult\"}",
- pr: RunResult{Key: "resultName", Value: "resultValue", ResultType: StepResultType},
- },
+ }{{
+ name: "type defined as string - TaskRunResult",
+ data: "{\"key\":\"resultName\",\"value\":\"resultValue\", \"type\": \"TaskRunResult\"}",
+ pr: RunResult{Key: "resultName", Value: "resultValue", ResultType: TaskRunResultType},
+ }, {
+ name: "type defined as string - StepResult",
+ data: "{\"key\":\"resultName\",\"value\":\"resultValue\", \"type\": \"StepResult\"}",
+ pr: RunResult{Key: "resultName", Value: "resultValue", ResultType: StepResultType},
+ },
{
name: "type defined as string - InternalTektonResult",
data: "{\"key\":\"resultName\",\"value\":\"\", \"type\": \"InternalTektonResult\"}",
pr: RunResult{Key: "resultName", Value: "", ResultType: InternalTektonResultType},
- },
- {
- name: "type defined as string - StepArtifactsResultType",
- data: "{\"key\":\"resultName\",\"value\":\"\", \"type\": \"StepArtifactsResult\"}",
- pr: RunResult{Key: "resultName", Value: "", ResultType: StepArtifactsResultType},
- },
- {
- name: "type defined as string - TaskRunArtifactResult",
- data: "{\"key\":\"resultName\",\"value\":\"\", \"type\": \"TaskRunArtifactsResult\"}",
- pr: RunResult{Key: "resultName", Value: "", ResultType: TaskRunArtifactsResultType},
- },
- {
+ }, {
+ name: "type defined as string - ArtifactsResult",
+ data: "{\"key\":\"resultName\",\"value\":\"\", \"type\": \"ArtifactsResult\"}",
+ pr: RunResult{Key: "resultName", Value: "", ResultType: ArtifactsResultType},
+ }, {
name: "type defined as int",
data: "{\"key\":\"resultName\",\"value\":\"\", \"type\": 1}",
pr: RunResult{Key: "resultName", Value: "", ResultType: TaskRunResultType},
- },
- }
+ }}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
@@ -69,7 +59,7 @@ func TestRunResult_UnmarshalJSON(t *testing.T) {
t.Errorf("Unexpected error when unmarshalling the json into RunResult")
}
if d := cmp.Diff(tc.pr, pipRes); d != "" {
- t.Error(diff.PrintWantGot(d))
+ t.Errorf(diff.PrintWantGot(d))
}
})
}
diff --git a/upstream/pkg/spire/controller.go b/upstream/pkg/spire/controller.go
index 33d5203fddf..db7fbe40f7a 100644
--- a/upstream/pkg/spire/controller.go
+++ b/upstream/pkg/spire/controller.go
@@ -231,15 +231,10 @@ func (sc *spireControllerAPIClient) CreateEntries(ctx context.Context, tr *v1bet
var errCodes []int32
for _, r := range resp.GetResults() {
- statusCode := r.GetStatus().GetCode()
- if statusCode < 0 {
- return fmt.Errorf("statusCode overflows uint32: %d", statusCode)
- }
- code := codes.Code(statusCode)
-
- if code != codes.AlreadyExists && code != codes.OK {
+ if codes.Code(r.GetStatus().GetCode()) != codes.AlreadyExists &&
+ codes.Code(r.GetStatus().GetCode()) != codes.OK {
errPaths = append(errPaths, r.GetEntry().GetSpiffeId().GetPath())
- errCodes = append(errCodes, statusCode)
+ errCodes = append(errCodes, r.GetStatus().GetCode())
}
}
@@ -301,15 +296,10 @@ func (sc *spireControllerAPIClient) DeleteEntry(ctx context.Context, tr *v1beta1
var errCodes []int32
for _, r := range resp.GetResults() {
- statusCode := r.GetStatus().GetCode()
- if statusCode < 0 {
- return fmt.Errorf("statusCode overflows uint32: %d", statusCode)
- }
- code := codes.Code(statusCode)
-
- if code != codes.NotFound && code != codes.OK {
+ if codes.Code(r.GetStatus().GetCode()) != codes.NotFound &&
+ codes.Code(r.GetStatus().GetCode()) != codes.OK {
errIds = append(errIds, r.GetId())
- errCodes = append(errCodes, statusCode)
+ errCodes = append(errCodes, r.GetStatus().GetCode())
}
}
diff --git a/upstream/pkg/taskrunmetrics/metrics.go b/upstream/pkg/taskrunmetrics/metrics.go
index 94a5dad73b7..9d26d3a8cb7 100644
--- a/upstream/pkg/taskrunmetrics/metrics.go
+++ b/upstream/pkg/taskrunmetrics/metrics.go
@@ -121,7 +121,6 @@ var (
type Recorder struct {
mutex sync.Mutex
initialized bool
- cfg *config.Metrics
ReportingPeriod time.Duration
@@ -145,15 +144,15 @@ var (
// to log the TaskRun related metrics
func NewRecorder(ctx context.Context) (*Recorder, error) {
once.Do(func() {
- cfg := config.FromContextOrDefaults(ctx)
r = &Recorder{
initialized: true,
- cfg: cfg.Metrics,
// Default to reporting metrics every 30s.
ReportingPeriod: 30 * time.Second,
}
+ cfg := config.FromContextOrDefaults(ctx)
+
errRegistering = viewRegister(cfg.Metrics)
if errRegistering != nil {
r.initialized = false
@@ -213,12 +212,6 @@ func viewRegister(cfg *config.Metrics) error {
}
}
- trCountViewTags := []tag.Key{statusTag}
- if cfg.CountWithReason {
- trCountViewTags = append(trCountViewTags, reasonTag)
- trunTag = append(trunTag, reasonTag)
- }
-
trDurationView = &view.View{
Description: trDuration.Description(),
Measure: trDuration,
@@ -232,6 +225,10 @@ func viewRegister(cfg *config.Metrics) error {
TagKeys: append([]tag.Key{statusTag, namespaceTag}, append(trunTag, prunTag...)...),
}
+ trCountViewTags := []tag.Key{statusTag}
+ if cfg.CountWithReason {
+ trCountViewTags = append(trCountViewTags, reasonTag)
+ }
trCountView = &view.View{
Description: trCount.Description(),
Measure: trCount,
@@ -271,21 +268,15 @@ func viewRegister(cfg *config.Metrics) error {
Aggregation: view.LastValue(),
}
- throttleViewTags := []tag.Key{}
- if cfg.ThrottleWithNamespace {
- throttleViewTags = append(throttleViewTags, namespaceTag)
- }
runningTRsThrottledByQuotaView = &view.View{
Description: runningTRsThrottledByQuota.Description(),
Measure: runningTRsThrottledByQuota,
Aggregation: view.LastValue(),
- TagKeys: throttleViewTags,
}
runningTRsThrottledByNodeView = &view.View{
Description: runningTRsThrottledByNode.Description(),
Measure: runningTRsThrottledByNode,
Aggregation: view.LastValue(),
- TagKeys: throttleViewTags,
}
podLatencyView = &view.View{
Description: podLatency.Description(),
@@ -326,8 +317,9 @@ func viewUnregister() {
)
}
-// OnStore returns a function that checks if metrics are configured for a config.Store, and registers it if so
-func OnStore(logger *zap.SugaredLogger, r *Recorder) func(name string, value interface{}) {
+// MetricsOnStore returns a function that checks if metrics are configured for a config.Store, and registers it if so
+func MetricsOnStore(logger *zap.SugaredLogger) func(name string,
+ value interface{}) {
return func(name string, value interface{}) {
if name == config.GetMetricsConfigName() {
cfg, ok := value.(*config.Metrics)
@@ -335,8 +327,6 @@ func OnStore(logger *zap.SugaredLogger, r *Recorder) func(name string, value int
logger.Error("Failed to do type insertion for extracting metrics config")
return
}
- r.updateConfig(cfg)
- // Update metrics according to the configuration
viewUnregister()
err := viewRegister(cfg)
if err != nil {
@@ -348,10 +338,8 @@ func OnStore(logger *zap.SugaredLogger, r *Recorder) func(name string, value int
}
func pipelinerunInsertTag(pipeline, pipelinerun string) []tag.Mutator {
- return []tag.Mutator{
- tag.Insert(pipelineTag, pipeline),
- tag.Insert(pipelinerunTag, pipelinerun),
- }
+ return []tag.Mutator{tag.Insert(pipelineTag, pipeline),
+ tag.Insert(pipelinerunTag, pipelinerun)}
}
func pipelineInsertTag(pipeline, pipelinerun string) []tag.Mutator {
@@ -359,10 +347,8 @@ func pipelineInsertTag(pipeline, pipelinerun string) []tag.Mutator {
}
func taskrunInsertTag(task, taskrun string) []tag.Mutator {
- return []tag.Mutator{
- tag.Insert(taskTag, task),
- tag.Insert(taskrunTag, taskrun),
- }
+ return []tag.Mutator{tag.Insert(taskTag, task),
+ tag.Insert(taskrunTag, taskrun)}
}
func taskInsertTag(task, taskrun string) []tag.Mutator {
@@ -379,15 +365,6 @@ func getTaskTagName(tr *v1.TaskRun) string {
case tr.Spec.TaskRef != nil && len(tr.Spec.TaskRef.Name) > 0:
taskName = tr.Spec.TaskRef.Name
case tr.Spec.TaskSpec != nil:
- pipelineTaskTable, hasPipelineTaskTable := tr.Labels[pipeline.PipelineTaskLabelKey]
- if hasPipelineTaskTable && len(pipelineTaskTable) > 0 {
- taskName = pipelineTaskTable
- }
- case tr.Spec.TaskRef != nil && tr.Spec.TaskRef.Kind == v1.ClusterTaskRefKind:
- clusterTaskLabel, hasClusterTaskLabel := tr.Labels[pipeline.ClusterTaskLabelKey]
- if hasClusterTaskLabel && len(clusterTaskLabel) > 0 {
- taskName = clusterTaskLabel
- }
default:
if len(tr.Labels) > 0 {
taskLabel, hasTaskLabel := tr.Labels[pipeline.TaskLabelKey]
@@ -400,13 +377,6 @@ func getTaskTagName(tr *v1.TaskRun) string {
return taskName
}
-func (r *Recorder) updateConfig(cfg *config.Metrics) {
- r.mutex.Lock()
- defer r.mutex.Unlock()
-
- r.cfg = cfg
-}
-
// DurationAndCount logs the duration of TaskRun execution and
// count for number of TaskRuns succeed or failed
// returns an error if its failed to log the metrics
@@ -472,43 +442,22 @@ func (r *Recorder) RunningTaskRuns(ctx context.Context, lister listers.TaskRunLi
return err
}
- addNamespaceLabelToQuotaThrottleMetric := r.cfg != nil && r.cfg.ThrottleWithNamespace
-
var runningTrs int
- trsThrottledByQuota := map[string]int{}
- trsThrottledByQuotaCount := 0
- trsThrottledByNode := map[string]int{}
- trsThrottledByNodeCount := 0
+ var trsThrottledByQuota int
+ var trsThrottledByNode int
var trsWaitResolvingTaskRef int
for _, pr := range trs {
- // initialize metrics with namespace tag to zero if unset; will then update as needed below
- _, ok := trsThrottledByQuota[pr.Namespace]
- if !ok {
- trsThrottledByQuota[pr.Namespace] = 0
- }
- _, ok = trsThrottledByNode[pr.Namespace]
- if !ok {
- trsThrottledByNode[pr.Namespace] = 0
- }
-
if pr.IsDone() {
continue
}
runningTrs++
-
succeedCondition := pr.Status.GetCondition(apis.ConditionSucceeded)
if succeedCondition != nil && succeedCondition.Status == corev1.ConditionUnknown {
switch succeedCondition.Reason {
case pod.ReasonExceededResourceQuota:
- trsThrottledByQuotaCount++
- cnt := trsThrottledByQuota[pr.Namespace]
- cnt++
- trsThrottledByQuota[pr.Namespace] = cnt
+ trsThrottledByQuota++
case pod.ReasonExceededNodeResources:
- trsThrottledByNodeCount++
- cnt := trsThrottledByNode[pr.Namespace]
- cnt++
- trsThrottledByNode[pr.Namespace] = cnt
+ trsThrottledByNode++
case v1.TaskRunReasonResolvingTaskRef:
trsWaitResolvingTaskRef++
}
@@ -521,32 +470,12 @@ func (r *Recorder) RunningTaskRuns(ctx context.Context, lister listers.TaskRunLi
}
metrics.Record(ctx, runningTRsCount.M(float64(runningTrs)))
metrics.Record(ctx, runningTRs.M(float64(runningTrs)))
+ metrics.Record(ctx, runningTRsThrottledByNodeCount.M(float64(trsThrottledByNode)))
+ metrics.Record(ctx, runningTRsThrottledByQuotaCount.M(float64(trsThrottledByQuota)))
metrics.Record(ctx, runningTRsWaitingOnTaskResolutionCount.M(float64(trsWaitResolvingTaskRef)))
- metrics.Record(ctx, runningTRsThrottledByQuotaCount.M(float64(trsThrottledByQuotaCount)))
- metrics.Record(ctx, runningTRsThrottledByNodeCount.M(float64(trsThrottledByNodeCount)))
+ metrics.Record(ctx, runningTRsThrottledByNode.M(float64(trsThrottledByNode)))
+ metrics.Record(ctx, runningTRsThrottledByQuota.M(float64(trsThrottledByQuota)))
- for ns, cnt := range trsThrottledByQuota {
- var mutators []tag.Mutator
- if addNamespaceLabelToQuotaThrottleMetric {
- mutators = []tag.Mutator{tag.Insert(namespaceTag, ns)}
- }
- ctx, err := tag.New(ctx, mutators...)
- if err != nil {
- return err
- }
- metrics.Record(ctx, runningTRsThrottledByQuota.M(float64(cnt)))
- }
- for ns, cnt := range trsThrottledByNode {
- var mutators []tag.Mutator
- if addNamespaceLabelToQuotaThrottleMetric {
- mutators = []tag.Mutator{tag.Insert(namespaceTag, ns)}
- }
- ctx, err := tag.New(ctx, mutators...)
- if err != nil {
- return err
- }
- metrics.Record(ctx, runningTRsThrottledByNode.M(float64(cnt)))
- }
return nil
}
@@ -594,10 +523,8 @@ func (r *Recorder) RecordPodLatency(ctx context.Context, pod *corev1.Pod, tr *v1
ctx, err := tag.New(
ctx,
- append([]tag.Mutator{
- tag.Insert(namespaceTag, tr.Namespace),
- tag.Insert(podTag, pod.Name),
- },
+ append([]tag.Mutator{tag.Insert(namespaceTag, tr.Namespace),
+ tag.Insert(podTag, pod.Name)},
r.insertTaskTag(taskName, tr.Name)...)...)
if err != nil {
return err
diff --git a/upstream/pkg/taskrunmetrics/metrics_test.go b/upstream/pkg/taskrunmetrics/metrics_test.go
index daf8342dbce..7a23478d21d 100644
--- a/upstream/pkg/taskrunmetrics/metrics_test.go
+++ b/upstream/pkg/taskrunmetrics/metrics_test.go
@@ -44,7 +44,7 @@ var (
completionTime = metav1.NewTime(startTime.Time.Add(time.Minute))
)
-func getConfigContext(countWithReason, throttleWithNamespace bool) context.Context {
+func getConfigContext(countWithReason bool) context.Context {
ctx := context.Background()
cfg := &config.Config{
Metrics: &config.Metrics{
@@ -53,7 +53,6 @@ func getConfigContext(countWithReason, throttleWithNamespace bool) context.Conte
DurationTaskrunType: config.DefaultDurationTaskrunType,
DurationPipelinerunType: config.DefaultDurationPipelinerunType,
CountWithReason: countWithReason,
- ThrottleWithNamespace: throttleWithNamespace,
},
}
return config.ToContext(ctx, cfg)
@@ -80,19 +79,19 @@ func TestUninitializedMetrics(t *testing.T) {
}
}
-func TestOnStore(t *testing.T) {
+func TestMetricsOnStore(t *testing.T) {
log := zap.NewExample()
defer log.Sync()
logger := log.Sugar()
- ctx := getConfigContext(false, false)
+ ctx := getConfigContext(false)
metrics, err := NewRecorder(ctx)
if err != nil {
t.Fatalf("NewRecorder: %v", err)
}
// We check that there's no change when incorrect config is passed
- OnStore(logger, metrics)(config.GetMetricsConfigName(), &config.Store{})
+ MetricsOnStore(logger)(config.GetMetricsConfigName(), &config.Store{})
// Comparing function assign to struct with the one which should yield same value
if reflect.ValueOf(metrics.insertTaskTag).Pointer() != reflect.ValueOf(taskrunInsertTag).Pointer() {
t.Fatalf("metrics recorder shouldn't change during this OnStore call")
@@ -107,7 +106,7 @@ func TestOnStore(t *testing.T) {
}
// We test that there's no change when incorrect values in configmap is passed
- OnStore(logger, metrics)(config.GetMetricsConfigName(), cfg)
+ MetricsOnStore(logger)(config.GetMetricsConfigName(), cfg)
// Comparing function assign to struct with the one which should yield same value
if reflect.ValueOf(metrics.insertTaskTag).Pointer() != reflect.ValueOf(taskrunInsertTag).Pointer() {
t.Fatalf("metrics recorder shouldn't change during this OnStore call")
@@ -121,7 +120,7 @@ func TestOnStore(t *testing.T) {
DurationPipelinerunType: config.DurationPipelinerunTypeLastValue,
}
- OnStore(logger, metrics)(config.GetMetricsConfigName(), cfg)
+ MetricsOnStore(logger)(config.GetMetricsConfigName(), cfg)
if reflect.ValueOf(metrics.insertTaskTag).Pointer() != reflect.ValueOf(nilInsertTag).Pointer() {
t.Fatalf("metrics recorder didn't change during OnStore call")
}
@@ -172,78 +171,6 @@ func TestRecordTaskRunDurationCount(t *testing.T) {
expectedCount: 1,
beforeCondition: nil,
countWithReason: false,
- }, {
- name: "for succeeded taskrun ref cluster task",
- taskRun: &v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{Name: "taskrun-1", Namespace: "ns", Labels: map[string]string{
- pipeline.PipelineTaskLabelKey: "task-1",
- }},
- Spec: v1.TaskRunSpec{
- TaskSpec: &v1.TaskSpec{},
- },
- Status: v1.TaskRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{{
- Type: apis.ConditionSucceeded,
- Status: corev1.ConditionTrue,
- }},
- },
- TaskRunStatusFields: v1.TaskRunStatusFields{
- StartTime: &startTime,
- CompletionTime: &completionTime,
- },
- },
- },
- metricName: "taskrun_duration_seconds",
- expectedDurationTags: map[string]string{
- "task": "task-1",
- "taskrun": "taskrun-1",
- "namespace": "ns",
- "status": "success",
- },
- expectedCountTags: map[string]string{
- "status": "success",
- },
- expectedDuration: 60,
- expectedCount: 1,
- beforeCondition: nil,
- countWithReason: false,
- }, {
- name: "for succeeded taskrun create by pipelinerun",
- taskRun: &v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{Name: "taskrun-1", Namespace: "ns", Labels: map[string]string{
- pipeline.ClusterTaskLabelKey: "task-1",
- }},
- Spec: v1.TaskRunSpec{
- TaskRef: &v1.TaskRef{Kind: v1.ClusterTaskRefKind},
- },
- Status: v1.TaskRunStatus{
- Status: duckv1.Status{
- Conditions: duckv1.Conditions{{
- Type: apis.ConditionSucceeded,
- Status: corev1.ConditionTrue,
- }},
- },
- TaskRunStatusFields: v1.TaskRunStatusFields{
- StartTime: &startTime,
- CompletionTime: &completionTime,
- },
- },
- },
- metricName: "taskrun_duration_seconds",
- expectedDurationTags: map[string]string{
- "task": "task-1",
- "taskrun": "taskrun-1",
- "namespace": "ns",
- "status": "success",
- },
- expectedCountTags: map[string]string{
- "status": "success",
- },
- expectedDuration: 60,
- expectedCount: 1,
- beforeCondition: nil,
- countWithReason: false,
}, {
name: "for succeeded taskrun with before condition",
taskRun: &v1.TaskRun{
@@ -507,7 +434,6 @@ func TestRecordTaskRunDurationCount(t *testing.T) {
"task": "task-1",
"taskrun": "taskrun-1",
"namespace": "ns",
- "reason": "TaskRunImagePullFailed",
"status": "failed",
},
expectedCountTags: map[string]string{
@@ -522,7 +448,7 @@ func TestRecordTaskRunDurationCount(t *testing.T) {
t.Run(c.name, func(t *testing.T) {
unregisterMetrics()
- ctx := getConfigContext(c.countWithReason, false)
+ ctx := getConfigContext(c.countWithReason)
metrics, err := NewRecorder(ctx)
if err != nil {
t.Fatalf("NewRecorder: %v", err)
@@ -577,7 +503,7 @@ func TestRecordRunningTaskRunsCount(t *testing.T) {
}
}
- ctx = getConfigContext(false, false)
+ ctx = getConfigContext(false)
metrics, err := NewRecorder(ctx)
if err != nil {
t.Fatalf("NewRecorder: %v", err)
@@ -597,7 +523,6 @@ func TestRecordRunningTaskRunsThrottledCounts(t *testing.T) {
nodeCount float64
quotaCount float64
waitCount float64
- addNS bool
}{
{
status: corev1.ConditionTrue,
@@ -607,20 +532,10 @@ func TestRecordRunningTaskRunsThrottledCounts(t *testing.T) {
status: corev1.ConditionTrue,
reason: pod.ReasonExceededResourceQuota,
},
- {
- status: corev1.ConditionTrue,
- reason: pod.ReasonExceededResourceQuota,
- addNS: true,
- },
{
status: corev1.ConditionTrue,
reason: pod.ReasonExceededNodeResources,
},
- {
- status: corev1.ConditionTrue,
- reason: pod.ReasonExceededNodeResources,
- addNS: true,
- },
{
status: corev1.ConditionTrue,
reason: v1.TaskRunReasonResolvingTaskRef,
@@ -655,18 +570,6 @@ func TestRecordRunningTaskRunsThrottledCounts(t *testing.T) {
reason: pod.ReasonExceededNodeResources,
nodeCount: 3,
},
- {
- status: corev1.ConditionUnknown,
- reason: pod.ReasonExceededResourceQuota,
- quotaCount: 3,
- addNS: true,
- },
- {
- status: corev1.ConditionUnknown,
- reason: pod.ReasonExceededNodeResources,
- nodeCount: 3,
- addNS: true,
- },
{
status: corev1.ConditionUnknown,
reason: v1.TaskRunReasonResolvingTaskRef,
@@ -676,9 +579,9 @@ func TestRecordRunningTaskRunsThrottledCounts(t *testing.T) {
unregisterMetrics()
ctx, _ := ttesting.SetupFakeContext(t)
informer := faketaskruninformer.Get(ctx)
- for range multiplier {
+ for i := 0; i < multiplier; i++ {
tr := &v1.TaskRun{
- ObjectMeta: metav1.ObjectMeta{Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("taskrun-"), Namespace: "test"},
+ ObjectMeta: metav1.ObjectMeta{Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("taskrun-")},
Status: v1.TaskRunStatus{
Status: duckv1.Status{
Conditions: duckv1.Conditions{{
@@ -694,7 +597,7 @@ func TestRecordRunningTaskRunsThrottledCounts(t *testing.T) {
}
}
- ctx = getConfigContext(false, tc.addNS)
+ ctx = getConfigContext(false)
metrics, err := NewRecorder(ctx)
if err != nil {
t.Fatalf("NewRecorder: %v", err)
@@ -704,13 +607,7 @@ func TestRecordRunningTaskRunsThrottledCounts(t *testing.T) {
t.Errorf("RunningTaskRuns: %v", err)
}
metricstest.CheckLastValueData(t, "running_taskruns_throttled_by_quota_count", map[string]string{}, tc.quotaCount)
- nsMap := map[string]string{}
- if tc.addNS {
- nsMap = map[string]string{namespaceTag.Name(): "test"}
- }
- metricstest.CheckLastValueData(t, "running_taskruns_throttled_by_quota", nsMap, tc.quotaCount)
metricstest.CheckLastValueData(t, "running_taskruns_throttled_by_node_count", map[string]string{}, tc.nodeCount)
- metricstest.CheckLastValueData(t, "running_taskruns_throttled_by_node", nsMap, tc.nodeCount)
metricstest.CheckLastValueData(t, "running_taskruns_waiting_on_task_resolution_count", map[string]string{}, tc.waitCount)
}
}
@@ -841,7 +738,7 @@ func TestRecordPodLatency(t *testing.T) {
t.Run(td.name, func(t *testing.T) {
unregisterMetrics()
- ctx := getConfigContext(false, false)
+ ctx := getConfigContext(false)
metrics, err := NewRecorder(ctx)
if err != nil {
t.Fatalf("NewRecorder: %v", err)
diff --git a/upstream/releases.md b/upstream/releases.md
index fb34dd8fa1a..2a3660865a4 100644
--- a/upstream/releases.md
+++ b/upstream/releases.md
@@ -41,24 +41,12 @@ Further documentation available:
## Release
-### v0.64
-- **Latest Release**: [v0.64.0][v0.64-0] (2024-09-27) ([docs][v0.64-0-docs], [examples][v0.64-0-examples])
-- **Initial Release**: [v0.64.0][v0.64-0] (2024-09-27)
-- **End of Life**: 2024-10-26
-- **Patch Releases**: [v0.64.0][v0.64-0]
-
-### v0.62 (LTS)
-- **Latest Release**: [v0.62.2][v0.62-2] (2024-08-23) ([docs][v0.62-2-docs], [examples][v0.62-2-examples])
-- **Initial Release**: [v0.62.0][v0.62-0] (2024-07-23)
-- **End of Life**: 2025-07-23
-- **Patch Releases**: [v0.62.0][v0.62-0], [v0.62.1][v0.62-1], [v0.62.2][v0.62-2]
-
-### v0.59 (LTS)
+### v0.58
-- **Latest Release**: [v0.59.2][v0.59-2] (2024-07-04) ([docs][v0.59-2-docs], [examples][v0.59-2-examples])
-- **Initial Release**: [v0.59.0][v0.59-0] (2024-04-25)
-- **Estimated End of Life**: 2025-04-24
-- **Patch Releases**: [v0.59.0][v0.59-0], [v0.59.1][v0.59-1], [v0.59.2][v0.59-2]
+- **Latest Release**: [v0.58.0][v0.58-0] (2024-03-20) ([docs][v0.58-0-docs], [examples][v0.58-0-examples])
+- **Initial Release**: [v0.58.0][v0.58-0] (2024-03-20)
+- **End of Life**: 2024-04-20
+- **Patch Releases**: [v0.58.0][v0.58-0]
### v0.56 (LTS)
@@ -74,33 +62,22 @@ Further documentation available:
- **Estimated End of Life**: 2024-10-25
- **Patch Releases**: [v0.53.0][v0.53-0], [v0.53.1][v0.53-1], [v0.53.2][v0.53-2], [v0.53.3][v0.53-3], [v0.53.4][v0.53-4], [v0.53.5][v0.53-5]
+### v0.50 (LTS)
-## End of Life Releases
-
-### v0.63
-- **Latest Release**: [v0.63.0][v0.63-0] (2024-08-30) ([docs][v0.63-0-docs], [examples][v0.63-0-examples])
-- **Initial Release**: [v0.63.0][v0.63-0] (2024-08-30)
-- **End of Life**: 2024-09-27
-- **Patch Releases**: [v0.63.0][v0.63-0]
+- **Latest Release**: [v0.50.5][v0.50-5] (2023-11-16) ([docs][v0.50-5-docs], [examples][v0.50-5-examples])
+- **Initial Release**: [v0.50.0][v0.50-0] (2023-07-25)
+- **Estimated End of Life**: 2024-07-25
+- **Patch Releases**: [v0.50.0][v0.50-0] [v0.50.1][v0.50-1] [v0.50.2][v0.50-2] [v0.50.3][v0.50-3] [v0.50.4][v0.50-4] [v0.50.5][v0.50-5]
-### v0.61
-- **Latest Release**: [v0.61.0][v0.61-0] (2024-06-25) ([docs][v0.61-0-docs], [examples][v0.61-0-examples])
-- **Initial Release**: [v0.61.0][v0.61-0] (2024-06-25)
-- **End of Life**: 2024-07-25
-- **Patch Releases**: [v0.61.0][v0.61-0]
+### v0.47 (LTS)
-### v0.60
-- **Latest Release**: [v0.60.1][v0.60-1] (2024-05-28) ([docs][v0.60-1-docs], [examples][v0.60-1-examples])
-- **Initial Release**: [v0.60.0][v0.60-0] (2024-05-22)
-- **End of Life**: 2024-06-22
-- **Patch Releases**: [v0.60.0][v0.60-0], [v0.60.1][v0.60-1]
+- **Latest Release**: [v0.47.8][v0-47-8] (2024-04-05) ([docs][v0-47-8-docs], [examples][v0-47-8-examples])
+- **Initial Release**: [v0.47.0][v0-47-0] (2023-03-17)
+- **Estimated End of Life**: 2024-03-17
+- **Patch Releases**: [v0.47.0][v0-47-0], [v0.47.1][v0-47-1], [v0.47.2][v0-47-2], [v0.47.3][v0-47-3], [v0.47.4][v0-47-4], [v0.47.5][v0-47-5], [v0.47.6][v0-47-6], [v0.47.7][v0-47-7], [v0.47.8][v0-47-8]
-### v0.58
-- **Latest Release**: [v0.58.0][v0.58-0] (2024-03-20) ([docs][v0.58-0-docs], [examples][v0.58-0-examples])
-- **Initial Release**: [v0.58.0][v0.58-0] (2024-03-20)
-- **End of Life**: 2024-04-20
-- **Patch Releases**: [v0.58.0][v0.58-0]
+## End of Life Releases
### v0.57
@@ -130,13 +107,6 @@ Further documentation available:
- **End of Life**: 2023-09-17
- **Patch Releases**: [v0.51.0][v0.51-0]
-### v0.50 (LTS)
-
-- **Latest Release**: [v0.50.5][v0.50-5] (2023-11-16) ([docs][v0.50-5-docs], [examples][v0.50-5-examples])
-- **Initial Release**: [v0.50.0][v0.50-0] (2023-07-25)
-- **Estimated End of Life**: 2024-07-25
-- **Patch Releases**: [v0.50.0][v0.50-0] [v0.50.1][v0.50-1] [v0.50.2][v0.50-2] [v0.50.3][v0.50-3] [v0.50.4][v0.50-4] [v0.50.5][v0.50-5]
-
### v0.49
- **Latest Release**: [v0.49.0][v0-49-0] (2023-06-20) ([docs][v0-49-0-docs], [examples][v0-49-0-examples])
@@ -151,13 +121,6 @@ Further documentation available:
- **End of Life**: 2023-06-20
- **Patch Releases**: [v0.48.0][v0-48-0]
-### v0.47 (LTS)
-
-- **Latest Release**: [v0.47.8][v0-47-8] (2024-04-05) ([docs][v0-47-8-docs], [examples][v0-47-8-examples])
-- **Initial Release**: [v0.47.0][v0-47-0] (2023-03-17)
-- **Estimated End of Life**: 2024-03-17
-- **Patch Releases**: [v0.47.0][v0-47-0], [v0.47.1][v0-47-1], [v0.47.2][v0-47-2], [v0.47.3][v0-47-3], [v0.47.4][v0-47-4], [v0.47.5][v0-47-5], [v0.47.6][v0-47-6], [v0.47.7][v0-47-7], [v0.47.8][v0-47-8]
-
### v0.46
- **Latest Release**: [v0.46.0][v0-46-0] (2023-03-17) ([docs][v0-46-0-docs], [examples][v0-46-0-examples])
@@ -239,17 +202,6 @@ Older releases are EOL and available on [GitHub][tekton-pipeline-releases].
[release-notes-standards]:
https://github.com/tektoncd/community/blob/main/standards.md#release-notes
-[v0.64-0]: https://github.com/tektoncd/pipeline/releases/tag/v0.64.0
-[v0.63-0]: https://github.com/tektoncd/pipeline/releases/tag/v0.63.0
-[v0.62-2]: https://github.com/tektoncd/pipeline/releases/tag/v0.62.2
-[v0.62-1]: https://github.com/tektoncd/pipeline/releases/tag/v0.62.1
-[v0.62-0]: https://github.com/tektoncd/pipeline/releases/tag/v0.62.0
-[v0.61-0]: https://github.com/tektoncd/pipeline/releases/tag/v0.61.0
-[v0.60-0]: https://github.com/tektoncd/pipeline/releases/tag/v0.60.0
-[v0.60-1]: https://github.com/tektoncd/pipeline/releases/tag/v0.60.1
-[v0.59-2]: https://github.com/tektoncd/pipeline/releases/tag/v0.59.2
-[v0.59-1]: https://github.com/tektoncd/pipeline/releases/tag/v0.59.1
-[v0.59-0]: https://github.com/tektoncd/pipeline/releases/tag/v0.59.0
[v0.58-0]: https://github.com/tektoncd/pipeline/releases/tag/v0.58.0
[v0.57-0]: https://github.com/tektoncd/pipeline/releases/tag/v0.57.0
[v0.56-3]: https://github.com/tektoncd/pipeline/releases/tag/v0.56.3
@@ -309,17 +261,6 @@ Older releases are EOL and available on [GitHub][tekton-pipeline-releases].
[v0-37-5]: https://github.com/tektoncd/pipeline/releases/tag/v0.37.5
[v0-37-0]: https://github.com/tektoncd/pipeline/releases/tag/v0.37.0
-[v0.64-0-docs]: https://github.com/tektoncd/pipeline/tree/v0.64.0/docs#tekton-pipelines
-[v0.63-0-docs]: https://github.com/tektoncd/pipeline/tree/v0.63.0/docs#tekton-pipelines
-[v0.62-2-docs]: https://github.com/tektoncd/pipeline/tree/v0.62.2/docs#tekton-pipelines
-[v0.62-1-docs]: https://github.com/tektoncd/pipeline/tree/v0.62.1/docs#tekton-pipelines
-[v0.62-0-docs]: https://github.com/tektoncd/pipeline/tree/v0.62.0/docs#tekton-pipelines
-[v0.61-0-docs]: https://github.com/tektoncd/pipeline/tree/v0.61.0/docs#tekton-pipelines
-[v0.60-0-docs]: https://github.com/tektoncd/pipeline/tree/v0.60.0/docs#tekton-pipelines
-[v0.60-1-docs]: https://github.com/tektoncd/pipeline/tree/v0.60.1/docs#tekton-pipelines
-[v0.59-2-docs]: https://github.com/tektoncd/pipeline/tree/v0.59.2/docs#tekton-pipelines
-[v0.59-1-docs]: https://github.com/tektoncd/pipeline/tree/v0.59.1/docs#tekton-pipelines
-[v0.59-0-docs]: https://github.com/tektoncd/pipeline/tree/v0.59.0/docs#tekton-pipelines
[v0.58-0-docs]: https://github.com/tektoncd/pipeline/tree/v0.58.0/docs#tekton-pipelines
[v0.57-0-docs]: https://github.com/tektoncd/pipeline/tree/v0.57.0/docs#tekton-pipelines
[v0.56-3-docs]: https://github.com/tektoncd/pipeline/tree/v0.56.3/docs#tekton-pipelines
@@ -342,17 +283,6 @@ Older releases are EOL and available on [GitHub][tekton-pipeline-releases].
[v0-38-4-docs]: https://github.com/tektoncd/pipeline/tree/v0.38.4/docs#tekton-pipelines
[v0-37-5-docs]: https://github.com/tektoncd/pipeline/tree/v0.37.5/docs#tekton-pipelines
-[v0.64-0-examples]: https://github.com/tektoncd/pipeline/tree/v0.64.0/examples#examples
-[v0.63-0-examples]: https://github.com/tektoncd/pipeline/tree/v0.63.0/examples#examples
-[v0.62-2-examples]: https://github.com/tektoncd/pipeline/tree/v0.62.2/examples#examples
-[v0.62-1-examples]: https://github.com/tektoncd/pipeline/tree/v0.62.1/examples#examples
-[v0.62-0-examples]: https://github.com/tektoncd/pipeline/tree/v0.62.0/examples#examples
-[v0.61-0-examples]: https://github.com/tektoncd/pipeline/tree/v0.61.0/examples#examples
-[v0.60-0-examples]: https://github.com/tektoncd/pipeline/tree/v0.60.0/examples#examples
-[v0.60-1-examples]: https://github.com/tektoncd/pipeline/tree/v0.60.1/examples#examples
-[v0.59-2-examples]: https://github.com/tektoncd/pipeline/tree/v0.59.2/examples#examples
-[v0.59-1-examples]: https://github.com/tektoncd/pipeline/tree/v0.59.1/examples#examples
-[v0.59-0-examples]: https://github.com/tektoncd/pipeline/tree/v0.59.0/examples#examples
[v0.58-0-examples]: https://github.com/tektoncd/pipeline/tree/v0.58.0/examples#examples
[v0.57-0-examples]: https://github.com/tektoncd/pipeline/tree/v0.57.0/examples#examples
[v0.56-3-examples]: https://github.com/tektoncd/pipeline/tree/v0.56.3/examples#examples
diff --git a/upstream/tekton/bugfix-release.sh b/upstream/tekton/bugfix-release.sh
index dd3ac229528..004d7670190 100755
--- a/upstream/tekton/bugfix-release.sh
+++ b/upstream/tekton/bugfix-release.sh
@@ -41,18 +41,11 @@ echo "> Starting the release pipeline"
tkn pipeline start pipeline-release \
--serviceaccount=release-right-meow \
--param=gitRevision="${TEKTON_RELEASE_GIT_SHA}" \
- --param=imageRegistry=ghcr.io \
- --param=imageRegistryRegions="" \
- --param=imageRegistryPath=tektoncd/pipeline \
- --param=imageRegistryUser=tekton-robot \
--param=serviceAccountPath=release.json \
- --param serviceAccountImagesPath=credentials \
--param=versionTag="${TEKTON_VERSION}" \
--param=releaseBucket=gs://tekton-releases/pipeline \
- --param=koExtraArgs=" " \
--param=releaseAsLatest="false" \
--workspace name=release-secret,secret=release-secret \
- --workspace name=release-images-secret,secret=ghcr-creds \
--workspace name=workarea,volumeClaimTemplateFile=workspace-template.yaml --use-param-defaults --pipeline-timeout 3h --showlog
RELEASE_FILE=https://storage.googleapis.com/tekton-releases/pipeline/previous/${TEKTON_VERSION}/release.yaml
diff --git a/upstream/tekton/publish.yaml b/upstream/tekton/publish.yaml
index 3709eacb342..8a3ae9bbc4b 100644
--- a/upstream/tekton/publish.yaml
+++ b/upstream/tekton/publish.yaml
@@ -73,7 +73,7 @@ spec:
steps:
- name: container-registry-auth
- image: cgr.dev/chainguard/crane:latest-dev@sha256:1a9ad542fa38507436d881646db7248fa5b8257614b4819012021046a494f3d1
+ image: cgr.dev/chainguard/crane:latest-dev@sha256:b33c82b22a6cfb21e3db968fba5f426461f7540d7fa37048e2a6ffb9aaca7f19
script: |
#!/bin/sh
set -ex
@@ -92,7 +92,7 @@ spec:
cp ${DOCKER_CONFIG} /workspace/docker-config.json
- name: create-ko-yaml
- image: cgr.dev/chainguard/go:latest-dev@sha256:ac083e53ba09176079a651dc72d7cf4baac4b39581e79c26c2595963a28a53bc
+ image: cgr.dev/chainguard/go:latest-dev@sha256:35dc4adbb3b6fadafd60d0a004d06d706f2f6ed1511f5c24f22f92f8fe94f783
script: |
#!/bin/sh
set -ex
@@ -135,7 +135,7 @@ spec:
cat /workspace/.ko.yaml
- name: run-ko
- image: gcr.io/tekton-releases/dogfooding/ko@sha256:1c27e4e98ce4dccab532295d892db443078b36011438be183f8fccfe6651a8e6
+ image: gcr.io/tekton-releases/dogfooding/ko@sha256:bdcd596b40583f6f8316745e27d7ff327a0756fb6aee1251dfc0bdbd01b26c88
env:
- name: KO_DOCKER_REPO
value: $(params.imageRegistry)/$(params.imageRegistryPath)
@@ -198,7 +198,7 @@ spec:
sed -i -e 's/\(pipeline.tekton.dev\/release\): "devel"/\1: "$(params.versionTag)"/g' -e 's/\(app.kubernetes.io\/version\): "devel"/\1: "$(params.versionTag)"/g' -e 's/\(version\): "devel"/\1: "$(params.versionTag)"/g' ${OUTPUT_RELEASE_DIR}/release.yaml
sed -i -e 's/\(pipeline.tekton.dev\/release\): "devel"/\1: "$(params.versionTag)"/g' -e 's/\(app.kubernetes.io\/version\): "devel"/\1: "$(params.versionTag)"/g' -e 's/\(version\): "devel"/\1: "$(params.versionTag)"/g' ${OUTPUT_RELEASE_DIR}/release.notags.yaml
- name: koparse
- image: gcr.io/tekton-releases/dogfooding/koparse@sha256:cec528145eca7e66b3ea2e4459928b1c593336ece922302ef342912ed571a59b
+ image: gcr.io/tekton-releases/dogfooding/koparse@sha256:6b70f2d6fc1cc7849c5e65dcf404ee153653055799ceea511935bba7a27d3c44
script: |
set -ex
@@ -232,7 +232,7 @@ spec:
${PRESERVE_IMPORT_PATH} > /workspace/built_images
- name: tag-images
- image: cgr.dev/chainguard/crane:latest-dev@sha256:1a9ad542fa38507436d881646db7248fa5b8257614b4819012021046a494f3d1
+ image: cgr.dev/chainguard/crane:latest-dev@sha256:b33c82b22a6cfb21e3db968fba5f426461f7540d7fa37048e2a6ffb9aaca7f19
script: |
#!/bin/sh
set -ex
diff --git a/upstream/test/affinity_assistant_test.go b/upstream/test/affinity_assistant_test.go
index a40e1942324..e74942b299f 100644
--- a/upstream/test/affinity_assistant_test.go
+++ b/upstream/test/affinity_assistant_test.go
@@ -56,14 +56,14 @@ spec:
- name: my-workspace
taskSpec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: echo hello foo
- name: bar
workspaces:
- name: my-workspace
taskSpec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: echo hello bar
workspaces:
- name: my-workspace
@@ -151,14 +151,14 @@ spec:
- name: my-workspace
taskSpec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: echo hello foo
- name: bar
workspaces:
- name: my-workspace2
taskSpec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: echo hello bar
- name: double-ws
workspaces:
@@ -166,12 +166,12 @@ spec:
- name: my-workspace2
taskSpec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: echo double-ws
- name: no-ws
taskSpec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: echo no-ws
workspaces:
- name: my-workspace
diff --git a/upstream/test/artifacts_test.go b/upstream/test/artifacts_test.go
index 14bb523ea7b..39ca286b3aa 100644
--- a/upstream/test/artifacts_test.go
+++ b/upstream/test/artifacts_test.go
@@ -40,57 +40,43 @@ var (
}
)
-func TestSurfaceArtifacts(t *testing.T) {
- tests := []struct {
- desc string
- resultExtractionMethod string
- }{
- {
- desc: "surface artifacts through termination message",
- resultExtractionMethod: config.ResultExtractionMethodTerminationMessage},
- {
- desc: "surface artifacts through sidecar logs",
- resultExtractionMethod: config.ResultExtractionMethodSidecarLogs},
- }
-
- for _, tc := range tests {
- t.Run(tc.desc, func(t *testing.T) {
- featureFlags := getFeatureFlagsBaseOnAPIFlag(t)
- checkFlagsEnabled := requireAllGates(requireEnableStepArtifactsGate)
+func TestSurfaceArtifactsThroughTerminationMessage(t *testing.T) {
+ featureFlags := getFeatureFlagsBaseOnAPIFlag(t)
+ checkFlagsEnabled := requireAllGates(requireEnableStepArtifactsGate)
- ctx := context.Background()
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
- c, namespace := setup(ctx, t)
- checkFlagsEnabled(ctx, t, c, "")
- previous := featureFlags.ResultExtractionMethod
- updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{
- "results-from": tc.resultExtractionMethod,
- })
+ ctx := context.Background()
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ c, namespace := setup(ctx, t)
+ checkFlagsEnabled(ctx, t, c, "")
+ previous := featureFlags.ResultExtractionMethod
+ updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{
+ "results-from": config.ResultExtractionMethodTerminationMessage,
+ })
- knativetest.CleanupOnInterrupt(func() {
- updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{
- "results-from": previous,
- })
- tearDown(ctx, t, c, namespace)
- }, t.Logf)
- defer func() {
- updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{
- "results-from": previous,
- })
- tearDown(ctx, t, c, namespace)
- }()
+ knativetest.CleanupOnInterrupt(func() {
+ updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{
+ "results-from": previous,
+ })
+ tearDown(ctx, t, c, namespace)
+ }, t.Logf)
+ defer func() {
+ updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{
+ "results-from": previous,
+ })
+ tearDown(ctx, t, c, namespace)
+ }()
- taskRunName := helpers.ObjectNameForTest(t)
+ taskRunName := helpers.ObjectNameForTest(t)
- fqImageName := getTestImage(busyboxImage)
+ fqImageName := getTestImage(busyboxImage)
- t.Logf("Creating Task and TaskRun in namespace %s", namespace)
- task := simpleArtifactProducerTask(t, namespace, fqImageName)
- if _, err := c.V1TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil {
- t.Fatalf("Failed to create Task: %s", err)
- }
- taskRun := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
+ t.Logf("Creating Task and TaskRun in namespace %s", namespace)
+ task := simpleArtifactProducerTask(t, namespace, fqImageName)
+ if _, err := c.V1TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil {
+ t.Fatalf("Failed to create Task: %s", err)
+ }
+ taskRun := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
metadata:
name: %s
namespace: %s
@@ -98,33 +84,31 @@ spec:
taskRef:
name: %s
`, taskRunName, namespace, task.Name))
- if _, err := c.V1TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil {
- t.Fatalf("Failed to create TaskRun: %s", err)
- }
+ if _, err := c.V1TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil {
+ t.Fatalf("Failed to create TaskRun: %s", err)
+ }
- if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunSucceed", v1Version); err != nil {
- t.Errorf("Error waiting for TaskRun to finish: %s", err)
- }
+ if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunSucceed", v1Version); err != nil {
+ t.Errorf("Error waiting for TaskRun to finish: %s", err)
+ }
- taskrun, err := c.V1TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{})
- if err != nil {
- t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err)
- }
- if d := cmp.Diff([]v1.TaskRunStepArtifact{{Name: "source",
- Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"},
- Uri: "git:jjjsss",
- }},
- }}, taskrun.Status.Steps[0].Inputs); d != "" {
- t.Fatalf(`The expected stepState Inputs does not match created taskrun stepState Inputs. Here is the diff: %v`, d)
- }
- if d := cmp.Diff([]v1.TaskRunStepArtifact{{Name: "image", BuildOutput: true,
- Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2", "sha256": "df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48"},
- Uri: "pkg:balba",
- }},
- }}, taskrun.Status.Steps[0].Outputs); d != "" {
- t.Fatalf(`The expected stepState Outputs does not match created taskrun stepState Outputs. Here is the diff: %v`, d)
- }
- })
+ taskrun, err := c.V1TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{})
+ if err != nil {
+ t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err)
+ }
+ if d := cmp.Diff([]v1.TaskRunStepArtifact{{Name: "input-artifacts",
+ Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"},
+ Uri: "git:jjjsss",
+ }},
+ }}, taskrun.Status.Steps[0].Inputs); d != "" {
+ t.Fatalf(`The expected stepState Inputs does not match created taskrun stepState Inputs. Here is the diff: %v`, d)
+ }
+ if d := cmp.Diff([]v1.TaskRunStepArtifact{{Name: "build-result",
+ Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2", "sha256": "df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48"},
+ Uri: "pkg:balba",
+ }},
+ }}, taskrun.Status.Steps[0].Outputs); d != "" {
+ t.Fatalf(`The expected stepState Outputs does not match created taskrun stepState Outputs. Here is the diff: %v`, d)
}
}
@@ -191,7 +175,7 @@ spec:
}}, taskrun.Status.Steps[0].Inputs); d != "" {
t.Fatalf(`The expected stepState Inputs does not match created taskrun stepState Inputs. Here is the diff: %v`, d)
}
- if d := cmp.Diff([]v1.TaskRunStepArtifact{{Name: "build-result", BuildOutput: false,
+ if d := cmp.Diff([]v1.TaskRunStepArtifact{{Name: "build-result",
Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2", "sha256": "df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48"},
Uri: "pkg:balba",
}},
@@ -201,67 +185,53 @@ spec:
}
func TestConsumeArtifacts(t *testing.T) {
- tests := []struct {
- desc string
- resultExtractionMethod string
- }{
- {
- desc: "surface artifacts through termination message",
- resultExtractionMethod: config.ResultExtractionMethodTerminationMessage},
- {
- desc: "surface artifacts through sidecar logs",
- resultExtractionMethod: config.ResultExtractionMethodSidecarLogs},
- }
-
- for _, tc := range tests {
- t.Run(tc.desc, func(t *testing.T) {
- featureFlags := getFeatureFlagsBaseOnAPIFlag(t)
- checkFlagsEnabled := requireAllGates(map[string]string{
- "enable-artifacts": "true",
- "enable-step-actions": "true",
- })
+ featureFlags := getFeatureFlagsBaseOnAPIFlag(t)
+ checkFlagsEnabled := requireAllGates(map[string]string{
+ "enable-artifacts": "true",
+ "enable-step-actions": "true",
+ })
- ctx := context.Background()
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
- c, namespace := setup(ctx, t)
- checkFlagsEnabled(ctx, t, c, "")
- previous := featureFlags.ResultExtractionMethod
- updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{
- "results-from": tc.resultExtractionMethod,
- })
+ ctx := context.Background()
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ c, namespace := setup(ctx, t)
+ checkFlagsEnabled(ctx, t, c, "")
+ previous := featureFlags.ResultExtractionMethod
+ updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{
+ "results-from": config.ResultExtractionMethodTerminationMessage,
+ })
- knativetest.CleanupOnInterrupt(func() {
- updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{
- "results-from": previous,
- })
- tearDown(ctx, t, c, namespace)
- }, t.Logf)
+ knativetest.CleanupOnInterrupt(func() {
+ updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{
+ "results-from": previous,
+ })
+ tearDown(ctx, t, c, namespace)
+ }, t.Logf)
- defer func() {
- updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{
- "results-from": previous,
- })
- tearDown(ctx, t, c, namespace)
- }()
- taskRunName := helpers.ObjectNameForTest(t)
+ defer func() {
+ updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{
+ "results-from": previous,
+ })
+ tearDown(ctx, t, c, namespace)
+ }()
+ taskRunName := helpers.ObjectNameForTest(t)
- fqImageName := getTestImage(busyboxImage)
+ fqImageName := getTestImage(busyboxImage)
- t.Logf("Creating Task and TaskRun in namespace %s", namespace)
- task := simpleArtifactProducerTask(t, namespace, fqImageName)
- task.Spec.Steps = append(task.Spec.Steps,
- v1.Step{Name: "consume-outputs", Image: fqImageName,
- Command: []string{"sh", "-c", "echo -n $(steps.hello.outputs.image) >> $(step.results.result1.path)"},
- Results: []v1.StepResult{{Name: "result1", Type: v1.ResultsTypeString}}},
- v1.Step{Name: "consume-inputs", Image: fqImageName,
- Command: []string{"sh", "-c", "echo -n $(steps.hello.inputs.source) >> $(step.results.result2.path)"},
- Results: []v1.StepResult{{Name: "result2", Type: v1.ResultsTypeString}}},
- )
- if _, err := c.V1TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil {
- t.Fatalf("Failed to create Task: %s", err)
- }
- taskRun := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
+ t.Logf("Creating Task and TaskRun in namespace %s", namespace)
+ task := simpleArtifactProducerTask(t, namespace, fqImageName)
+ task.Spec.Steps = append(task.Spec.Steps,
+ v1.Step{Name: "consume-outputs", Image: fqImageName,
+ Command: []string{"sh", "-c", "echo -n $(steps.hello.outputs) >> $(step.results.result1.path)"},
+ Results: []v1.StepResult{{Name: "result1", Type: v1.ResultsTypeString}}},
+ v1.Step{Name: "consume-inputs", Image: fqImageName,
+ Command: []string{"sh", "-c", "echo -n $(steps.hello.inputs) >> $(step.results.result2.path)"},
+ Results: []v1.StepResult{{Name: "result2", Type: v1.ResultsTypeString}}},
+ )
+ if _, err := c.V1TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil {
+ t.Fatalf("Failed to create Task: %s", err)
+ }
+ taskRun := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
metadata:
name: %s
namespace: %s
@@ -269,127 +239,27 @@ spec:
taskRef:
name: %s
`, taskRunName, namespace, task.Name))
- if _, err := c.V1TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil {
- t.Fatalf("Failed to create TaskRun: %s", err)
- }
-
- if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunSucceed", v1Version); err != nil {
- t.Errorf("Error waiting for TaskRun to finish: %s", err)
- }
-
- taskrun, err := c.V1TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{})
- if err != nil {
- t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err)
- }
- wantOut := `[{digest:{sha1:95588b8f34c31eb7d62c92aaa4e6506639b06ef2,sha256:df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48},uri:pkg:balba}]`
- gotOut := taskrun.Status.Steps[1].Results[0].Value.StringVal
- if d := cmp.Diff(wantOut, gotOut); d != "" {
- t.Fatalf(`The expected artifact outputs consumption result doesnot match expected. Here is the diff: %v`, d)
- }
- wantIn := `[{digest:{sha256:b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0},uri:git:jjjsss}]`
- gotIn := taskrun.Status.Steps[2].Results[0].Value.StringVal
- if d := cmp.Diff(wantIn, gotIn); d != "" {
- t.Fatalf(`The expected artifact Inputs consumption result doesnot match expected. Here is the diff: %v`, d)
- }
- })
+ if _, err := c.V1TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil {
+ t.Fatalf("Failed to create TaskRun: %s", err)
}
-}
-func TestStepProduceResultsAndArtifacts(t *testing.T) {
- tests := []struct {
- desc string
- resultExtractionMethod string
- }{
- {
- desc: "surface artifacts through termination message",
- resultExtractionMethod: config.ResultExtractionMethodTerminationMessage},
- {
- desc: "surface artifacts through sidecar logs",
- resultExtractionMethod: config.ResultExtractionMethodSidecarLogs},
+ if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunSucceed", v1Version); err != nil {
+ t.Errorf("Error waiting for TaskRun to finish: %s", err)
}
- for _, tc := range tests {
- t.Run(tc.desc, func(t *testing.T) {
- featureFlags := getFeatureFlagsBaseOnAPIFlag(t)
- checkFlagsEnabled := requireAllGates(map[string]string{
- "enable-artifacts": "true",
- "enable-step-actions": "true",
- })
-
- ctx := context.Background()
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
- c, namespace := setup(ctx, t)
- checkFlagsEnabled(ctx, t, c, "")
- previous := featureFlags.ResultExtractionMethod
- updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{
- "results-from": tc.resultExtractionMethod,
- })
-
- knativetest.CleanupOnInterrupt(func() {
- updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{
- "results-from": previous,
- })
- tearDown(ctx, t, c, namespace)
- }, t.Logf)
-
- defer func() {
- updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{
- "results-from": previous,
- })
- tearDown(ctx, t, c, namespace)
- }()
- taskRunName := helpers.ObjectNameForTest(t)
-
- fqImageName := getTestImage(busyboxImage)
-
- t.Logf("Creating Task and TaskRun in namespace %s", namespace)
- task := produceResultsAndArtifactsTask(t, namespace, fqImageName)
-
- if _, err := c.V1TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil {
- t.Fatalf("Failed to create Task: %s", err)
- }
- taskRun := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
-metadata:
- name: %s
- namespace: %s
-spec:
- taskRef:
- name: %s
-`, taskRunName, namespace, task.Name))
- if _, err := c.V1TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil {
- t.Fatalf("Failed to create TaskRun: %s", err)
- }
-
- if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunSucceed", v1Version); err != nil {
- t.Errorf("Error waiting for TaskRun to finish: %s", err)
- }
-
- taskrun, err := c.V1TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{})
- if err != nil {
- t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err)
- }
- if d := cmp.Diff([]v1.TaskRunStepArtifact{{Name: "source",
- Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha256": "b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"},
- Uri: "git:jjjsss",
- }},
- }}, taskrun.Status.Steps[0].Inputs); d != "" {
- t.Fatalf(`The expected stepState Inputs does not match created taskrun stepState Inputs. Here is the diff: %v`, d)
- }
- if d := cmp.Diff([]v1.TaskRunStepArtifact{{Name: "image",
- Values: []v1.ArtifactValue{{Digest: map[v1.Algorithm]string{"sha1": "95588b8f34c31eb7d62c92aaa4e6506639b06ef2", "sha256": "df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48"},
- Uri: "pkg:balba",
- }},
- }}, taskrun.Status.Steps[0].Outputs); d != "" {
- t.Fatalf(`The expected stepState Outputs does not match created taskrun stepState Outputs. Here is the diff: %v`, d)
- }
-
- wantResult := `result1Value`
- gotResult := taskrun.Status.Steps[0].Results[0].Value.StringVal
- if d := cmp.Diff(wantResult, gotResult); d != "" {
- t.Fatalf(`The expected artifact outputs consumption result doesnot match expected. Here is the diff: %v`, d)
- }
- })
+ taskrun, err := c.V1TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{})
+ if err != nil {
+ t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err)
+ }
+ wantOut := `[{digest:{sha1:95588b8f34c31eb7d62c92aaa4e6506639b06ef2,sha256:df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48},uri:pkg:balba}]`
+ gotOut := taskrun.Status.Steps[1].Results[0].Value.StringVal
+ if d := cmp.Diff(wantOut, gotOut); d != "" {
+ t.Fatalf(`The expected artifact outputs consumption result doesnot match expected. Here is the diff: %v`, d)
+ }
+ wantIn := `[{digest:{sha256:b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0},uri:git:jjjsss}]`
+ gotIn := taskrun.Status.Steps[2].Results[0].Value.StringVal
+ if d := cmp.Diff(wantIn, gotIn); d != "" {
+ t.Fatalf(`The expected artifact Inputs consumption result doesnot match expected. Here is the diff: %v`, d)
}
}
@@ -411,59 +281,7 @@ spec:
{
"inputs":[
{
- "name":"source",
- "values":[
- {
- "uri":"git:jjjsss",
- "digest":{
- "sha256":"b35cacccfdb1e24dc497d15d553891345fd155713ffe647c281c583269eaaae0"
- }
- }
- ]
- }
- ],
- "outputs":[
- {
- "name":"image",
- "buildOutput":true,
- "values":[
- {
- "uri":"pkg:balba",
- "digest":{
- "sha256":"df85b9e3983fe2ce20ef76ad675ecf435cc99fc9350adc54fa230bae8c32ce48",
- "sha1":"95588b8f34c31eb7d62c92aaa4e6506639b06ef2"
- }
- }
- ]
- }
- ]
- }
- EOF
-`, helpers.ObjectNameForTest(t), namespace, fqImageName))
- return task
-}
-
-func produceResultsAndArtifactsTask(t *testing.T, namespace string, fqImageName string) *v1.Task {
- t.Helper()
- task := parse.MustParseV1Task(t, fmt.Sprintf(`
-metadata:
- name: %s
- namespace: %s
-spec:
- steps:
- - name: hello
- image: %s
- command: ['/bin/sh']
- results:
- - name: result1
- args:
- - "-c"
- - |
- cat > $(step.artifacts.path) << EOF
- {
- "inputs":[
- {
- "name":"source",
+ "name":"input-artifacts",
"values":[
{
"uri":"git:jjjsss",
@@ -476,7 +294,7 @@ spec:
],
"outputs":[
{
- "name":"image",
+ "name":"build-result",
"values":[
{
"uri":"pkg:balba",
@@ -490,7 +308,6 @@ spec:
]
}
EOF
- echo -n result1Value >> $(step.results.result1.path)
`, helpers.ObjectNameForTest(t), namespace, fqImageName))
return task
}
@@ -524,7 +341,6 @@ spec:
"outputs":[
{
"name":"build-result",
- "buildOutput":false,
"values":[
{
"uri":"pkg:balba",
diff --git a/upstream/test/cancel_test.go b/upstream/test/cancel_test.go
index e5c10bd3e64..5b5fda9d9a3 100644
--- a/upstream/test/cancel_test.go
+++ b/upstream/test/cancel_test.go
@@ -42,6 +42,7 @@ func TestTaskRunPipelineRunCancel(t *testing.T) {
// on failure, to ensure that cancelling the PipelineRun doesn't cause
// the retrying TaskRun to retry.
for _, numRetries := range []int{0, 1} {
+ numRetries := numRetries // capture range variable
specStatus := v1.PipelineRunSpecStatusCancelled
t.Run(fmt.Sprintf("retries=%d,status=%s", numRetries, specStatus), func(t *testing.T) {
ctx := context.Background()
@@ -65,7 +66,7 @@ spec:
retries: %d
taskSpec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: 'sleep 5000'
`, helpers.ObjectNameForTest(t), namespace, numRetries))
@@ -184,134 +185,3 @@ spec:
})
}
}
-
-// TestCancelActivePipelineRunWithCompletedTaskRuns cancels a PipelineRun with completed TaskRuns and verifies TaskRun statuses.
-func TestCancelActivePipelineRunWithCompletedTaskRuns(t *testing.T) {
- specStatus := v1.PipelineRunSpecStatusCancelled
- t.Run("status="+specStatus, func(t *testing.T) {
- ctx := context.Background()
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
- requirements := []func(context.Context, *testing.T, *clients, string){}
- c, namespace := setup(ctx, t, requirements...)
- t.Parallel()
-
- knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
- defer tearDown(ctx, t, c, namespace)
-
- pipelineRun := parse.MustParseV1PipelineRun(t, fmt.Sprintf(`
-metadata:
- name: %s
- namespace: %s
-spec:
- pipelineSpec:
- tasks:
- - name: task-succeeded
- taskSpec:
- steps:
- - image: mirror.gcr.io/busybox
- script: 'sleep 1'
- - name: task-running
- taskSpec:
- steps:
- - image: mirror.gcr.io/busybox
- script: 'sleep 5000'
-`, helpers.ObjectNameForTest(t), namespace))
-
- t.Logf("Creating PipelineRun in namespace %s", namespace)
- if _, err := c.V1PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}); err != nil {
- t.Fatalf("Failed to create PipelineRun `%s`: %s", pipelineRun.Name, err)
- }
-
- t.Logf("Waiting for Pipelinerun %s in namespace %s to be started", pipelineRun.Name, namespace)
- if err := WaitForPipelineRunState(ctx, c, pipelineRun.Name, timeout, Running(pipelineRun.Name), "PipelineRunRunning", v1Version); err != nil {
- t.Fatalf("Error waiting for PipelineRun %s to be running: %s", pipelineRun.Name, err)
- }
-
- taskrunList, err := c.V1TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + pipelineRun.Name})
- if err != nil {
- t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", pipelineRun.Name, err)
- }
-
- t.Logf("Waiting for PipelineRun %s from namespace %s with one TaskRun succeeded and another one running.", pipelineRun.Name, namespace)
- for _, taskrunItem := range taskrunList.Items {
- name := taskrunItem.Name
- switch n := taskrunItem.Labels["tekton.dev/pipelineTask"]; {
- case n == "task-succeeded":
- err := WaitForTaskRunState(ctx, c, name, TaskRunSucceed(name), "TaskRunSuccess", v1Version)
- if err != nil {
- t.Errorf("Error waiting for TaskRun %s to be succeed: %v", name, err)
- }
- case n == "task-running":
- err := WaitForTaskRunState(ctx, c, name, Running(name), "TaskRunRunning", v1Version)
- if err != nil {
- t.Errorf("Error waiting for TaskRun %s to be running: %v", name, err)
- }
- }
- }
-
- pr, err := c.V1PipelineRunClient.Get(ctx, pipelineRun.Name, metav1.GetOptions{})
- if err != nil {
- t.Fatalf("Failed to get PipelineRun `%s`: %s", pipelineRun.Name, err)
- }
-
- patches := []jsonpatch.JsonPatchOperation{{
- Operation: "add",
- Path: "/spec/status",
- Value: specStatus,
- }}
- patchBytes, err := json.Marshal(patches)
- if err != nil {
- t.Fatalf("failed to marshal patch bytes in order to cancel")
- }
- if _, err := c.V1PipelineRunClient.Patch(ctx, pr.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{}, ""); err != nil {
- t.Fatalf("Failed to patch PipelineRun `%s` with cancellation: %s", pipelineRun.Name, err)
- }
-
- expectedReason := v1.PipelineRunReasonCancelled.String()
- expectedCondition := FailedWithReason(expectedReason, pipelineRun.Name)
- t.Logf("Waiting for PipelineRun %s in namespace %s to be cancelled", pipelineRun.Name, namespace)
- if err := WaitForPipelineRunState(ctx, c, pipelineRun.Name, timeout, expectedCondition, expectedReason, v1Version); err != nil {
- t.Errorf("Error waiting for PipelineRun %q to finished: %s", pipelineRun.Name, err)
- }
-
- t.Logf("Waiting for TaskRuns in PipelineRun %s in namespace %s to be cancelled", pipelineRun.Name, namespace)
- for _, taskrunItem := range taskrunList.Items {
- name := taskrunItem.Name
- switch n := taskrunItem.Labels["tekton.dev/pipelineTask"]; {
- case n == "task-succeeded":
- // the completed TaskRun no need to wait
- case n == "task-running":
- err := WaitForTaskRunState(ctx, c, name, FailedWithReason("TaskRunCancelled", name), "TaskRunCancelled", v1Version)
- if err != nil {
- t.Errorf("Error waiting for TaskRun %s to be finished: %v", name, err)
- }
- }
- }
-
- taskrunList, err = c.V1TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + pipelineRun.Name})
- if err != nil {
- t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", pipelineRun.Name, err)
- }
- for _, taskrunItem := range taskrunList.Items {
- switch n := taskrunItem.Labels["tekton.dev/pipelineTask"]; {
- case n == "task-succeeded":
- // the completed TaskRun should not be changed
- if taskrunItem.Spec.Status != "" {
- t.Fatalf("The status is %s, but it should be empty.", taskrunItem.Spec.Status)
- }
- if taskrunItem.Spec.StatusMessage != "" {
- t.Fatalf("Status message is set to %s while it should be empty.", taskrunItem.Spec.StatusMessage)
- }
- case n == "task-running":
- // the running TaskRun should be cancelled
- if taskrunItem.Spec.Status != v1.TaskRunSpecStatusCancelled {
- t.Fatalf("Status is %s while it should have been %s", taskrunItem.Spec.Status, v1.TaskRunSpecStatusCancelled)
- }
- if taskrunItem.Spec.StatusMessage != v1.TaskRunCancelledByPipelineMsg {
- t.Fatalf("Status message is set to %s while it should be %s.", taskrunItem.Spec.StatusMessage, v1.TaskRunCancelledByPipelineMsg)
- }
- }
- }
- })
-}
diff --git a/upstream/test/clients.go b/upstream/test/clients.go
index dc0424a5275..0a0a4f21400 100644
--- a/upstream/test/clients.go
+++ b/upstream/test/clients.go
@@ -68,7 +68,7 @@ type clients struct {
V1TaskClient v1.TaskInterface
V1TaskRunClient v1.TaskRunInterface
V1PipelineRunClient v1.PipelineRunInterface
- V1beta1StepActionClient v1beta1.StepActionInterface
+ V1alpha1StepActionClient v1alpha1.StepActionInterface
}
// newClients instantiates and returns several clientsets required for making requests to the
@@ -110,6 +110,6 @@ func newClients(t *testing.T, configPath, clusterName, namespace string) *client
c.V1TaskClient = cs.TektonV1().Tasks(namespace)
c.V1TaskRunClient = cs.TektonV1().TaskRuns(namespace)
c.V1PipelineRunClient = cs.TektonV1().PipelineRuns(namespace)
- c.V1beta1StepActionClient = cs.TektonV1beta1().StepActions(namespace)
+ c.V1alpha1StepActionClient = cs.TektonV1alpha1().StepActions(namespace)
return c
}
diff --git a/upstream/test/conformance/conformance_test.go b/upstream/test/conformance/conformance_test.go
index 6df4a039273..329b27478f7 100644
--- a/upstream/test/conformance/conformance_test.go
+++ b/upstream/test/conformance/conformance_test.go
@@ -39,13 +39,8 @@ package conformance_test
import (
"fmt"
- "strconv"
- "strings"
"testing"
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/test/parse"
"knative.dev/pkg/test/helpers"
)
@@ -53,7 +48,6 @@ import (
const (
succeedConditionStatus = "True"
conformanceVersion = "v1"
- failureConditionStatus = "False"
)
// TestTaskRunConditions examines population of Conditions
@@ -71,7 +65,7 @@ spec:
taskSpec:
steps:
- name: add
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
echo Hello world!
`, helpers.ObjectNameForTest(t))
@@ -108,7 +102,7 @@ spec:
taskSpec:
steps:
- name: add
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
echo Hello world!
`, helpers.ObjectNameForTest(t))
@@ -126,1438 +120,3 @@ spec:
t.Error(err)
}
}
-
-const (
- TaskRunInputType = "TaskRun"
- PipelineRunInputType = "PipelineRun"
- ExpectRunToFail = true
-)
-
-func TestStepScript(t *testing.T) {
- t.Parallel()
- expectedSteps := map[string]string{
- "node": "Completed",
- "perl": "Completed",
- "params-applied": "Completed",
- "args-allowed": "Completed",
- "dollar-signs-allowed": "Completed",
- "bash-variable-evaluations": "Completed",
- }
-
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: %s
-spec:
- taskSpec:
- params:
- - name: PARAM
- default: param-value
- steps:
- - name: node
- image: mirror.gcr.io/node:lts-alpine3.20
- script: |
- #!/usr/bin/env node
- console.log("Hello from Node!")
- - name: perl
- image: mirror.gcr.io/perl:devel-bullseye
- script: |
- #!/usr/bin/perl
- print "Hello from Perl!"
- # Test that param values are replaced.
- - name: params-applied
- image: mirror.gcr.io/python:3.12.4
- script: |
- #!/usr/bin/env python3
- v = '$(params.PARAM)'
- if v != 'param-value':
- print('Param values not applied')
- print('Got: ', v)
- exit(1)
- # Test that args are allowed and passed to the script as expected.
- - name: args-allowed
- image: mirror.gcr.io/ubuntu
- args: ['hello', 'world']
- script: |
- #!/usr/bin/env bash
- [[ $# == 2 ]]
- [[ $1 == "hello" ]]
- [[ $2 == "world" ]]
- # Test that multiple dollar signs next to each other are not replaced by Kubernetes
- - name: dollar-signs-allowed
- image: mirror.gcr.io/python:3.12.4
- script: |
- #!/usr/bin/env python3
- if '$' != '\u0024':
- print('single dollar signs ($) are not passed through as expected :(')
- exit(1)
- if '$$' != '\u0024\u0024':
- print('double dollar signs ($$) are not passed through as expected :(')
- exit(2)
- if '$$$' != '\u0024\u0024\u0024':
- print('three dollar signs ($$$) are not passed through as expected :(')
- exit(3)
- if '$$$$' != '\u0024\u0024\u0024\u0024':
- print('four dollar signs ($$$$) are not passed through as expected :(')
- exit(4)
- print('dollar signs appear to be handled correctly! :)')
-
- # Test that bash scripts with variable evaluations work as expected
- - name: bash-variable-evaluations
- image: mirror.gcr.io/bash
- script: |
- #!/usr/bin/env bash
- set -xe
- var1=var1_value
- var2=var1
- echo $(eval echo \$$var2) > tmpfile
- eval_result=$(cat tmpfile)
- if [ "$eval_result" != "var1_value" ] ; then
- echo "unexpected eval result: $eval_result"
- exit 1
- fi
-`, helpers.ObjectNameForTest(t))
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, TaskRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedTR := parse.MustParseV1TaskRun(t, outputYAML)
-
- if len(resolvedTR.Status.Steps) != len(expectedSteps) {
- t.Errorf("Expected length of steps %v but has: %v", len(expectedSteps), len(resolvedTR.Status.Steps))
- }
-
- for _, resolvedStep := range resolvedTR.Status.Steps {
- resolvedStepTerminatedReason := resolvedStep.Terminated.Reason
- if expectedStepState, ok := expectedSteps[resolvedStep.Name]; ok {
- if resolvedStepTerminatedReason != expectedStepState {
- t.Fatalf("Expect step %s to have completed successfully but it has Termination Reason: %s", resolvedStep.Name, resolvedStepTerminatedReason)
- }
- } else {
- t.Fatalf("Does not expect to have step: %s", resolvedStep.Name)
- }
- }
-}
-
-func TestStepEnv(t *testing.T) {
- t.Parallel()
- envVarName := "FOO"
- envVarVal := "foooooooo"
-
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: %s
-spec:
- taskSpec:
- steps:
- - name: bash
- image: mirror.gcr.io/ubuntu
- env:
- - name: %s
- value: %s
- script: |
- #!/usr/bin/env bash
- set -euxo pipefail
- echo "Hello from Bash!"
- echo FOO is ${FOO}
- echo substring is ${FOO:2:4}
-`, helpers.ObjectNameForTest(t), envVarName, envVarVal)
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, TaskRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedTR := parse.MustParseV1TaskRun(t, outputYAML)
-
- resolvedStep := resolvedTR.Status.Steps[0]
- resolvedStepTerminatedReason := resolvedStep.Terminated.Reason
- if resolvedStepTerminatedReason != "Completed" {
- t.Fatalf("Expect step %s to have completed successfully but it has Termination Reason: %s", resolvedStep.Name, resolvedStepTerminatedReason)
- }
-
- resolvedStepEnv := resolvedTR.Status.TaskSpec.Steps[0].Env[0]
- if resolvedStepEnv.Name != envVarName {
- t.Fatalf("Expect step %s to have EnvVar Name %s but it has: %s", resolvedStep.Name, envVarName, resolvedStepEnv.Name)
- }
- if resolvedStepEnv.Value != envVarVal {
- t.Fatalf("Expect step %s to have EnvVar Value %s but it has: %s", resolvedStep.Name, envVarVal, resolvedStepEnv.Value)
- }
-}
-
-func TestStepWorkingDir(t *testing.T) {
- t.Parallel()
- defaultWorkingDir := "/workspace"
- overrideWorkingDir := "/a/path/too/far"
-
- expectedWorkingDirs := map[string]string{
- "default": defaultWorkingDir,
- "override": overrideWorkingDir,
- }
-
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: %s
-spec:
- taskSpec:
- steps:
- - name: default
- image: mirror.gcr.io/ubuntu
- workingDir: %s
- script: |
- #!/usr/bin/env bash
- if [[ $PWD != /workspace ]]; then
- exit 1
- fi
- - name: override
- image: mirror.gcr.io/ubuntu
- workingDir: %s
- script: |
- #!/usr/bin/env bash
- if [[ $PWD != /a/path/too/far ]]; then
- exit 1
- fi
-`, helpers.ObjectNameForTest(t), defaultWorkingDir, overrideWorkingDir)
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, TaskRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedTR := parse.MustParseV1TaskRun(t, outputYAML)
-
- for _, resolvedStep := range resolvedTR.Status.Steps {
- resolvedStepTerminatedReason := resolvedStep.Terminated.Reason
- if resolvedStepTerminatedReason != "Completed" {
- t.Fatalf("Expect step %s to have completed successfully but it has Termination Reason: %s", resolvedStep.Name, resolvedStepTerminatedReason)
- }
- }
-
- for _, resolvedStepSpec := range resolvedTR.Status.TaskSpec.Steps {
- resolvedStepWorkingDir := resolvedStepSpec.WorkingDir
- if resolvedStepWorkingDir != expectedWorkingDirs[resolvedStepSpec.Name] {
- t.Fatalf("Expect step %s to have WorkingDir %s but it has: %s", resolvedStepSpec.Name, expectedWorkingDirs[resolvedStepSpec.Name], resolvedStepWorkingDir)
- }
- }
-}
-
-func TestStepStateImageID(t *testing.T) {
- t.Parallel()
- // Step images can be specified by digest.
- image := "busybox@sha256:1303dbf110c57f3edf68d9f5a16c082ec06c4cf7604831669faf2c712260b5a0"
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: %s
-spec:
- taskSpec:
- steps:
- - image: %s
- args: ['-c', 'echo hello']
-`, helpers.ObjectNameForTest(t), image)
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, TaskRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedTR := parse.MustParseV1TaskRun(t, outputYAML)
-
- if err := checkTaskRunConditionSucceeded(resolvedTR.Status, succeedConditionStatus, "Succeeded"); err != nil {
- t.Error(err)
- }
-
- if len(resolvedTR.Status.Steps) != 1 {
- t.Errorf("Expect vendor service to provide 1 Step in StepState but it has: %v", len(resolvedTR.Status.Steps))
- }
-
- if !strings.HasSuffix(resolvedTR.Status.Steps[0].ImageID, image) {
- t.Errorf("Expect vendor service to provide image %s in StepState but it has: %s", image, resolvedTR.Status.Steps[0].ImageID)
- }
-}
-
-func TestStepStateName(t *testing.T) {
- t.Parallel()
- stepName := "step-foo"
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: %s
-spec:
- taskSpec:
- steps:
- - name: %s
- image: mirror.gcr.io/busybox
- args: ['-c', 'echo hello']
-`, helpers.ObjectNameForTest(t), stepName)
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, TaskRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedTR := parse.MustParseV1TaskRun(t, outputYAML)
-
- if err := checkTaskRunConditionSucceeded(resolvedTR.Status, succeedConditionStatus, "Succeeded"); err != nil {
- t.Error(err)
- }
-
- if len(resolvedTR.Status.Steps) != 1 {
- t.Errorf("Expect vendor service to provide 1 Step in StepState but it has: %v", len(resolvedTR.Status.Steps))
- }
-
- if resolvedTR.Status.Steps[0].Name != stepName {
- t.Errorf("Expect vendor service to provide Name %s in StepState but it has: %s", stepName, resolvedTR.Status.Steps[0].Name)
- }
-}
-
-// Examines the ContainerStateTerminated ExitCode, StartedAt, FinishtedAt and Reason
-func TestStepStateContainerStateTerminated(t *testing.T) {
- t.Parallel()
- successInputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: %s
-spec:
- taskSpec:
- steps:
- - image: mirror.gcr.io/busybox
- args: ['-c', 'echo hello']
-`, helpers.ObjectNameForTest(t))
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- successOutputYAML, err := ProcessAndSendToTekton(successInputYAML, TaskRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- successResolvedTR := parse.MustParseV1TaskRun(t, successOutputYAML)
-
- if err := checkTaskRunConditionSucceeded(successResolvedTR.Status, succeedConditionStatus, "Succeeded"); err != nil {
- t.Error(err)
- }
-
- if len(successResolvedTR.Status.Steps) != 1 {
- t.Errorf("Expect vendor service to provide 1 Step in StepState but it has: %v", len(successResolvedTR.Status.Steps))
- }
-
- startTime := successResolvedTR.Status.Steps[0].Terminated.StartedAt
- finishTime := successResolvedTR.Status.Steps[0].Terminated.FinishedAt
-
- if startTime.IsZero() {
- t.Errorf("Expect vendor service to provide StartTimeStamp in StepState.Terminated but it does not provide so")
- }
-
- if finishTime.IsZero() {
- t.Errorf("Expect vendor service to provide FinishTimeStamp in StepState.Terminated but it does not provide so")
- }
-
- if finishTime.Before(&startTime) {
- t.Errorf("Expect vendor service to provide StartTimeStamp %v earlier than FinishTimeStamp in StepState.Terminated %v but it does not provide so", startTime, finishTime)
- }
-
- if successResolvedTR.Status.Steps[0].Terminated.ExitCode != 0 {
- t.Errorf("Expect vendor service to provide ExitCode in StepState.Terminated to be 0 but it has: %v", successResolvedTR.Status.Steps[0].Terminated.ExitCode)
- }
-
- if successResolvedTR.Status.Steps[0].Terminated.Reason != "Completed" {
- t.Errorf("Expect vendor service to provide Reason in StepState.Terminated to be Completed but it has: %s", successResolvedTR.Status.Steps[0].Terminated.Reason)
- }
-
- failureInputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: %s
-spec:
- taskSpec:
- steps:
- - image: mirror.gcr.io/busybox
- script: exit 1
-`, helpers.ObjectNameForTest(t))
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- failureOutputYAML, err := ProcessAndSendToTekton(failureInputYAML, TaskRunInputType, t, ExpectRunToFail)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- failureResolvedTR := parse.MustParseV1TaskRun(t, failureOutputYAML)
-
- if err := checkTaskRunConditionSucceeded(failureResolvedTR.Status, failureConditionStatus, "Failed"); err != nil {
- t.Error(err)
- }
-
- if len(failureResolvedTR.Status.Steps) != 1 {
- t.Errorf("Expect vendor service to provide 1 Step in StepState but it has: %v", len(failureResolvedTR.Status.Steps))
- }
-
- startTime = failureResolvedTR.Status.Steps[0].Terminated.StartedAt
- finishTime = failureResolvedTR.Status.Steps[0].Terminated.FinishedAt
-
- if startTime.IsZero() {
- t.Errorf("Expect vendor service to provide StartTimeStamp in StepState.Terminated but it does not provide so")
- }
-
- if finishTime.IsZero() {
- t.Errorf("Expect vendor service to provide FinishTimeStamp in StepState.Terminated but it does not provide so")
- }
-
- if finishTime.Before(&startTime) {
- t.Errorf("Expect vendor service to provide StartTimeStamp %v earlier than FinishTimeStamp in StepState.Terminated %v but it does not provide so", startTime, finishTime)
- }
-
- if failureResolvedTR.Status.Steps[0].Terminated.ExitCode != 1 {
- t.Errorf("Expect vendor service to provide ExitCode in StepState.Terminated to be 0 but it has: %v", failureResolvedTR.Status.Steps[0].Terminated.ExitCode)
- }
-
- if failureResolvedTR.Status.Steps[0].Terminated.Reason != "Error" {
- t.Errorf("Expect vendor service to provide Reason in StepState.Terminated to be Error but it has: %s", failureResolvedTR.Status.Steps[0].Terminated.Reason)
- }
-}
-
-func TestSidecarName(t *testing.T) {
- sidecarName := "hello-sidecar"
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: %s
-spec:
- taskSpec:
- sidecars:
- - name: %s
- image: mirror.gcr.io/ubuntu
- script: echo "hello from sidecar"
- steps:
- - name: hello-step
- image: mirror.gcr.io/ubuntu
- script: echo "hello from step"
-`, helpers.ObjectNameForTest(t), sidecarName)
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, TaskRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedTR := parse.MustParseV1TaskRun(t, outputYAML)
-
- if err := checkTaskRunConditionSucceeded(resolvedTR.Status, SucceedConditionStatus, "Succeeded"); err != nil {
- t.Error(err)
- }
-
- if len(resolvedTR.Spec.TaskSpec.Sidecars) != 1 {
- t.Errorf("Expect vendor service to provide 1 Sidcar but it has: %v", len(resolvedTR.Spec.TaskSpec.Sidecars))
- }
-
- if resolvedTR.Spec.TaskSpec.Sidecars[0].Name != sidecarName {
- t.Errorf("Expect vendor service to provide Sidcar name %s but it has: %s", sidecarName, resolvedTR.Spec.TaskSpec.Sidecars[0].Name)
- }
-}
-
-// This test relies on the support of Sidecar Script and its volumeMounts.
-// For sidecar tests, sidecars don't have /workspace mounted by default, so we have to define
-// our own shared volume. For vendor services, please feel free to override the shared workspace
-// supported in your sidecar. Otherwise there are no existing v1 conformance `REQUIRED` fields that
-// are going to be used for verifying Sidecar functionality.
-func TestSidecarScriptSuccess(t *testing.T) {
- succeedInputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: %s
-spec:
- taskSpec:
- sidecars:
- - name: slow-sidecar
- image: mirror.gcr.io/ubuntu
- script: |
- echo "hello from sidecar" > /shared/message
- volumeMounts:
- - name: shared
- mountPath: /shared
-
- steps:
- - name: check-ready
- image: mirror.gcr.io/ubuntu
- script: cat /shared/message
- volumeMounts:
- - name: shared
- mountPath: /shared
-
- # Sidecars don't have /workspace mounted by default, so we have to define
- # our own shared volume.
- volumes:
- - name: shared
- emptyDir: {}
-`, helpers.ObjectNameForTest(t))
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- succeedOutputYAML, err := ProcessAndSendToTekton(succeedInputYAML, TaskRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- succeededResolvedTR := parse.MustParseV1TaskRun(t, succeedOutputYAML)
-
- if err := checkTaskRunConditionSucceeded(succeededResolvedTR.Status, SucceedConditionStatus, "Succeeded"); err != nil {
- t.Error(err)
- }
-}
-
-func TestSidecarScriptFailure(t *testing.T) {
- failInputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: %s
-spec:
- taskSpec:
- sidecars:
- - name: exit-sidecar
- image: mirror.gcr.io/ubuntu
- script: exit 1
-
- steps:
- - name: check-ready
- image: mirror.gcr.io/ubuntu
- script: cat /shared/message
- volumeMounts:
- - name: shared
- mountPath: /shared
-
- # Sidecars don't have /workspace mounted by default, so we have to define
- # our own shared volume.
- volumes:
- - name: shared
- emptyDir: {}
-`, helpers.ObjectNameForTest(t))
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- failOutputYAML, err := ProcessAndSendToTekton(failInputYAML, TaskRunInputType, t, ExpectRunToFail)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- failResolvedTR := parse.MustParseV1TaskRun(t, failOutputYAML)
-
- if len(failResolvedTR.Spec.TaskSpec.Sidecars) != 1 {
- t.Errorf("Expect vendor service to provide 1 Sidcar but it has: %v", len(failResolvedTR.Spec.TaskSpec.Sidecars))
- }
-
- if err := checkTaskRunConditionSucceeded(failResolvedTR.Status, "False", "Failed"); err != nil {
- t.Error(err)
- }
-}
-
-func TestSidecarArgAndCommand(t *testing.T) {
- failInputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: %s
-spec:
- taskSpec:
- sidecars:
- - name: slow-sidecar
- image: mirror.gcr.io/ubuntu
- command: [/bin/bash]
- args: [-c, "echo 'hello from sidecar' > /shared/message"]
- volumeMounts:
- - name: shared
- mountPath: /shared
- steps:
- - name: check-ready
- image: mirror.gcr.io/ubuntu
- command:
- - cat
- args:
- - '/shared/message'
- volumeMounts:
- - name: shared
- mountPath: /shared
-
- # Sidecars don't have /workspace mounted by default, so we have to define
- # our own shared volume.
- volumes:
- - name: shared
- emptyDir: {}
-`, helpers.ObjectNameForTest(t))
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- failOutputYAML, err := ProcessAndSendToTekton(failInputYAML, TaskRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- failResolvedTR := parse.MustParseV1TaskRun(t, failOutputYAML)
-
- if len(failResolvedTR.Spec.TaskSpec.Sidecars) != 1 {
- t.Errorf("Expect vendor service to provide 1 Sidcar but it has: %v", len(failResolvedTR.Spec.TaskSpec.Sidecars))
- }
-
- if err := checkTaskRunConditionSucceeded(failResolvedTR.Status, SucceedConditionStatus, "Succeeded"); err != nil {
- t.Error(err)
- }
-}
-
-func TestStringTaskParam(t *testing.T) {
- stringParam := "foo-string"
-
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: %s
-spec:
- params:
- - name: "string-param"
- value: %s
- taskSpec:
- params:
- - name: "string-param"
- type: string
- steps:
- - name: "check-param"
- image: mirror.gcr.io/bash
- script: |
- if [[ $(params.string-param) != %s ]]; then
- exit 1
- fi
-`, helpers.ObjectNameForTest(t), stringParam, stringParam)
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, TaskRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedTR := parse.MustParseV1TaskRun(t, outputYAML)
-
- if len(resolvedTR.Spec.Params) != 1 {
- t.Errorf("Expect vendor service to provide 1 Param but it has: %v", len(resolvedTR.Spec.Params))
- }
-
- if err := checkTaskRunConditionSucceeded(resolvedTR.Status, SucceedConditionStatus, "Succeeded"); err != nil {
- t.Error(err)
- }
-
-}
-
-func TestArrayTaskParam(t *testing.T) {
- var arrayParam0, arrayParam1 = "foo", "bar"
-
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: %s
-spec:
- params:
- - name: array-to-concat
- value:
- - %s
- - %s
- taskSpec:
- results:
- - name: "concat-array"
- params:
- - name: array-to-concat
- type: array
- steps:
- - name: concat-array-params
- image: mirror.gcr.io/alpine
- command: ["/bin/sh", "-c"]
- args:
- - echo -n $(params.array-to-concat[0])"-"$(params.array-to-concat[1]) | tee $(results.concat-array.path);
-`, helpers.ObjectNameForTest(t), arrayParam0, arrayParam1)
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, TaskRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedTR := parse.MustParseV1TaskRun(t, outputYAML)
-
- if len(resolvedTR.Spec.Params) != 1 {
- t.Errorf("Examining TaskRun Param: expect vendor service to provide TaskRun with 1 Array Param but it has: %v", len(resolvedTR.Spec.Params))
- }
- if len(resolvedTR.Spec.Params[0].Value.ArrayVal) != 2 {
- t.Errorf("Examining TaskParams: expect vendor service to provide 2 Task Array Param values but it has: %v", len(resolvedTR.Spec.Params[0].Value.ArrayVal))
- }
-
- // Utilizing TaskResult to verify functionality of Array Params
- if len(resolvedTR.Status.Results) != 1 {
- t.Errorf("Expect vendor service to provide 1 result but it has: %v", len(resolvedTR.Status.Results))
- }
- if resolvedTR.Status.Results[0].Value.StringVal != arrayParam0+"-"+arrayParam1 {
- t.Errorf("Not producing correct result, expect to get \"%s\" but has: \"%s\"", arrayParam0+"-"+arrayParam1, resolvedTR.Status.Results[0].Value.StringVal)
- }
-}
-
-func TestTaskParamDefaults(t *testing.T) {
- stringParam := "string-foo"
- arrayParam := []string{"array-foo", "array-bar"}
- expectedStringParamResultVal := "string-foo-string-baz-default"
- expectedArrayParamResultVal := "array-foo-array-bar-default"
-
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: %s
-spec:
- params:
- - name: array-param
- value:
- - %s
- - %s
- - name: string-param
- value: %s
- taskSpec:
- results:
- - name: array-output
- - name: string-output
- params:
- - name: array-param
- type: array
- - name: array-defaul-param
- type: array
- default:
- - "array-foo-default"
- - "array-bar-default"
- - name: string-param
- type: string
- - name: string-default
- type: string
- default: "string-baz-default"
- steps:
- - name: string-params-to-result
- image: mirror.gcr.io/bash
- command: ["/bin/sh", "-c"]
- args:
- - echo -n $(params.string-param)"-"$(params.string-default) | tee $(results.string-output.path);
- - name: array-params-to-result
- image: mirror.gcr.io/bash
- command: ["/bin/sh", "-c"]
- args:
- - echo -n $(params.array-param[0])"-"$(params.array-defaul-param[1]) | tee $(results.array-output.path);
-`, helpers.ObjectNameForTest(t), arrayParam[0], arrayParam[1], stringParam)
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, TaskRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedTR := parse.MustParseV1TaskRun(t, outputYAML)
-
- if len(resolvedTR.Spec.Params) != 2 {
- t.Errorf("Expect vendor service to provide 2 Params but it has: %v", len(resolvedTR.Spec.Params))
- }
- if len(resolvedTR.Spec.Params[0].Value.ArrayVal) != 2 {
- t.Errorf("Expect vendor service to provide 2 Task Array Params but it has: %v", len(resolvedTR.Spec.Params))
- }
- for _, param := range resolvedTR.Spec.Params {
- if param.Name == "array-param" {
- paramArr := param.Value.ArrayVal
- for i, _ := range paramArr {
- if paramArr[i] != arrayParam[i] {
- t.Errorf("Expect Params to match %s: %v", arrayParam[i], paramArr[i])
- }
- }
- }
- if param.Name == "string-param" {
- if param.Value.StringVal != stringParam {
- t.Errorf("Not producing correct result, expect to get \"%s\" but has: \"%s\"", stringParam, param.Value.StringVal)
- }
- }
- }
-
- // Utilizing TaskResult to verify functionality of Task Params Defaults
- if len(resolvedTR.Status.Results) != 2 {
- t.Errorf("Expect vendor service to provide 2 result but it has: %v", len(resolvedTR.Status.Results))
- }
-
- for _, result := range resolvedTR.Status.Results {
- if result.Name == "string-output" {
- resultVal := result.Value.StringVal
- if resultVal != expectedStringParamResultVal {
- t.Errorf("Not producing correct result, expect to get \"%s\" but has: \"%s\"", expectedStringParamResultVal, resultVal)
- }
- }
- if result.Name == "array-output" {
- resultVal := result.Value.StringVal
- if resultVal != expectedArrayParamResultVal {
- t.Errorf("Not producing correct result, expect to get \"%s\" but has: \"%s\"", expectedArrayParamResultVal, resultVal)
- }
- }
- }
-}
-
-func TestTaskParamDescription(t *testing.T) {
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: %s
-spec:
- taskSpec:
- params:
- - name: foo
- description: foo param
- default: "foo"
- steps:
- - name: add
- image: mirror.gcr.io/alpine
- env:
- - name: OP1
- value: $(params.foo)
- command: ["/bin/sh", "-c"]
- args:
- - echo -n ${OP1}
-`, helpers.ObjectNameForTest(t))
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, TaskRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedTR := parse.MustParseV1TaskRun(t, outputYAML)
-
- if resolvedTR.Spec.TaskSpec.Params[0].Description != "foo param" {
- t.Errorf("Expect vendor service to provide Param Description \"foo param\" but it has: %s", resolvedTR.Spec.TaskSpec.Params[0].Description)
- }
-
- if resolvedTR.Status.TaskSpec.Params[0].Description != "foo param" {
- t.Errorf("Expect vendor service to provide Param Description \"foo param\" but it has: %s", resolvedTR.Spec.TaskSpec.Params[0].Description)
- }
-}
-
-// The goal of the Taskrun Workspace test is to verify if different Steps in the TaskRun could
-// pass data among each other.
-func TestTaskRunWorkspace(t *testing.T) {
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: %s
-spec:
- workspaces:
- - name: custom-workspace
- # Please note that vendor services are welcomed to override the following actual workspace binding type.
- # This is considered as the implementation detail for the conformant workspace fields.
- emptyDir: {}
- taskSpec:
- steps:
- - name: write
- image: mirror.gcr.io/ubuntu
- script: echo $(workspaces.custom-workspace.path) > $(workspaces.custom-workspace.path)/foo
- - name: read
- image: mirror.gcr.io/ubuntu
- script: cat $(workspaces.custom-workspace.path)/foo
- - name: check
- image: mirror.gcr.io/ubuntu
- script: |
- if [ "$(cat $(workspaces.custom-workspace.path)/foo)" != "/workspace/custom-workspace" ]; then
- echo $(cat $(workspaces.custom-workspace.path)/foo)
- exit 1
- fi
- workspaces:
- - name: custom-workspace
-`, helpers.ObjectNameForTest(t))
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, TaskRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedTR := parse.MustParseV1TaskRun(t, outputYAML)
-
- if err := checkTaskRunConditionSucceeded(resolvedTR.Status, SucceedConditionStatus, "Succeeded"); err != nil {
- t.Error(err)
- }
-
- if len(resolvedTR.Spec.Workspaces) != 1 {
- t.Errorf("Expect vendor service to provide 1 Workspace but it has: %v", len(resolvedTR.Spec.Workspaces))
- }
-
- if resolvedTR.Spec.Workspaces[0].Name != "custom-workspace" {
- t.Errorf("Expect vendor service to provide Workspace 'custom-workspace' but it has: %s", resolvedTR.Spec.Workspaces[0].Name)
- }
-
- if resolvedTR.Status.TaskSpec.Workspaces[0].Name != "custom-workspace" {
- t.Errorf("Expect vendor service to provide Workspace 'custom-workspace' in TaskRun.Status.TaskSpec but it has: %s", resolvedTR.Spec.Workspaces[0].Name)
- }
-}
-
-// TestTaskRunTimeout examines the Timeout behaviour for
-// TaskRun level. It creates a TaskRun with Timeout and wait in the Step of the
-// inline Task for the time length longer than the specified Timeout.
-// The TaskRun is expected to fail with the Reason `TaskRunTimeout`.
-func TestTaskRunTimeout(t *testing.T) {
- expectedFailedStatus := true
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: %s
-spec:
- timeout: 15s
- taskSpec:
- steps:
- - image: mirror.gcr.io/busybox
- command: ['/bin/sh']
- args: ['-c', 'sleep 15001']
-`, helpers.ObjectNameForTest(t))
-
- // Execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, TaskRunInputType, t, expectedFailedStatus)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedTR := parse.MustParseV1TaskRun(t, outputYAML)
-
- if err := checkTaskRunConditionSucceeded(resolvedTR.Status, "False", "TaskRunTimeout"); err != nil {
- t.Error(err)
- }
-}
-
-// TestConditions examines population of Conditions
-// fields. It creates the a TaskRun with minimal specifications and checks the
-// required Condition Status and Type.
-func TestConditions(t *testing.T) {
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: TaskRun
-metadata:
- name: %s
-spec:
- taskSpec:
- steps:
- - name: add
- image: mirror.gcr.io/ubuntu
- script:
- echo Hello world!
-`, helpers.ObjectNameForTest(t))
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, TaskRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedTR := parse.MustParseV1TaskRun(t, outputYAML)
-
- if err := checkTaskRunConditionSucceeded(resolvedTR.Status, SucceedConditionStatus, "Succeeded"); err != nil {
- t.Error(err)
- }
-}
-
-// TestPipelineTaskParams examines the PipelineTask
-// Params functionality by creating a Pipeline that performs addition in its
-// Task for validation.
-func TestPipelineTaskParams(t *testing.T) {
- var op0, op1 = 10, 1
- expectedParams := v1.Params{{
- Name: "op0",
- Value: v1.ParamValue{StringVal: strconv.Itoa(op0)},
- }, {
- Name: "op1",
- Value: v1.ParamValue{StringVal: strconv.Itoa(op1)}},
- }
-
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: PipelineRun
-metadata:
- name: %s
-spec:
- pipelineSpec:
- tasks:
- - name: sum-params
- taskSpec:
- params:
- - name: op0
- type: string
- description: The first integer from PipelineTask Param
- - name: op1
- type: string
- description: The second integer from PipelineTask Param
- steps:
- - name: sum
- image: mirror.gcr.io/bash
- script: |
- #!/usr/bin/env bash
- echo -n $(( "$(inputs.params.op0)" + "$(inputs.params.op1)" ))
- params:
- - name: op0
- value: %s
- - name: op1
- value: %s
-`, helpers.ObjectNameForTest(t), strconv.Itoa(op0), strconv.Itoa(op1))
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, PipelineRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedPR := parse.MustParseV1PipelineRun(t, outputYAML)
- if len(resolvedPR.Spec.PipelineSpec.Tasks) != 1 {
- t.Errorf("Expect vendor service to provide 1 PipelineTask but got: %v", len(resolvedPR.Spec.PipelineSpec.Tasks))
- }
-
- if d := cmp.Diff(expectedParams, resolvedPR.Spec.PipelineSpec.Tasks[0].Params, cmpopts.IgnoreFields(v1.ParamValue{}, "Type")); d != "" {
- t.Errorf("Expect vendor service to provide 2 params 10, 1, but got: %v", d)
-
- }
-}
-
-func TestPipelineResult(t *testing.T) {
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: PipelineRun
-metadata:
- name: %s
-spec:
- params:
- - name: prefix
- value: prefix
- pipelineSpec:
- results:
- - name: output
- type: string
- value: $(tasks.do-something.results.output)
- params:
- - name: prefix
- tasks:
- - name: generate-suffix
- taskSpec:
- results:
- - name: suffix
- steps:
- - name: generate-suffix
- image: mirror.gcr.io/alpine
- script: |
- echo -n "suffix" > $(results.suffix.path)
- - name: do-something
- taskSpec:
- results:
- - name: output
- params:
- - name: arg
- steps:
- - name: do-something
- image: mirror.gcr.io/alpine
- script: |
- echo -n "$(params.arg)" | tee $(results.output.path)
- params:
- - name: arg
- value: "$(params.prefix):$(tasks.generate-suffix.results.suffix)"
-`, helpers.ObjectNameForTest(t))
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, PipelineRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedPR := parse.MustParseV1PipelineRun(t, outputYAML)
-
- if len(resolvedPR.Status.Results) != 1 {
- t.Errorf("Expect vendor service to provide 1 result but has: %v", len(resolvedPR.Status.Results))
- }
-
- if resolvedPR.Status.Results[0].Value.StringVal != "prefix:suffix" {
- t.Errorf("Not producing correct result :\"%s\"", resolvedPR.Status.Results[0].Value.StringVal)
- }
-}
-
-func TestPipelineWorkspace(t *testing.T) {
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: PipelineRun
-metadata:
- name: %s
-spec:
- workspaces:
- - name: custom-workspace
- # Vendor service could override the actual workspace binding type.
- # This is considered as the implementation detail for the conformant workspace fields.
- volumeClaimTemplate:
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 16Mi
- volumeMode: Filesystem
- pipelineSpec:
- workspaces:
- - name: custom-workspace
- tasks:
- - name: write-task
- taskSpec:
- steps:
- - name: write-step
- image: mirror.gcr.io/ubuntu
- script: |
- echo $(workspaces.custom-workspace-write-task.path) > $(workspaces.custom-workspace-write-task.path)/foo
- cat $(workspaces.custom-workspace-write-task.path)/foo
- workspaces:
- - name: custom-workspace-write-task
- workspaces:
- - name: custom-workspace-write-task
- workspace: custom-workspace
- - name: read-task
- taskSpec:
- steps:
- - name: read-step
- image: mirror.gcr.io/ubuntu
- script: cat $(workspaces.custom-workspace-read-task.path)/foo
- workspaces:
- - name: custom-workspace-read-task
- workspaces:
- - name: custom-workspace-read-task
- workspace: custom-workspace
- runAfter:
- - write-task
- - name: check-task
- taskSpec:
- steps:
- - name: check-step
- image: mirror.gcr.io/ubuntu
- script: |
- if [ "$(cat $(workspaces.custom-workspace-check-task.path)/foo)" != "/workspace/custom-workspace-write-task" ]; then
- echo $(cat $(workspaces.custom-workspace-check-task.path)/foo)
- exit 1
- fi
- workspaces:
- - name: custom-workspace-check-task
- workspaces:
- - name: custom-workspace-check-task
- workspace: custom-workspace
- runAfter:
- - read-task
-`, helpers.ObjectNameForTest(t))
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, PipelineRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedPR := parse.MustParseV1PipelineRun(t, outputYAML)
-
- if err := checkPipelineRunConditionSucceeded(resolvedPR.Status, SucceedConditionStatus, "Succeeded"); err != nil {
- t.Error(err)
- }
-
- if resolvedPR.Spec.Workspaces[0].Name != "custom-workspace" {
- t.Errorf("Expect vendor service to provide Workspace 'custom-workspace' but it has: %s", resolvedPR.Spec.Workspaces[0].Name)
- }
-
- if resolvedPR.Status.PipelineSpec.Workspaces[0].Name != "custom-workspace" {
- t.Errorf("Expect vendor service to provide Workspace 'custom-workspace' in PipelineRun.Status.TaskSpec but it has: %s", resolvedPR.Spec.Workspaces[0].Name)
- }
-
- // TODO add more tests for WorkSpace Declaration test for PipelineTask Workspace in a separate test
-}
-
-func TestPipelineTaskTimeout(t *testing.T) {
- expectedFailedStatus := true
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: PipelineRun
-metadata:
- name: %s
-spec:
- pipelineSpec:
- tasks:
- - name: timeout
- timeout: 15s
- taskSpec:
- steps:
- - image: mirror.gcr.io/busybox
- command: ['/bin/sh']
- args: ['-c', 'sleep 15001']
-`, helpers.ObjectNameForTest(t))
-
- // Execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, PipelineRunInputType, t, expectedFailedStatus)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedPR := parse.MustParseV1PipelineRun(t, outputYAML)
-
- // TODO to examine PipelineRunReason when https://github.com/tektoncd/pipeline/issues/7573 is fixed - PipelineTaskTimeout
- if err := checkPipelineRunConditionSucceeded(resolvedPR.Status, FailureConditionStatus, "Failed"); err != nil {
- t.Error(err)
- }
-}
-
-// TestPipelineRunTimeout examines the Timeout behaviour for
-// PipelineRun level. It creates a TaskRun with Timeout and wait in the Step of the
-// inline Task for the time length longer than the specified Timeout.
-// The TaskRun is expected to fail with the Reason `TaskRunTimeout`.
-func TestPipelineRunTimeout(t *testing.T) {
- expectedFailedStatus := true
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: PipelineRun
-metadata:
- name: %s
-spec:
- timeouts:
- tasks: 15s
- pipelineSpec:
- tasks:
- - name: timeout
- taskSpec:
- steps:
- - image: mirror.gcr.io/busybox
- command: ['/bin/sh']
- args: ['-c', 'sleep 15001']
-`, helpers.ObjectNameForTest(t))
-
- // Execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, PipelineRunInputType, t, expectedFailedStatus)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedPR := parse.MustParseV1PipelineRun(t, outputYAML)
-
- if err := checkPipelineRunConditionSucceeded(resolvedPR.Status, FailureConditionStatus, "PipelineRunTimeout"); err != nil {
- t.Error(err)
- }
-
-}
-
-// ** there is no feasible way as in v1 conformance policy to test finally without
-// dependencies: results, param functionality
-func TestPipelineRunTaskFinally(t *testing.T) {
- var inputOp0, inputOp1 = 3, 1
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: PipelineRun
-metadata:
- name: %s
-spec:
- pipelineSpec:
- params:
- - name: a
- type: string
- - name: b
- type: string
- results:
- - name: task-result
- description: "grabbing results from the tasks section"
- value: $(tasks.multiply-inputs.results.product)
- - name: finally-result
- description: "grabbing results from the finally section"
- value: $(finally.exponent.results.product)
- tasks:
- - name: multiply-inputs
- taskSpec:
- results:
- - name: product
- description: The product of the two provided integers
- steps:
- - name: product
- image: mirror.gcr.io/bash
- script: |
- #!/usr/bin/env bash
- echo -n $(( "$(params.a)" * "$(params.b)" )) | tee $(results.product.path)
- params:
- - name: a
- value: "$(params.a)"
- - name: b
- value: "$(params.b)"
- finally:
- - name: exponent
- taskSpec:
- results:
- - name: product
- description: The product of the two provided integers
- steps:
- - name: product
- image: mirror.gcr.io/bash
- script: |
- #!/usr/bin/env bash
- echo -n $(( "$(params.a)" * "$(params.b)" )) | tee $(results.product.path)
- params:
- - name: a
- value: "$(tasks.multiply-inputs.results.product)$(tasks.multiply-inputs.results.product)"
- - name: b
- value: "$(tasks.multiply-inputs.results.product)$(tasks.multiply-inputs.results.product)"
- params:
- - name: a
- value: %s
- - name: b
- value: %s
-`, helpers.ObjectNameForTest(t), strconv.Itoa(inputOp0), strconv.Itoa(inputOp1))
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, PipelineRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedPR := parse.MustParseV1PipelineRun(t, outputYAML)
- if len(resolvedPR.Status.Conditions) != 1 {
- t.Errorf("Expect vendor service to populate 1 Condition but no")
- }
-
- expectedFinallyResultVal := strconv.Itoa((inputOp0*10 + inputOp0) * (inputOp1*10 + inputOp1) * inputOp0 * inputOp1)
-
- for _, res := range resolvedPR.Status.Results {
- if res.Name == "finally-result" {
- if res.Value.StringVal != expectedFinallyResultVal {
- t.Errorf("Expect vendor service to provide finally task computation to have resultVal %s, but has: %s", expectedFinallyResultVal, res.Value.StringVal)
- }
- }
- }
-}
-
-// TestPipelineRunConditions examines population of Conditions
-// fields. It creates the a PipelineRun with minimal specifications and checks the
-// required Condition Status and Type.
-func TestPipelineRunConditions(t *testing.T) {
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: PipelineRun
-metadata:
- name: %s
-spec:
- pipelineSpec:
- tasks:
- - name: pipeline-task-0
- taskSpec:
- steps:
- - name: add
- image: mirror.gcr.io/ubuntu
- script:
- echo Hello world!
-`, helpers.ObjectNameForTest(t))
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, PipelineRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedPR := parse.MustParseV1PipelineRun(t, outputYAML)
-
- if err := checkPipelineRunConditionSucceeded(resolvedPR.Status, SucceedConditionStatus, "Succeeded"); err != nil {
- t.Error(err)
- }
-}
-
-func TestPipelineRunChildReferences(t *testing.T) {
- prName := helpers.ObjectNameForTest(t)
- pt0, pt1 := "pipeline-task-0", "pipeline-task-1"
- expectedChildRefs := map[string]string{
- pt0: prName + "-" + pt0,
- pt1: prName + "-" + pt1,
- }
-
- inputYAML := fmt.Sprintf(`
-apiVersion: tekton.dev/v1
-kind: PipelineRun
-metadata:
- name: %s
-spec:
- pipelineSpec:
- tasks:
- - name: %s
- taskSpec:
- steps:
- - name: hello-step
- image: mirror.gcr.io/ubuntu
- script:
- echo Hello world!
- - name: %s
- taskSpec:
- steps:
- - name: hell-step
- image: mirror.gcr.io/ubuntu
- script:
- echo Hello world!
-`, prName, pt0, pt1)
-
- // The execution of Pipeline CRDs that should be implemented by Vendor service
- outputYAML, err := ProcessAndSendToTekton(inputYAML, PipelineRunInputType, t)
- if err != nil {
- t.Fatalf("Vendor service failed processing inputYAML: %s", err)
- }
-
- // Parse and validate output YAML
- resolvedPR := parse.MustParseV1PipelineRun(t, outputYAML)
-
- if err := checkPipelineRunConditionSucceeded(resolvedPR.Status, SucceedConditionStatus, "Succeeded"); err != nil {
- t.Error(err)
- }
-
- if len(resolvedPR.Status.ChildReferences) != 2 {
- t.Errorf("Expect vendor service to have 2 ChildReferences but it has: %v", len(resolvedPR.Status.ChildReferences))
- }
-
- for _, cr := range resolvedPR.Status.ChildReferences {
- if childRefName, ok := expectedChildRefs[cr.PipelineTaskName]; ok {
- if childRefName != cr.Name {
- t.Errorf("Expect vendor service to populate ChildReferenceStatus Name %s but it has: %s", childRefName, cr.Name)
- }
- } else {
- t.Errorf("Does not expect vendor service to populate ChildReferenceStatus PipelineTaskName: %s", cr.PipelineTaskName)
- }
- }
-
-}
diff --git a/upstream/test/conformance_test.go b/upstream/test/conformance_test.go
index 9dffa035751..1a93de0812c 100644
--- a/upstream/test/conformance_test.go
+++ b/upstream/test/conformance_test.go
@@ -193,7 +193,7 @@ spec:
if d := cmp.Diff(tr.Status.Steps, tc.expectedStepState, ignoreTerminatedFields, ignoreStepFields); d != "" {
t.Fatalf("-got, +want: %v", d)
}
- // Note(chmouel): Sometime we have docker-pullable:// or mirror.gcr.io as prefix, so let only compare the suffix
+ // Note(chmouel): Sometime we have docker-pullable:// or docker.io/library as prefix, so let only compare the suffix
if !strings.HasSuffix(tr.Status.Steps[0].ImageID, fqImageName) {
t.Fatalf("`ImageID: %s` does not end with `%s`", tr.Status.Steps[0].ImageID, fqImageName)
}
diff --git a/upstream/test/controller.go b/upstream/test/controller.go
index d4cc5506135..93a3840ae86 100644
--- a/upstream/test/controller.go
+++ b/upstream/test/controller.go
@@ -37,10 +37,10 @@ import (
fakepipelineruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipelinerun/fake"
faketaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/task/fake"
faketaskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/taskrun/fake"
+ fakestepactioninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/stepaction/fake"
fakeverificationpolicyinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/verificationpolicy/fake"
fakeclustertaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/clustertask/fake"
fakecustomruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/customrun/fake"
- fakestepactioninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/stepaction/fake"
fakeresolutionclientset "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/fake"
resolutioninformersv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1beta1"
fakeresolutionrequestclient "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client/fake"
@@ -74,7 +74,7 @@ type Data struct {
Pipelines []*v1.Pipeline
TaskRuns []*v1.TaskRun
Tasks []*v1.Task
- StepActions []*v1beta1.StepAction
+ StepActions []*v1alpha1.StepAction
ClusterTasks []*v1beta1.ClusterTask
CustomRuns []*v1beta1.CustomRun
Pods []*corev1.Pod
@@ -104,7 +104,7 @@ type Informers struct {
Run informersv1alpha1.RunInformer
CustomRun informersv1beta1.CustomRunInformer
Task informersv1.TaskInformer
- StepAction informersv1beta1.StepActionInformer
+ StepAction informersv1alpha1.StepActionInformer
ClusterTask informersv1beta1.ClusterTaskInformer
Pod coreinformers.PodInformer
ConfigMap coreinformers.ConfigMapInformer
@@ -236,7 +236,7 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers
c.Pipeline.PrependReactor("*", "stepactions", AddToInformer(t, i.StepAction.Informer().GetIndexer()))
for _, sa := range d.StepActions {
sa := sa.DeepCopy() // Avoid assumptions that the informer's copy is modified.
- if _, err := c.Pipeline.TektonV1beta1().StepActions(sa.Namespace).Create(ctx, sa, metav1.CreateOptions{}); err != nil {
+ if _, err := c.Pipeline.TektonV1alpha1().StepActions(sa.Namespace).Create(ctx, sa, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
}
diff --git a/upstream/test/conversion_test.go b/upstream/test/conversion_test.go
index 9e14ed7dea0..489fad5aced 100644
--- a/upstream/test/conversion_test.go
+++ b/upstream/test/conversion_test.go
@@ -33,17 +33,21 @@ import (
)
var (
- filterLabels = cmpopts.IgnoreFields(metav1.ObjectMeta{}, "Labels")
- filterAnnotations = cmpopts.IgnoreFields(metav1.ObjectMeta{}, "Annotations")
- filterV1TaskRunStatus = cmpopts.IgnoreFields(v1.TaskRunStatusFields{}, "StartTime", "CompletionTime", "Artifacts")
- filterV1PipelineRunStatus = cmpopts.IgnoreFields(v1.PipelineRunStatusFields{}, "StartTime", "CompletionTime")
- filterV1beta1TaskRunStatus = cmpopts.IgnoreFields(v1beta1.TaskRunStatusFields{}, "StartTime", "CompletionTime")
- filterV1beta1PipelineRunStatus = cmpopts.IgnoreFields(v1beta1.PipelineRunStatusFields{}, "StartTime", "CompletionTime")
- filterContainerStateTerminated = cmpopts.IgnoreFields(corev1.ContainerStateTerminated{}, "StartedAt", "FinishedAt", "ContainerID", "Message")
- filterV1StepState = cmpopts.IgnoreFields(v1.StepState{}, "Name", "ImageID", "Container")
- filterV1beta1StepState = cmpopts.IgnoreFields(v1beta1.StepState{}, "Name", "ImageID", "ContainerName")
- filterV1TaskRunSA = cmpopts.IgnoreFields(v1.TaskRunSpec{}, "ServiceAccountName")
- filterV1PipelineRunSA = cmpopts.IgnoreFields(v1.PipelineTaskRunTemplate{}, "ServiceAccountName")
+ filterLabels = cmpopts.IgnoreFields(metav1.ObjectMeta{}, "Labels")
+ filterAnnotations = cmpopts.IgnoreFields(metav1.ObjectMeta{}, "Annotations")
+ filterV1TaskRunStatus = cmpopts.IgnoreFields(v1.TaskRunStatusFields{}, "StartTime", "CompletionTime")
+ filterV1PipelineRunStatus = cmpopts.IgnoreFields(v1.PipelineRunStatusFields{}, "StartTime", "CompletionTime")
+ filterV1beta1TaskRunStatus = cmpopts.IgnoreFields(v1beta1.TaskRunStatusFields{}, "StartTime", "CompletionTime")
+ filterV1beta1PipelineRunStatus = cmpopts.IgnoreFields(v1beta1.PipelineRunStatusFields{}, "StartTime", "CompletionTime")
+ filterContainerStateTerminated = cmpopts.IgnoreFields(corev1.ContainerStateTerminated{}, "StartedAt", "FinishedAt", "ContainerID", "Message")
+ filterV1StepState = cmpopts.IgnoreFields(v1.StepState{}, "Name", "ImageID", "Container")
+ filterV1beta1StepState = cmpopts.IgnoreFields(v1beta1.StepState{}, "Name", "ImageID", "ContainerName")
+ filterV1TaskRunSA = cmpopts.IgnoreFields(v1.TaskRunSpec{}, "ServiceAccountName")
+ filterV1beta1TaskRunSA = cmpopts.IgnoreFields(v1beta1.TaskRunSpec{}, "ServiceAccountName")
+ filterV1PipelineRunSA = cmpopts.IgnoreFields(v1.PipelineTaskRunTemplate{}, "ServiceAccountName")
+ filterV1beta1PipelineRunSA = cmpopts.IgnoreFields(v1beta1.PipelineRunSpec{}, "ServiceAccountName")
+ filterV1RefSourceImageDigest = cmpopts.IgnoreFields(v1.RefSource{}, "Digest")
+ filterV1beta1RefSourceImageDigest = cmpopts.IgnoreFields(v1beta1.RefSource{}, "Digest")
filterMetadata = []cmp.Option{filterTypeMeta, filterObjectMeta, filterAnnotations}
filterV1TaskRunFields = []cmp.Option{filterTypeMeta, filterObjectMeta, filterLabels, filterAnnotations, filterCondition, filterV1TaskRunStatus, filterContainerStateTerminated, filterV1StepState}
@@ -94,7 +98,7 @@ spec:
runAsNonRoot: true
sidecars:
- name: server
- image: mirror.gcr.io/alpine/git:v2.26.2
+ image: alpine/git:v2.26.2
command: ['/bin/bash']
args: ['-c', 'gcloud auth activate-service-account --key-file /var/secret/bucket-secret/bucket-secret-key']
workingDir: /dir
@@ -170,7 +174,7 @@ spec:
runAsNonRoot: true
sidecars:
- name: server
- image: mirror.gcr.io/alpine/git:v2.26.2
+ image: alpine/git:v2.26.2
command: ['/bin/bash']
args: ['-c', 'gcloud auth activate-service-account --key-file /var/secret/bucket-secret/bucket-secret-key']
workingDir: /dir
@@ -224,7 +228,7 @@ spec:
- name: task1-result
value: task1-val
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
onError: continue
name: exit-with-255
script: |
@@ -246,7 +250,7 @@ spec:
type: string
steps:
- name: verify-status
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
if [ $(params.echoStatus) == "Succeeded" ]
then
@@ -278,7 +282,7 @@ spec:
- name: task1-result
value: task1-val
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
onError: continue
name: exit-with-255
script: |
@@ -300,7 +304,7 @@ spec:
type: string
steps:
- name: verify-status
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
if [ $(params.echoStatus) == "Succeeded" ]
then
@@ -320,7 +324,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
echo "Hello World!"
@@ -353,7 +357,7 @@ spec:
taskSpec:
steps:
- computeResources: {}
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
name: echo
script: |
#!/usr/bin/env bash
@@ -372,7 +376,7 @@ status:
taskSpec:
steps:
- computeResources: {}
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
name: echo
script: |
#!/usr/bin/env bash
@@ -408,7 +412,7 @@ spec:
taskSpec:
steps:
- computeResources: {}
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
name: echo
script: |
#!/usr/bin/env bash
@@ -438,7 +442,7 @@ spec:
taskSpec:
steps:
- computeResources: {}
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
name: echo
script: |
#!/usr/bin/env bash
@@ -454,7 +458,7 @@ status:
taskSpec:
steps:
- computeResources: {}
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
name: echo
script: |
#!/usr/bin/env bash
@@ -493,7 +497,7 @@ spec:
taskSpec:
steps:
- name: echo-hello
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
ls $(workspaces.dir.path)
echo hello
@@ -524,7 +528,7 @@ spec:
taskSpec:
steps:
- name: echo-hello
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
ls $(workspaces.dir.path)
echo hello
@@ -540,7 +544,7 @@ status:
name: cluster-task-pipeline-4
steps:
- name: "echo-hello"
- image: "mirror.gcr.io/ubuntu"
+ image: "ubuntu"
script: |
ls $(workspaces.dir.path)
echo hello
@@ -578,7 +582,7 @@ spec:
taskSpec:
steps:
- name: echo-hello
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
ls $(workspaces.dir.path)
echo hello
@@ -611,7 +615,7 @@ spec:
taskSpec:
steps:
- name: echo-hello
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
ls $(workspaces.dir.path)
echo hello
@@ -635,7 +639,7 @@ status:
name: cluster-task-pipeline-4
steps:
- name: "echo-hello"
- image: "mirror.gcr.io/ubuntu"
+ image: "ubuntu"
script: |
ls $(workspaces.dir.path)
echo hello
@@ -650,6 +654,202 @@ status:
name: %s-hello-task
pipelineTaskName: hello-task
`
+
+ v1beta1TaskWithBundleYaml = `
+metadata:
+ name: %s
+ namespace: %s
+spec:
+ steps:
+ - name: hello
+ image: alpine
+ script: 'echo Hello'
+`
+
+ v1beta1PipelineWithBundleYaml = `
+metadata:
+ name: %s
+ namespace: %s
+spec:
+ tasks:
+ - name: hello-world
+ taskRef:
+ resolver: bundles
+ params:
+ - name: bundle
+ value: %s
+ - name: name
+ value: %s
+`
+
+ v1beta1TaskRunWithBundleYaml = `
+metadata:
+ name: %s
+ namespace: %s
+spec:
+ taskRef:
+ name: %s
+ bundle: %s
+`
+
+ v1beta1PipelineRunWithBundleYaml = `
+metadata:
+ name: %s
+ namespace: %s
+spec:
+ pipelineRef:
+ name: %s
+ bundle: %s
+`
+
+ v1TaskRunWithBundleExpectedYaml = `
+metadata:
+ name: %s
+ namespace: %s
+spec:
+ serviceAccountName: default
+ timeout: 1h
+ taskRef:
+ resolver: bundles
+ params:
+ - name: bundle
+ value: %s
+ - name: name
+ value: %s
+ - name: kind
+ value: Task
+status:
+ conditions:
+ - type: Succeeded
+ status: "True"
+ reason: "Succeeded"
+ podName: %s-pod
+ taskSpec:
+ steps:
+ - computeResources: {}
+ image: alpine
+ name: hello
+ script: 'echo Hello'
+ steps:
+ - image: alpine
+ name: hello
+ script: 'echo Hello'
+ terminationReason: Completed
+ terminated:
+ reason: Completed
+`
+
+ v1PipelineRunWithBundleExpectedYaml = `
+metadata:
+ name: %s
+ namespace: %s
+spec:
+ taskRunTemplate:
+ timeouts:
+ pipeline: 1h
+ pipelineRef:
+ resolver: bundles
+ params:
+ - name: bundle
+ value: %s
+ - name: name
+ value: %s
+ - name: kind
+ value: Pipeline
+status:
+ conditions:
+ - type: Succeeded
+ status: "True"
+ reason: "Succeeded"
+ pipelineSpec:
+ tasks:
+ - name: hello-world
+ taskRef:
+ resolver: bundles
+ params:
+ - name: bundle
+ value: %s
+ - name: name
+ value: %s
+ childReferences:
+ - apiVersion: tekton.dev/v1
+ kind: TaskRun
+ name: %s-hello-world
+ pipelineTaskName: hello-world
+`
+
+ v1beta1TaskRunWithBundleRoundTripYaml = `
+metadata:
+ name: %s
+ namespace: %s
+spec:
+ timeout: 1h
+ taskRef:
+ resolver: bundles
+ params:
+ - name: bundle
+ value: %s
+ - name: name
+ value: %s
+ - name: kind
+ value: Task
+status:
+ conditions:
+ - type: Succeeded
+ status: "True"
+ reason: "Succeeded"
+ podName: %s-pod
+ taskSpec:
+ steps:
+ - computeResources: {}
+ image: alpine
+ name: hello
+ script: 'echo Hello'
+ steps:
+ - image: alpine
+ name: hello
+ script: 'echo Hello'
+ terminated:
+ reason: Completed
+`
+
+ v1beta1PipelineRunWithBundleRoundTripYaml = `
+metadata:
+ name: %s
+ namespace: %s
+spec:
+ timeouts:
+ pipeline: 1h
+ pipelineRef:
+ resolver: bundles
+ params:
+ - name: bundle
+ value: %s
+ - name: name
+ value: %s
+ - name: kind
+ value: Pipeline
+status:
+ conditions:
+ - type: Succeeded
+ status: "True"
+ reason: "Succeeded"
+ pipelineSpec:
+ tasks:
+ - name: hello-world
+ taskRef:
+ resolver: bundles
+ params:
+ - name: bundle
+ value: %s
+ - name: name
+ value: %s
+ childReferences:
+ - apiVersion: tekton.dev/v1
+ kind: TaskRun
+ name: %s-hello-world
+ pipelineTaskName: hello-world
+`
)
// TestTaskCRDConversion first creates a v1beta1 Task CRD using v1beta1Clients and
@@ -948,3 +1148,118 @@ func TestPipelineRunCRDConversion(t *testing.T) {
t.Errorf("-want, +got: %v", d)
}
}
+
+// TestBundleConversion tests v1beta1 bundle syntax converted into v1 since it has
+// been deprecated in v1 and it would be converted into bundle resolver in pipelineRef
+// and taskRef. It sets up a registry for a bundle of a v1beta1 Task and Pipeline
+// and uses the v1beta1 TaskRef/ PipelineRef to test the conversion from v1beta1 bundle
+// syntax to a v1 bundle resolver and then it tests roundtrip back to v1beta1 bundle
+// resolver syntax.
+func TestBundleConversion(t *testing.T) {
+ ctx := context.Background()
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ t.Parallel()
+
+ c, namespace := setup(ctx, t, withRegistry, bundleFeatureFlags)
+ knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
+ defer tearDown(ctx, t, c, namespace)
+
+ repo := getRegistryServiceIP(ctx, t, c, namespace) + ":5000/tektonbundlessimple"
+ taskName := helpers.ObjectNameForTest(t)
+ pipelineName := helpers.ObjectNameForTest(t)
+ task := parse.MustParseV1beta1Task(t, fmt.Sprintf(v1beta1TaskWithBundleYaml, taskName, namespace))
+ pipeline := parse.MustParseV1beta1Pipeline(t, fmt.Sprintf(v1beta1PipelineWithBundleYaml, pipelineName, namespace, repo, taskName))
+ setupBundle(ctx, t, c, namespace, repo, task, pipeline)
+
+ v1beta1TaskRunName := helpers.ObjectNameForTest(t)
+ v1beta1TaskRun := parse.MustParseV1beta1TaskRun(t, fmt.Sprintf(v1beta1TaskRunWithBundleYaml, v1beta1TaskRunName, namespace, taskName, repo))
+ v1TaskRunExpected := parse.MustParseV1TaskRun(t, fmt.Sprintf(v1TaskRunWithBundleExpectedYaml, v1beta1TaskRunName, namespace, repo, taskName, v1beta1TaskRunName))
+ v1beta1TaskRunRoundTripExpected := parse.MustParseV1beta1TaskRun(t, fmt.Sprintf(v1beta1TaskRunWithBundleRoundTripYaml, v1beta1TaskRunName, namespace, repo, taskName, v1beta1TaskRunName))
+
+ v1TaskRunExpected.Status.Provenance = &v1.Provenance{
+ FeatureFlags: getFeatureFlagsBaseOnAPIFlag(t),
+ RefSource: &v1.RefSource{
+ URI: repo,
+ Digest: map[string]string{"sha256": "a123"},
+ EntryPoint: taskName,
+ },
+ }
+ v1beta1TaskRunRoundTripExpected.Status.Provenance = &v1beta1.Provenance{
+ FeatureFlags: getFeatureFlagsBaseOnAPIFlag(t),
+ RefSource: &v1beta1.RefSource{
+ URI: repo,
+ Digest: map[string]string{"sha256": "a123"},
+ EntryPoint: taskName,
+ },
+ }
+
+ if _, err := c.V1beta1TaskRunClient.Create(ctx, v1beta1TaskRun, metav1.CreateOptions{}); err != nil {
+ t.Fatalf("Failed to create v1beta1 TaskRun: %s", err)
+ }
+ if err := WaitForTaskRunState(ctx, c, v1beta1TaskRunName, Succeed(v1beta1TaskRunName), v1beta1TaskRunName, "v1beta1"); err != nil {
+ t.Fatalf("Failed waiting for v1beta1 TaskRun done: %v", err)
+ }
+
+ v1TaskRunGot, err := c.V1TaskRunClient.Get(ctx, v1beta1TaskRunName, metav1.GetOptions{})
+ if err != nil {
+ t.Fatalf("Couldn't get expected v1 TaskRun for %s: %s", v1beta1TaskRunName, err)
+ }
+ if d := cmp.Diff(v1TaskRunExpected, v1TaskRunGot, append([]cmp.Option{filterV1RefSourceImageDigest, filterV1TaskRunSA}, filterV1TaskRunFields...)...); d != "" {
+ t.Errorf("-want, +got: %v", d)
+ }
+
+ v1beta1TaskRunRoundTrip := &v1beta1.TaskRun{}
+ if err := v1beta1TaskRunRoundTrip.ConvertFrom(context.Background(), v1TaskRunGot); err != nil {
+ t.Fatalf("Failed to convert roundtrip v1beta1TaskRunGot ConvertFrom v1 = %v", err)
+ }
+ if d := cmp.Diff(v1beta1TaskRunRoundTripExpected, v1beta1TaskRunRoundTrip, append([]cmp.Option{filterV1beta1RefSourceImageDigest, filterV1beta1TaskRunSA}, filterV1beta1TaskRunFields...)...); d != "" {
+ t.Errorf("-want, +got: %v", d)
+ }
+
+ v1beta1ToV1PipelineRunName := helpers.ObjectNameForTest(t)
+ v1beta1PipelineRun := parse.MustParseV1beta1PipelineRun(t, fmt.Sprintf(v1beta1PipelineRunWithBundleYaml, v1beta1ToV1PipelineRunName, namespace, pipelineName, repo))
+ v1PipelineRunExpected := parse.MustParseV1PipelineRun(t, fmt.Sprintf(v1PipelineRunWithBundleExpectedYaml, v1beta1ToV1PipelineRunName, namespace, repo, pipelineName, repo, taskName, v1beta1ToV1PipelineRunName))
+ v1beta1PRRoundTripExpected := parse.MustParseV1beta1PipelineRun(t, fmt.Sprintf(v1beta1PipelineRunWithBundleRoundTripYaml, v1beta1ToV1PipelineRunName, namespace, repo, pipelineName, repo, taskName, v1beta1ToV1PipelineRunName))
+
+ v1PipelineRunExpected.Status.Provenance = &v1.Provenance{
+ FeatureFlags: getFeatureFlagsBaseOnAPIFlag(t),
+ RefSource: &v1.RefSource{
+ URI: repo,
+ Digest: map[string]string{"sha256": "a123"},
+ EntryPoint: pipelineName,
+ },
+ }
+ v1beta1PRRoundTripExpected.Status.Provenance = &v1beta1.Provenance{
+ FeatureFlags: getFeatureFlagsBaseOnAPIFlag(t),
+ RefSource: &v1beta1.RefSource{
+ URI: repo,
+ Digest: map[string]string{"sha256": "a123"},
+ EntryPoint: pipelineName,
+ },
+ }
+
+ if _, err := c.V1beta1PipelineRunClient.Create(ctx, v1beta1PipelineRun, metav1.CreateOptions{}); err != nil {
+ t.Fatalf("Failed to create v1beta1 PipelineRun: %s", err)
+ }
+ if err := WaitForPipelineRunState(ctx, c, v1beta1ToV1PipelineRunName, timeout, Succeed(v1beta1ToV1PipelineRunName), v1beta1ToV1PipelineRunName, "v1beta1"); err != nil {
+ t.Fatalf("Failed waiting for v1beta1 PipelineRun done: %v", err)
+ }
+
+ v1PipelineRunGot, err := c.V1PipelineRunClient.Get(ctx, v1beta1ToV1PipelineRunName, metav1.GetOptions{})
+ if err != nil {
+ t.Fatalf("Couldn't get expected v1 PipelineRun for %s: %s", v1beta1ToV1PipelineRunName, err)
+ }
+ if d := cmp.Diff(v1PipelineRunExpected, v1PipelineRunGot, append([]cmp.Option{filterV1RefSourceImageDigest, filterV1PipelineRunSA}, filterV1PipelineRunFields...)...); d != "" {
+ t.Errorf("-want, +got: %v", d)
+ }
+
+ v1beta1PRRoundTrip := &v1beta1.PipelineRun{}
+ if err := v1beta1PRRoundTrip.ConvertFrom(context.Background(), v1PipelineRunGot); err != nil {
+ t.Fatalf("Error roundtrip v1beta1PipelineRun ConvertFrom v1PipelineRunGot = %v", err)
+ }
+ if d := cmp.Diff(v1beta1PRRoundTripExpected, v1beta1PRRoundTrip, append([]cmp.Option{filterV1beta1RefSourceImageDigest, filterV1beta1PipelineRunSA}, filterV1beta1PipelineRunFields...)...); d != "" {
+ t.Errorf("-want, +got: %v", d)
+ }
+}
diff --git a/upstream/test/custom-task-ctrls/wait-task-beta/example_customrun_matrix_results.yaml b/upstream/test/custom-task-ctrls/wait-task-beta/example_customrun_matrix_results.yaml
index 80ed7a1f520..0f43f79d3ab 100644
--- a/upstream/test/custom-task-ctrls/wait-task-beta/example_customrun_matrix_results.yaml
+++ b/upstream/test/custom-task-ctrls/wait-task-beta/example_customrun_matrix_results.yaml
@@ -11,7 +11,7 @@ spec:
- name: duration
steps:
- name: echo
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo "$(params.duration)"
---
@@ -25,7 +25,7 @@ spec:
type: array
steps:
- name: produce-a-list-of-durations
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
#!/usr/bin/env bash
echo -n "[\"10s\",\"2s\",\"5s\"]" | tee $(results.durations.path)
diff --git a/upstream/test/custom-task-ctrls/wait-task-beta/example_pipelinerun.yaml b/upstream/test/custom-task-ctrls/wait-task-beta/example_pipelinerun.yaml
index 91d5fa813b1..513adcc5394 100644
--- a/upstream/test/custom-task-ctrls/wait-task-beta/example_pipelinerun.yaml
+++ b/upstream/test/custom-task-ctrls/wait-task-beta/example_pipelinerun.yaml
@@ -8,7 +8,7 @@ spec:
- name: before
taskSpec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: echo before wait
- name: wait
taskRef:
@@ -21,6 +21,6 @@ spec:
- name: after
taskSpec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: echo after wait
runAfter: ['wait']
diff --git a/upstream/test/custom-task-ctrls/wait-task-beta/go.mod b/upstream/test/custom-task-ctrls/wait-task-beta/go.mod
index 626842418d9..f49b3abda70 100644
--- a/upstream/test/custom-task-ctrls/wait-task-beta/go.mod
+++ b/upstream/test/custom-task-ctrls/wait-task-beta/go.mod
@@ -6,9 +6,9 @@ require (
github.com/emicklei/go-restful v2.16.0+incompatible // indirect
github.com/google/go-cmp v0.6.0
github.com/tektoncd/pipeline v0.53.1
- k8s.io/api v0.27.16
- k8s.io/apimachinery v0.27.16
- k8s.io/client-go v0.27.16
+ k8s.io/api v0.27.13
+ k8s.io/apimachinery v0.27.13
+ k8s.io/client-go v0.27.13
k8s.io/utils v0.0.0-20230505201702-9f6742963106
knative.dev/pkg v0.0.0-20231011193800-bd99f2f98be7
)
diff --git a/upstream/test/custom-task-ctrls/wait-task-beta/go.sum b/upstream/test/custom-task-ctrls/wait-task-beta/go.sum
index dc936579a74..39cf8077312 100644
--- a/upstream/test/custom-task-ctrls/wait-task-beta/go.sum
+++ b/upstream/test/custom-task-ctrls/wait-task-beta/go.sum
@@ -3661,8 +3661,8 @@ k8s.io/api v0.23.6/go.mod h1:1kFaYxGCFHYp3qd6a85DAj/yW8aVD6XLZMqJclkoi9g=
k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU=
k8s.io/api v0.26.5/go.mod h1:O7ICW7lj6+ZQQQ3cxekgCoW+fnGo5kWT0nTHkLZ5grc=
k8s.io/api v0.27.1/go.mod h1:z5g/BpAiD+f6AArpqNjkY+cji8ueZDU/WV1jcj5Jk4E=
-k8s.io/api v0.27.16 h1:70IBoTuiPfd+Tm68WH0tGXQRSQq0R1xnbyhTRe8WYQY=
-k8s.io/api v0.27.16/go.mod h1:5j0Cgo6X4qovBOu3OjzRwETDEYqMxq2qafhDQXOPy3A=
+k8s.io/api v0.27.13 h1:d49LYs1dh+JMMDNYQSu8FhEzCjc2TNpYvDWoSGAKs80=
+k8s.io/api v0.27.13/go.mod h1:W3lYMPs34i0XQA+cmKfejve+HwbRZjy67fL05RyJUTo=
k8s.io/apiextensions-apiserver v0.26.5 h1:VJ946z9RjyCPn3qiz4Kus/UYjCRrdn1xUvEsJFvN5Yo=
k8s.io/apiextensions-apiserver v0.26.5/go.mod h1:Olsde7ZNWnyz9rsL13iXYXmL1h7kWujtKeC3yWVCDPo=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
@@ -3676,8 +3676,8 @@ k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5
k8s.io/apimachinery v0.26.5/go.mod h1:HUvk6wrOP4v22AIYqeCGSQ6xWCHo41J9d6psb3temAg=
k8s.io/apimachinery v0.27.1/go.mod h1:5ikh59fK3AJ287GUvpUsryoMFtH9zj/ARfWCo3AyXTM=
k8s.io/apimachinery v0.27.3/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E=
-k8s.io/apimachinery v0.27.16 h1:Nmbei3P/6w6vxbNxV8/sDCZz+TQrJ9A4+bVIRjDufuM=
-k8s.io/apimachinery v0.27.16/go.mod h1:TWo+8wOIz3CytsrlI9k/LBWXLRr9dqf5hRSCbbggMAg=
+k8s.io/apimachinery v0.27.13 h1:xDAnOWaRVNSkaKdfB0Ab11hixH90KGTbLwEHMloMjFM=
+k8s.io/apimachinery v0.27.13/go.mod h1:TWo+8wOIz3CytsrlI9k/LBWXLRr9dqf5hRSCbbggMAg=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
@@ -3692,8 +3692,8 @@ k8s.io/client-go v0.23.6/go.mod h1:Umt5icFOMLV/+qbtZ3PR0D+JA6lvvb3syzodv4irpK4=
k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU=
k8s.io/client-go v0.26.5/go.mod h1:/CYyNt+ZLMvWqMF8h1SvkUXz2ujFWQLwdDrdiQlZ5X0=
k8s.io/client-go v0.27.1/go.mod h1:f8LHMUkVb3b9N8bWturc+EDtVVVwZ7ueTVquFAJb2vA=
-k8s.io/client-go v0.27.16 h1:x06Jk6/SIQQ6kAsWs5uzQIkBLHtcAQlbTAgmj1tZzG0=
-k8s.io/client-go v0.27.16/go.mod h1:bPZUNRj8XsHa+JVS5jU6qeU2H/Za8+7riWA08FUjaA8=
+k8s.io/client-go v0.27.13 h1:SfUbIukb6BSqaadlYRX0AzMoN6+e+9FZGEKqfisidho=
+k8s.io/client-go v0.27.13/go.mod h1:I9SBaI28r6ii465Fb0dTpf5O3adOnDwNBoeqlDNbbFg=
k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0=
k8s.io/code-generator v0.26.5/go.mod h1:iWTVFxfBX+RYe0bXjKqSM83KJF8eimor/izQInvq/60=
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
diff --git a/upstream/test/custom_task_test.go b/upstream/test/custom_task_test.go
index 61bcac7af00..001e61285a0 100644
--- a/upstream/test/custom_task_test.go
+++ b/upstream/test/custom_task_test.go
@@ -111,7 +111,7 @@ spec:
steps:
- args: ['-c', 'echo $(input-result-from-custom-task-ref) $(input-result-from-custom-task-spec)']
command: ['/bin/bash']
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
`, pipelineRunName, betaAPIVersion, kind, betaAPIVersion, kind, customTaskRawSpec)),
metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to create PipelineRun %q: %v", pipelineRunName, err)
diff --git a/upstream/test/dag_test.go b/upstream/test/dag_test.go
index afd8f3281bf..e5fc176243e 100644
--- a/upstream/test/dag_test.go
+++ b/upstream/test/dag_test.go
@@ -70,9 +70,9 @@ spec:
results:
- name: result
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: 'echo $(params["text"])'
- - image: mirror.gcr.io/busybox
+ - image: busybox
# Sleep for N seconds so that we can check that tasks that
# should be run in parallel have overlap.
script: |
diff --git a/upstream/test/duplicate_test.go b/upstream/test/duplicate_test.go
index bd606271edf..508c80735b4 100644
--- a/upstream/test/duplicate_test.go
+++ b/upstream/test/duplicate_test.go
@@ -47,7 +47,7 @@ func TestDuplicatePodTaskRun(t *testing.T) {
// The number of builds generated has a direct impact on test
// runtime and is traded off against proving the taskrun
// reconciler's efficacy at not duplicating pods.
- for range 5 {
+ for i := 0; i < 5; i++ {
wg.Add(1)
taskrunName := helpers.ObjectNameForTest(t)
t.Logf("Creating taskrun %q.", taskrunName)
@@ -59,7 +59,7 @@ metadata:
spec:
taskSpec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
command: ['/bin/echo']
args: ['simple']
`, taskrunName, namespace))
diff --git a/upstream/test/e2e-tests-kind-prow-alpha.env b/upstream/test/e2e-tests-kind-prow-alpha.env
index 5ab894c2ac7..255bdf63579 100644
--- a/upstream/test/e2e-tests-kind-prow-alpha.env
+++ b/upstream/test/e2e-tests-kind-prow-alpha.env
@@ -8,5 +8,3 @@ ENABLE_STEP_ACTIONS=true
ENABLE_CEL_IN_WHENEXPRESSION=true
ENABLE_PARAM_ENUM=true
ENABLE_ARTIFACTS=true
-ENABLE_CONCISE_RESOLVER_SYNTAX=true
-ENABLE_KUBERNETES_SIDECAR=true
diff --git a/upstream/test/e2e-tests-kind-prow-beta.env b/upstream/test/e2e-tests-kind-prow-beta.env
index acf7b599ce8..8dae8c618fe 100644
--- a/upstream/test/e2e-tests-kind-prow-beta.env
+++ b/upstream/test/e2e-tests-kind-prow-beta.env
@@ -2,7 +2,5 @@ SKIP_INITIALIZE=true
PIPELINE_FEATURE_GATE=beta
EMBEDDED_STATUS_GATE=minimal
RUN_YAML_TESTS=true
-RESULTS_FROM=sidecar-logs
-ENABLE_STEP_ACTIONS=true
KO_DOCKER_REPO=registry.local:5000
E2E_GO_TEST_TIMEOUT=40m
diff --git a/upstream/test/e2e-tests.sh b/upstream/test/e2e-tests.sh
index 9d78515fdb5..a291f27e4ba 100755
--- a/upstream/test/e2e-tests.sh
+++ b/upstream/test/e2e-tests.sh
@@ -32,8 +32,6 @@ ENABLE_STEP_ACTIONS=${ENABLE_STEP_ACTIONS:="false"}
ENABLE_CEL_IN_WHENEXPRESSION=${ENABLE_CEL_IN_WHENEXPRESSION:="false"}
ENABLE_PARAM_ENUM=${ENABLE_PARAM_ENUM:="false"}
ENABLE_ARTIFACTS=${ENABLE_ARTIFACTS:="false"}
-ENABLE_CONCISE_RESOLVER_SYNTAX=${ENABLE_CONCISE_RESOLVER_SYNTAX:="false"}
-ENABLE_KUBERNETES_SIDECAR=${ENABLE_KUBERNETES_SIDECAR:="false"}
failed=0
# Script entry point.
@@ -132,30 +130,6 @@ function set_enable_artifacts() {
kubectl patch configmap feature-flags -n tekton-pipelines -p "$jsonpatch"
}
-function set_enable_concise_resolver_syntax() {
- local method="$1"
- if [ "$method" != "false" ] && [ "$method" != "true" ]; then
- printf "Invalid value for enable-concise-resolver-syntax %s\n" ${method}
- exit 255
- fi
- printf "Setting enable-concise-resolver-syntax to %s\n", ${method}
- jsonpatch=$(printf "{\"data\": {\"enable-concise-resolver-syntax\": \"%s\"}}" $1)
- echo "feature-flags ConfigMap patch: ${jsonpatch}"
- kubectl patch configmap feature-flags -n tekton-pipelines -p "$jsonpatch"
-}
-
-function set_enable_kubernetes_sidecar() {
- local method="$1"
- if [ "$method" != "false" ] && [ "$method" != "true" ]; then
- printf "Invalid value for enable-kubernetes-sidecar %s\n" ${method}
- exit 255
- fi
- printf "Setting enable-kubernetes-sidecar to %s\n", ${method}
- jsonpatch=$(printf "{\"data\": {\"enable-kubernetes-sidecar\": \"%s\"}}" $1)
- echo "feature-flags ConfigMap patch: ${jsonpatch}"
- kubectl patch configmap feature-flags -n tekton-pipelines -p "$jsonpatch"
-}
-
function run_e2e() {
# Run the integration tests
header "Running Go e2e tests"
@@ -183,8 +157,6 @@ set_enable_step_actions "$ENABLE_STEP_ACTIONS"
set_cel_in_whenexpression "$ENABLE_CEL_IN_WHENEXPRESSION"
set_enable_param_enum "$ENABLE_PARAM_ENUM"
set_enable_artifacts "$ENABLE_ARTIFACTS"
-set_enable_concise_resolver_syntax "$ENABLE_CONCISE_RESOLVER_SYNTAX"
-set_enable_kubernetes_sidecar "$ENABLE_KUBERNETES_SIDECAR"
run_e2e
(( failed )) && fail_test
diff --git a/upstream/test/entrypoint_test.go b/upstream/test/entrypoint_test.go
index 0814a0caaad..26454d8938f 100644
--- a/upstream/test/entrypoint_test.go
+++ b/upstream/test/entrypoint_test.go
@@ -54,10 +54,10 @@ metadata:
spec:
taskSpec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
workingDir: /workspace
script: 'sleep 3 && touch foo'
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
workingDir: /workspace
script: 'ls foo'
`, epTaskRunName, namespace)), metav1.CreateOptions{}); err != nil {
diff --git a/upstream/test/examples_test.go b/upstream/test/examples_test.go
index 3880bc42022..74a74909858 100644
--- a/upstream/test/examples_test.go
+++ b/upstream/test/examples_test.go
@@ -280,14 +280,13 @@ func imageNamesMapping() map[string]string {
}
case "ppc64le":
return map[string]string{
- "registry": getTestImage(registryImage),
- "node": "node:alpine3.11",
- "gcr.io/cloud-builders/git": "alpine/git:latest",
- "docker@sha256:74e78208fc18da48ddf8b569abe21563730845c312130bd0f0b059746a7e10f5": "ibmcom/docker-ppc64le:19.03-dind",
+ "registry": getTestImage(registryImage),
+ "node": "node:alpine3.11",
+ "gcr.io/cloud-builders/git": "alpine/git:latest",
+ "docker:dind": "ibmcom/docker-ppc64le:19.03-dind",
"docker": "docker:18.06.3",
"mikefarah/yq:3": "danielxlee/yq:2.4.0",
"stedolan/jq": "ibmcom/jq-ppc64le:latest",
- "amd64/ubuntu": "ppc64le/ubuntu",
"gcr.io/kaniko-project/executor:v1.3.0": getTestImage(kanikoImage),
}
}
diff --git a/upstream/test/featureflags.go b/upstream/test/featureflags.go
index a6b6db071f6..673d82cb75f 100644
--- a/upstream/test/featureflags.go
+++ b/upstream/test/featureflags.go
@@ -111,23 +111,19 @@ func requireAllGates(gates map[string]string) func(context.Context, *testing.T,
func getFeatureFlagsBaseOnAPIFlag(t *testing.T) *config.FeatureFlags {
t.Helper()
alphaFeatureFlags, err := config.NewFeatureFlagsFromMap(map[string]string{
- "enable-api-fields": "alpha",
- "results-from": "sidecar-logs",
- "enable-tekton-oci-bundles": "true",
- "enable-step-actions": "true",
- "enable-cel-in-whenexpression": "true",
- "enable-param-enum": "true",
- "enable-artifacts": "true",
- "enable-concise-resolver-syntax": "true",
- "enable-kubernetes-sidecar": "true",
+ "enable-api-fields": "alpha",
+ "results-from": "sidecar-logs",
+ "enable-tekton-oci-bundles": "true",
+ "enable-step-actions": "true",
+ "enable-cel-in-whenexpression": "true",
+ "enable-param-enum": "true",
+ "enable-artifacts": "true",
})
if err != nil {
t.Fatalf("error creating alpha feature flags configmap: %v", err)
}
betaFeatureFlags, err := config.NewFeatureFlagsFromMap(map[string]string{
- "results-from": "sidecar-logs",
- "enable-api-fields": "beta",
- "enable-step-actions": "true",
+ "enable-api-fields": "beta",
})
if err != nil {
t.Fatalf("error creating beta feature flags configmap: %v", err)
diff --git a/upstream/test/git-resolver/gitea.yaml b/upstream/test/git-resolver/gitea.yaml
index 58874241b29..dc085f0634f 100644
--- a/upstream/test/git-resolver/gitea.yaml
+++ b/upstream/test/git-resolver/gitea.yaml
@@ -704,7 +704,7 @@ spec:
fsGroup: 1000
initContainers:
- name: init-directories
- image: "docker.io/gitea/gitea:1.17.1"
+ image: "gitea/gitea:1.17.1"
imagePullPolicy: Always
command: ["/usr/sbin/init_directory_structure.sh"]
env:
@@ -727,7 +727,7 @@ spec:
securityContext:
{}
- name: init-app-ini
- image: "docker.io/gitea/gitea:1.17.1"
+ image: "gitea/gitea:1.17.1"
imagePullPolicy: Always
command: ["/usr/sbin/config_environment.sh"]
env:
@@ -752,7 +752,7 @@ spec:
securityContext:
{}
- name: configure-gitea
- image: "docker.io/gitea/gitea:1.17.1"
+ image: "gitea/gitea:1.17.1"
command: ["/usr/sbin/configure_gitea.sh"]
imagePullPolicy: Always
securityContext:
@@ -781,7 +781,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: gitea
- image: "docker.io/gitea/gitea:1.17.1"
+ image: "gitea/gitea:1.17.1"
imagePullPolicy: Always
env:
# SSH Port values have to be set here as well for openssh configuration
diff --git a/upstream/test/git-resolver/remote-task.yaml b/upstream/test/git-resolver/remote-task.yaml
index 95f0e8d4b27..1737ad12622 100644
--- a/upstream/test/git-resolver/remote-task.yaml
+++ b/upstream/test/git-resolver/remote-task.yaml
@@ -5,6 +5,6 @@ metadata:
spec:
steps:
- name: sleep
- image: mirror.gcr.io/busybox
+ image: busybox
command: ['/bin/sh']
args: ['-c', 'sleep 10']
diff --git a/upstream/test/gohelloworld/main.go b/upstream/test/gohelloworld/main.go
index 2cfe8f5a3ef..8c0bd0f8a5b 100644
--- a/upstream/test/gohelloworld/main.go
+++ b/upstream/test/gohelloworld/main.go
@@ -31,7 +31,8 @@ func main() {
log.Print("Hello world sample started.")
http.HandleFunc("/", handler)
- //nolint: gosec
+ //nolint:gosec
+ // #nosec G114 -- see https://github.com/securego/gosec#available-rules
if err := http.ListenAndServe(":8080", nil); err != nil {
panic(err)
}
diff --git a/upstream/test/ignore_step_error_test.go b/upstream/test/ignore_step_error_test.go
index adb2c8564aa..0ba770e96ec 100644
--- a/upstream/test/ignore_step_error_test.go
+++ b/upstream/test/ignore_step_error_test.go
@@ -53,7 +53,7 @@ spec:
steps:
- name: failing-step
onError: continue
- image: mirror.gcr.io/busybox
+ image: busybox
script: 'echo -n 123 | tee $(results.result1.path); exit 1; echo -n 456 | tee $(results.result2.path)'
`, taskRunName, namespace))
diff --git a/upstream/test/ignore_task_error_test.go b/upstream/test/ignore_task_error_test.go
index e8492585cb0..becb0618ab7 100644
--- a/upstream/test/ignore_task_error_test.go
+++ b/upstream/test/ignore_task_error_test.go
@@ -37,7 +37,7 @@ func TestFailingPipelineTaskOnContinue(t *testing.T) {
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
- c, namespace := setup(ctx, t, requireAnyGate(map[string]string{"enable-api-fields": "beta"}))
+ c, namespace := setup(ctx, t, requireAnyGate(map[string]string{"enable-api-fields": "alpha"}))
knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
defer tearDown(ctx, t, c, namespace)
@@ -57,14 +57,14 @@ spec:
type: string
steps:
- name: failing-step
- image: mirror.gcr.io/busybox
+ image: busybox
script: 'exit 1; echo -n 123 | tee $(results.result1.path)'
- name: order-dep-task
runAfter: ["failed-ignored-task"]
taskSpec:
steps:
- name: foo
- image: mirror.gcr.io/busybox
+ image: busybox
script: 'echo hello'
- name: resource-dep-task
onError: continue
@@ -77,7 +77,7 @@ spec:
type: string
steps:
- name: foo
- image: mirror.gcr.io/busybox
+ image: busybox
script: 'echo $(params.param1)'
`, prName, namespace))
diff --git a/upstream/test/larger_results_sidecar_logs_test.go b/upstream/test/larger_results_sidecar_logs_test.go
index e1c9e54263d..5cd4845b8fc 100644
--- a/upstream/test/larger_results_sidecar_logs_test.go
+++ b/upstream/test/larger_results_sidecar_logs_test.go
@@ -61,6 +61,7 @@ func TestLargerResultsSidecarLogs(t *testing.T) {
}}
for _, td := range tds {
+ td := td
t.Run(td.name, func(t *testing.T) {
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
@@ -90,7 +91,7 @@ func TestLargerResultsSidecarLogs(t *testing.T) {
}
t.Logf("Waiting for PipelineRun %s in namespace %s to complete", prName, namespace)
- if err := WaitForPipelineRunState(ctx, c, prName, timeout, PipelineRunFailed(prName), "PipelineRunFailed", v1Version); err != nil {
+ if err := WaitForPipelineRunState(ctx, c, prName, timeout, PipelineRunSucceed(prName), "PipelineRunSuccess", v1Version); err != nil {
t.Fatalf("Error waiting for PipelineRun %s to finish: %s", prName, err)
}
cl, _ := c.V1PipelineRunClient.Get(ctx, prName, metav1.GetOptions{})
@@ -133,26 +134,6 @@ func TestLargerResultsSidecarLogs(t *testing.T) {
}
}
-func setUpSidecarLogs(ctx context.Context, t *testing.T, fn ...func(context.Context, *testing.T, *clients, string)) (*clients, string) {
- t.Helper()
- c, ns := setup(ctx, t)
- configMapData := map[string]string{
- "results-from": "sidecar-logs",
- }
-
- if err := updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), configMapData); err != nil {
- t.Fatal(err)
- }
- return c, ns
-}
-
-func resetSidecarLogs(ctx context.Context, t *testing.T, c *clients, previousResultExtractionMethod string) {
- t.Helper()
- if err := updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{"results-from": previousResultExtractionMethod}); err != nil {
- t.Fatal(err)
- }
-}
-
func getLargerResultsPipelineRun(t *testing.T, namespace string) (*v1.PipelineRun, *v1.PipelineRun, []*v1.TaskRun) {
t.Helper()
pipelineRun := parse.MustParseV1PipelineRun(t, fmt.Sprintf(`
@@ -169,31 +150,16 @@ spec:
- name: result2
steps:
- name: step1
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo -n "%s"| tee $(results.result1.path);
echo -n "%s"| tee $(results.result2.path);
- name: task2
- taskSpec:
- results:
- - name: result1
- - name: result2
- steps:
- - name: step1
- onError: continue
- image: mirror.gcr.io/alpine
- script: |
- echo -n "%s"| tee $(results.result2.path);
- # trigger an error
- not-a-command
- # This result will be skipped
- echo -n "%s"| tee $(results.result1.path);
- - name: task3
params:
- name: param1
value: "$(tasks.task1.results.result1)"
- name: param2
- value: "$(tasks.task2.results.result2)"
+ value: "$(tasks.task1.results.result2)"
taskSpec:
params:
- name: param1
@@ -206,28 +172,14 @@ spec:
- name: large-result
steps:
- name: step1
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo -n "$(params.param1)">> $(results.large-result.path);
echo -n "$(params.param2)">> $(results.large-result.path);
- - name: failed-task
- runAfter: ["task3"]
- taskSpec:
- results:
- - name: result1
- - name: result2
- steps:
- - name: step1
- image: mirror.gcr.io/alpine
- script: |
- echo -n "%s"| tee $(results.result1.path);
- echo -n "%s"| tee $(results.result2.path);
- #trigger a failure
- not-a-command
results:
- name: large-result
- value: $(tasks.task3.results.large-result)
-`, namespace, strings.Repeat("a", 2000), strings.Repeat("b", 2000), strings.Repeat("d", 2000), strings.Repeat("c", 2000), strings.Repeat("e", 2000), strings.Repeat("f", 2000)))
+ value: $(tasks.task2.results.large-result)
+`, namespace, strings.Repeat("a", 2000), strings.Repeat("b", 2000)))
expectedPipelineRun := parse.MustParseV1PipelineRun(t, fmt.Sprintf(`
metadata:
name: larger-results-sidecar-logs
@@ -248,33 +200,16 @@ spec:
type: string
steps:
- name: step1
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo -n "%s"| tee $(results.result1.path);
echo -n "%s"| tee $(results.result2.path);
- name: task2
- taskSpec:
- results:
- - name: result1
- type: string
- - name: result2
- type: string
- steps:
- - name: step1
- onError: continue
- image: mirror.gcr.io/alpine
- script: |
- echo -n "%s"| tee $(results.result2.path);
- # trigger an error
- not-a-command
- # This result will be skipped
- echo -n "%s"| tee $(results.result1.path);
- - name: task3
params:
- name: param1
value: "$(tasks.task1.results.result1)"
- name: param2
- value: "$(tasks.task2.results.result2)"
+ value: "$(tasks.task1.results.result2)"
taskSpec:
params:
- name: param1
@@ -288,29 +223,13 @@ spec:
type: string
steps:
- name: step1
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo -n "$(params.param1)">> $(results.large-result.path);
echo -n "$(params.param2)">> $(results.large-result.path);
- - name: failed-task
- runAfter: ["task3"]
- taskSpec:
- results:
- - name: result1
- type: string
- - name: result2
- type: string
- steps:
- - name: step1
- image: mirror.gcr.io/alpine
- script: |
- echo -n "%s"| tee $(results.result1.path);
- echo -n "%s"| tee $(results.result2.path);
- #trigger a failure
- not-a-command
results:
- name: large-result
- value: $(tasks.task3.results.large-result)
+ value: $(tasks.task2.results.large-result)
status:
pipelineSpec:
tasks:
@@ -323,33 +242,16 @@ status:
type: string
steps:
- name: step1
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo -n "%s"| tee $(results.result1.path);
echo -n "%s"| tee $(results.result2.path);
- name: task2
- taskSpec:
- results:
- - name: result1
- type: string
- - name: result2
- type: string
- steps:
- - name: step1
- image: mirror.gcr.io/alpine
- onError: continue
- script: |
- echo -n "%s"| tee $(results.result2.path);
- # trigger an error
- not-a-command
- # This result will be skipped
- echo -n "%s"| tee $(results.result1.path);
- - name: task3
params:
- name: param1
value: "$(tasks.task1.results.result1)"
- name: param2
- value: "$(tasks.task2.results.result2)"
+ value: "$(tasks.task1.results.result2)"
taskSpec:
params:
- name: param1
@@ -363,33 +265,17 @@ status:
type: string
steps:
- name: step1
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo -n "$(params.param1)">> $(results.large-result.path);
echo -n "$(params.param2)">> $(results.large-result.path);
- - name: failed-task
- runAfter: ["task3"]
- taskSpec:
- results:
- - name: result1
- type: string
- - name: result2
- type: string
- steps:
- - name: step1
- image: mirror.gcr.io/alpine
- script: |
- echo -n "%s"| tee $(results.result1.path);
- echo -n "%s"| tee $(results.result2.path);
- #trigger a failure
- not-a-command
results:
- name: large-result
- value: $(tasks.task3.results.large-result)
+ value: $(tasks.task2.results.large-result)
results:
- name: large-result
value: %s%s
-`, namespace, strings.Repeat("a", 2000), strings.Repeat("b", 2000), strings.Repeat("d", 2000), strings.Repeat("c", 2000), strings.Repeat("e", 2000), strings.Repeat("f", 2000), strings.Repeat("a", 2000), strings.Repeat("b", 2000), strings.Repeat("d", 2000), strings.Repeat("c", 2000), strings.Repeat("e", 2000), strings.Repeat("f", 2000), strings.Repeat("a", 2000), strings.Repeat("d", 2000)))
+`, namespace, strings.Repeat("a", 2000), strings.Repeat("b", 2000), strings.Repeat("a", 2000), strings.Repeat("b", 2000), strings.Repeat("a", 2000), strings.Repeat("b", 2000)))
taskRun1 := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
metadata:
name: larger-results-sidecar-logs-task1
@@ -405,7 +291,7 @@ spec:
type: string
steps:
- name: step1
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo -n "%s"| tee $(results.result1.path);
echo -n "%s"| tee $(results.result2.path);
@@ -423,7 +309,7 @@ status:
type: string
steps:
- name: step1
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo -n "%s"| tee /tekton/results/result1;
echo -n "%s"| tee /tekton/results/result2;
@@ -437,66 +323,11 @@ status:
sidecars:
- name: tekton-log-results
container: sidecar-tekton-log-results
- artifacts: {}
`, namespace, strings.Repeat("a", 2000), strings.Repeat("b", 2000), strings.Repeat("a", 2000), strings.Repeat("b", 2000), strings.Repeat("a", 2000), strings.Repeat("b", 2000)))
taskRun2 := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
metadata:
name: larger-results-sidecar-logs-task2
namespace: %s
-spec:
- serviceAccountName: default
- timeout: 1h
- taskSpec:
- results:
- - name: result1
- type: string
- - name: result2
- type: string
- steps:
- - name: step1
- onError: continue
- image: mirror.gcr.io/alpine
- script: |
- echo -n "%s"| tee $(results.result2.path);
- # trigger an error
- not-a-command
- # This result will be skipped
- echo -n "%s"| tee $(results.result1.path);
-status:
- conditions:
- - type: "Succeeded"
- status: "True"
- reason: "Succeeded"
- podName: larger-results-sidecar-logs-task2-pod
- taskSpec:
- results:
- - name: result1
- type: string
- - name: result2
- type: string
- steps:
- - name: step1
- onError: continue
- image: mirror.gcr.io/alpine
- script: |
- echo -n "%s"| tee /tekton/results/result2;
- # trigger an error
- not-a-command
- # This result will be skipped
- echo -n "%s"| tee /tekton/results/result1;
- results:
- - name: result2
- type: string
- value: %s
- sidecars:
- - name: tekton-log-results
- container: sidecar-tekton-log-results
- artifacts: {}
-`, namespace, strings.Repeat("d", 2000), strings.Repeat("c", 2000), strings.Repeat("d", 2000), strings.Repeat("c", 2000), strings.Repeat("d", 2000)))
- taskRun3 := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
-metadata:
- name: larger-results-sidecar-logs-task3
- namespace: %s
spec:
serviceAccountName: default
timeout: 1h
@@ -520,7 +351,7 @@ spec:
type: string
steps:
- name: step1
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo -n "$(params.param1)">> $(results.large-result.path);
echo -n "$(params.param2)">> $(results.large-result.path);
@@ -529,7 +360,7 @@ status:
- type: "Succeeded"
status: "True"
reason: "Succeeded"
- podName: larger-results-sidecar-logs-task3-pod
+ podName: larger-results-sidecar-logs-task2-pod
taskSpec:
params:
- name: param1
@@ -543,7 +374,7 @@ status:
type: string
steps:
- name: step1
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo -n "%s">> /tekton/results/large-result;
echo -n "%s">> /tekton/results/large-result;
@@ -554,60 +385,26 @@ status:
sidecars:
- name: tekton-log-results
container: sidecar-tekton-log-results
- artifacts: {}
-`, namespace, strings.Repeat("a", 2000), strings.Repeat("d", 2000), strings.Repeat("a", 2000), strings.Repeat("d", 2000), strings.Repeat("a", 2000), strings.Repeat("d", 2000)))
- taskRun4 := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
-metadata:
- name: larger-results-sidecar-logs-failed-task
- namespace: %s
-spec:
- serviceAccountName: default
- timeout: 1h
- taskSpec:
- results:
- - name: result1
- type: string
- - name: result2
- type: string
- steps:
- - name: step1
- image: mirror.gcr.io/alpine
- script: |
- echo -n "%s"| tee $(results.result1.path);
- echo -n "%s"| tee $(results.result2.path);
- #trigger a failure
- not-a-command
-status:
- conditions:
- - type: "Succeeded"
- status: "False"
- reason: "Failed"
- podName: larger-results-sidecar-logs-failed-task-pod
- taskSpec:
- results:
- - name: result1
- type: string
- - name: result2
- type: string
- steps:
- - name: step1
- image: mirror.gcr.io/alpine
- script: |
- echo -n "%s"| tee /tekton/results/result1;
- echo -n "%s"| tee /tekton/results/result2;
- #trigger a failure
- not-a-command
- results:
- - name: result1
- type: string
- value: %s
- - name: result2
- type: string
- value: %s
- sidecars:
- - name: tekton-log-results
- container: sidecar-tekton-log-results
- artifacts: {}
-`, namespace, strings.Repeat("e", 2000), strings.Repeat("f", 2000), strings.Repeat("e", 2000), strings.Repeat("f", 2000), strings.Repeat("e", 2000), strings.Repeat("f", 2000)))
- return pipelineRun, expectedPipelineRun, []*v1.TaskRun{taskRun1, taskRun2, taskRun3, taskRun4}
+`, namespace, strings.Repeat("a", 2000), strings.Repeat("b", 2000), strings.Repeat("a", 2000), strings.Repeat("b", 2000), strings.Repeat("a", 2000), strings.Repeat("b", 2000)))
+ return pipelineRun, expectedPipelineRun, []*v1.TaskRun{taskRun1, taskRun2}
+}
+
+func setUpSidecarLogs(ctx context.Context, t *testing.T, fn ...func(context.Context, *testing.T, *clients, string)) (*clients, string) {
+ t.Helper()
+ c, ns := setup(ctx, t)
+ configMapData := map[string]string{
+ "results-from": "sidecar-logs",
+ }
+
+ if err := updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), configMapData); err != nil {
+ t.Fatal(err)
+ }
+ return c, ns
+}
+
+func resetSidecarLogs(ctx context.Context, t *testing.T, c *clients, previousResultExtractionMethod string) {
+ t.Helper()
+ if err := updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{"results-from": previousResultExtractionMethod}); err != nil {
+ t.Fatal(err)
+ }
}
diff --git a/upstream/test/matrix_test.go b/upstream/test/matrix_test.go
index 04475140b02..6dcdec5fe4e 100644
--- a/upstream/test/matrix_test.go
+++ b/upstream/test/matrix_test.go
@@ -70,7 +70,7 @@ spec:
default: ""
steps:
- name: echo
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo "$(params.GOARCH) and $(params.version)"
`, namespace))
@@ -85,7 +85,7 @@ spec:
type: array
steps:
- name: produce-a-list-of-results
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
#!/usr/bin/env bash
echo -n "[\"linux/amd64\",\"linux/ppc64le\"]" | tee $(results.GOARCHs.path)
@@ -101,7 +101,7 @@ spec:
type: array
steps:
- name: produce-a-list-of-versions
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
#!/usr/bin/env bash
echo -n "[\"go1.17\",\"go1.18.1\"]" | tee $(results.versions.path)
@@ -206,9 +206,6 @@ spec:
Reason: "Succeeded",
Message: "All Steps have completed executing",
}}},
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Artifacts: &v1.Artifacts{},
- },
},
}, {
ObjectMeta: metav1.ObjectMeta{
@@ -238,9 +235,6 @@ spec:
Reason: "Succeeded",
Message: "All Steps have completed executing",
}}},
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Artifacts: &v1.Artifacts{},
- },
},
}, {
ObjectMeta: metav1.ObjectMeta{
@@ -267,9 +261,6 @@ spec:
Reason: "Succeeded",
Message: "All Steps have completed executing",
}}},
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Artifacts: &v1.Artifacts{},
- },
},
}, {
ObjectMeta: metav1.ObjectMeta{
@@ -296,9 +287,6 @@ spec:
Reason: "Succeeded",
Message: "All Steps have completed executing",
}}},
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Artifacts: &v1.Artifacts{},
- },
},
}, {
ObjectMeta: metav1.ObjectMeta{
@@ -319,9 +307,6 @@ spec:
Reason: "Succeeded",
Message: "All Steps have completed executing",
}}},
- TaskRunStatusFields: v1.TaskRunStatusFields{
- Artifacts: &v1.Artifacts{},
- },
},
}, {
ObjectMeta: metav1.ObjectMeta{
@@ -338,7 +323,6 @@ spec:
Type: "array",
Value: v1.ParamValue{Type: v1.ParamTypeArray, ArrayVal: []string{"linux/amd64", "linux/ppc64le"}},
}},
- Artifacts: &v1.Artifacts{},
},
Status: duckv1.Status{Conditions: []apis.Condition{{
Type: apis.ConditionSucceeded,
@@ -362,7 +346,6 @@ spec:
Type: "array",
Value: v1.ParamValue{Type: v1.ParamTypeArray, ArrayVal: []string{"go1.17", "go1.18.1"}},
}},
- Artifacts: &v1.Artifacts{},
},
Status: duckv1.Status{Conditions: []apis.Condition{{
Type: apis.ConditionSucceeded,
@@ -420,7 +403,7 @@ spec:
- name: exit-code
steps:
- name: echo
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
exit "$(params.exit-code)"
`, namespace))
diff --git a/upstream/test/parse/yaml.go b/upstream/test/parse/yaml.go
index 847084b9c26..68bc16e14b5 100644
--- a/upstream/test/parse/yaml.go
+++ b/upstream/test/parse/yaml.go
@@ -35,17 +35,6 @@ kind: StepAction
return &sa
}
-// MustParseV1beta1StepAction takes YAML and parses it into a *v1alpha1.StepAction
-func MustParseV1beta1StepAction(t *testing.T, yaml string) *v1beta1.StepAction {
- t.Helper()
- var sa v1beta1.StepAction
- yaml = `apiVersion: tekton.dev/v1beta1
-kind: StepAction
-` + yaml
- mustParseYAML(t, yaml, &sa)
- return &sa
-}
-
// MustParseV1beta1TaskRun takes YAML and parses it into a *v1beta1.TaskRun
func MustParseV1beta1TaskRun(t *testing.T, yaml string) *v1beta1.TaskRun {
t.Helper()
diff --git a/upstream/test/per_feature_flags_test.go b/upstream/test/per_feature_flags_test.go
index 4fb8f833005..4555d2010c9 100644
--- a/upstream/test/per_feature_flags_test.go
+++ b/upstream/test/per_feature_flags_test.go
@@ -47,8 +47,8 @@ const (
)
var (
- alphaFeatureFlags = []string{"enable-param-enum", "keep-pod-enabled-cancel", "enable-cel-in-whenexpression", "enable-artifacts"}
- betaFeatureFlags = []string{"enable-step-actions"}
+ alphaFeatureFlags = []string{"enable-param-enum", "enable-step-actions", "keep-pod-enabled-cancel", "enable-cel-in-whenexpression", "enable-artifacts"}
+ betaFeatureFlags = []string{}
perFeatureFlags = map[string][]string{
"alpha": alphaFeatureFlags,
"beta": betaFeatureFlags,
@@ -152,9 +152,9 @@ spec:
results:
- name: result
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: 'echo $(params["text"])'
- - image: mirror.gcr.io/busybox
+ - image: busybox
# Sleep for N seconds so that we can check that tasks that
# should be run in parallel have overlap.
script: |
@@ -269,7 +269,7 @@ spec:
- name: result1
steps:
- name: failing-step
- image: mirror.gcr.io/busybox
+ image: busybox
script: 'echo -n 123 | tee $(results.result1.path); exit 1'
finally:
- name: finaltask1
@@ -280,7 +280,7 @@ spec:
params:
- name: param1
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: 'echo $(params.param1);exit 0'
`, helpers.ObjectNameForTest(t)))
@@ -356,14 +356,14 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo $(params.HELLO)
finally:
- name: echo-hello-finally
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo $(params.HELLO)
`, namespace))
@@ -397,14 +397,14 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo $(params.HELLO)
finally:
- name: echo-hello-finally
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo $(params.HELLO)
status:
pipelineSpec:
@@ -413,14 +413,14 @@ status:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo Hello World!
finally:
- name: echo-hello-finally
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo Hello World!
`, namespace))
@@ -443,7 +443,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo Hello World!
status:
podName: propagated-parameters-fully-echo-hello-pod
@@ -453,7 +453,7 @@ status:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo Hello World!
`, namespace))
@@ -466,7 +466,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo Hello World!
status:
podName: propagated-parameters-fully-echo-hello-finally-pod
@@ -476,7 +476,7 @@ status:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo Hello World!
`, namespace))
diff --git a/upstream/test/pipelinefinally_test.go b/upstream/test/pipelinefinally_test.go
index 4173a446556..50397307012 100644
--- a/upstream/test/pipelinefinally_test.go
+++ b/upstream/test/pipelinefinally_test.go
@@ -788,7 +788,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
script: 'exit 0'
`, helpers.ObjectNameForTest(t), namespace))
}
@@ -801,7 +801,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
script: 'exit 1'
`, helpers.ObjectNameForTest(t), namespace))
}
@@ -814,7 +814,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
script: 'sleep 5; exit 0'
`, helpers.ObjectNameForTest(t), namespace))
}
@@ -827,7 +827,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
script: 'exit 0'
params:
- name: dagtask1-status
@@ -845,7 +845,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
script: 'echo -n "Hello" > $(results.result.path)'
results:
- name: result
@@ -890,7 +890,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
script: 'sleep 5; echo -n "Hello" > $(results.result.path)'
results:
- name: result
@@ -905,7 +905,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
script: 'exit 0'
params:
- name: %s
diff --git a/upstream/test/pipelinerun_test.go b/upstream/test/pipelinerun_test.go
index 06b4448abd3..64b8534f862 100644
--- a/upstream/test/pipelinerun_test.go
+++ b/upstream/test/pipelinerun_test.go
@@ -68,7 +68,7 @@ spec:
- name: HELLO
default: "Hi!"
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
script: |
#!/usr/bin/env bash
echo "$(params.HELLO)"
@@ -91,6 +91,8 @@ spec:
}}
for i, td := range tds {
+ i := i // capture range variable
+ td := td // capture range variable
t.Run(td.name, func(t *testing.T) {
t.Parallel()
ctx := context.Background()
@@ -295,6 +297,8 @@ spec:
}}
for i, td := range tds {
+ i := i // capture range variable
+ td := td // capture range variable
t.Run(td.name, func(t *testing.T) {
t.Parallel()
ctx := context.Background()
@@ -440,7 +444,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
# Sleep for 10s
@@ -450,7 +454,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
# Sleep for another 10s
@@ -512,7 +516,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
command: ['/bin/bash']
args: ['-c', 'echo hello, world']
`, taskName, namespace)), metav1.CreateOptions{}); err != nil {
@@ -582,10 +586,10 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
name: write-data-task-0-step-0
script: echo stuff | tee $(results.result-stuff.path)
- - image: mirror.gcr.io/busybox
+ - image: busybox
name: write-data-task-0-step-1
script: echo other | tee $(results.result-other.path)
results:
@@ -600,10 +604,10 @@ spec:
params:
- name: check-stuff
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
name: read-from-task-0
script: echo $(params.check-stuff)
- - image: mirror.gcr.io/busybox
+ - image: busybox
name: write-data-task-1
script: echo | tee $(results.result-something.path)
results:
@@ -618,9 +622,9 @@ spec:
- name: check-other
steps:
- script: echo $(params.check-other)
- image: mirror.gcr.io/busybox
+ image: busybox
name: read-from-task-0
- - image: mirror.gcr.io/busybox
+ - image: busybox
name: write-data-task-1
script: echo something | tee $(results.result-else.path)
results:
@@ -637,7 +641,7 @@ spec:
- name: workspacepath-else
value: $(tasks.create-file.results.result-else)
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: echo params.workspacepath-something
`, helpers.ObjectNameForTest(t), namespace)),
}
@@ -971,7 +975,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
command: ['/bin/bash']
args: ['-c', 'echo hello, world']
`, taskName, namespace)), metav1.CreateOptions{}); err != nil {
@@ -1007,7 +1011,7 @@ spec:
- name: abc
steps:
- name: update-sa
- image: mirror.gcr.io/bash
+ image: bash:latest
script: |
echo 'test' > $(results.abc.path)
exit 1
diff --git a/upstream/test/premption_test.go b/upstream/test/premption_test.go
index a8dd50e36df..59373503b55 100644
--- a/upstream/test/premption_test.go
+++ b/upstream/test/premption_test.go
@@ -62,7 +62,7 @@ spec:
memory: 5000Gi
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
echo "Good Morning!" > $(workspaces.task-ws.path)
diff --git a/upstream/test/propagated_params_test.go b/upstream/test/propagated_params_test.go
index 87429a03b78..4e615201124 100644
--- a/upstream/test/propagated_params_test.go
+++ b/upstream/test/propagated_params_test.go
@@ -66,6 +66,7 @@ func TestPropagatedParams(t *testing.T) {
}}
for _, td := range tds {
+ td := td
t.Run(td.name, func(t *testing.T) {
t.Parallel()
ctx := context.Background()
@@ -150,14 +151,14 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo $(params.HELLO)
finally:
- name: echo-hello-finally
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo $(params.HELLO)
`, namespace))
expectedPipelineRun := parse.MustParseV1PipelineRun(t, fmt.Sprintf(`
@@ -176,14 +177,14 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo $(params.HELLO)
finally:
- name: echo-hello-finally
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo $(params.HELLO)
status:
pipelineSpec:
@@ -192,14 +193,14 @@ status:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo Hello World!
finally:
- name: echo-hello-finally
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo Hello World!
`, namespace))
taskRun := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
@@ -211,18 +212,17 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo Hello World!
status:
podName: propagated-parameters-fully-echo-hello-pod
- artifacts: {}
steps:
- name: echo
container: step-echo
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo Hello World!
`, namespace))
finallyTaskRun := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
@@ -234,10 +234,9 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo Hello World!
status:
- artifacts: {}
podName: propagated-parameters-fully-echo-hello-finally-pod
steps:
- name: echo
@@ -245,7 +244,7 @@ status:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo Hello World!
`, namespace))
return pipelineRun, expectedPipelineRun, []*v1.TaskRun{taskRun, finallyTaskRun}
@@ -270,7 +269,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo $(params.HELLO)
`, namespace))
expectedPipelineRun := parse.MustParseV1PipelineRun(t, fmt.Sprintf(`
@@ -292,7 +291,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo $(params.HELLO)
status:
pipelineSpec:
@@ -304,7 +303,7 @@ status:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo Hello World!
`, namespace))
taskRun := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
@@ -319,10 +318,9 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo Hello World!
status:
- artifacts: {}
podName: propagated-parameters-task-level-echo-hello-pod
steps:
- name: echo
@@ -330,7 +328,7 @@ status:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo Hello World!
`, namespace))
return pipelineRun, expectedPipelineRun, []*v1.TaskRun{taskRun}
@@ -356,7 +354,7 @@ spec:
default: "Default Hello World"
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo $(params.HELLO)
`, namespace))
expectedPipelineRun := parse.MustParseV1PipelineRun(t, fmt.Sprintf(`
@@ -379,7 +377,7 @@ spec:
default: "Default Hello World"
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo $(params.HELLO)
status:
pipelineSpec:
@@ -392,7 +390,7 @@ status:
default: "Default Hello World"
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo Hello World!
`, namespace))
taskRun := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
@@ -408,10 +406,9 @@ spec:
default: "Default Hello World"
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo Hello World!
status:
- artifacts: {}
podName: propagated-parameters-default-task-level-echo-hello-pod
steps:
- name: echo
@@ -423,7 +420,7 @@ status:
default: "Default Hello World"
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo Hello World!
`, namespace))
return pipelineRun, expectedPipelineRun, []*v1.TaskRun{taskRun}
diff --git a/upstream/test/propagated_results_test.go b/upstream/test/propagated_results_test.go
index 6e1a1b87fc9..57fee1be165 100644
--- a/upstream/test/propagated_results_test.go
+++ b/upstream/test/propagated_results_test.go
@@ -43,8 +43,7 @@ func TestPropagatedResults(t *testing.T) {
ignorePipelineRunStatusFields := cmpopts.IgnoreFields(v1.PipelineRunStatusFields{}, "Provenance")
ignoreTaskRunStatus := cmpopts.IgnoreFields(v1.TaskRunStatusFields{}, "StartTime", "CompletionTime", "Sidecars", "Provenance")
requireAlphaFeatureFlag = requireAnyGate(map[string]string{
- "enable-api-fields": "alpha",
- })
+ "enable-api-fields": "alpha"})
type tests struct {
name string
@@ -59,6 +58,7 @@ func TestPropagatedResults(t *testing.T) {
}}
for _, td := range tds {
+ td := td
t.Run(td.name, func(t *testing.T) {
t.Parallel()
ctx := context.Background()
@@ -151,17 +151,17 @@ spec:
}
steps:
- name: add-str-uid
- image: mirror.gcr.io/busybox
+ image: busybox
command: ["/bin/sh", "-c"]
args:
- echo "1001" | tee $(results.strUid.path)
- name: add-array-uid
- image: mirror.gcr.io/busybox
+ image: busybox
command: ["/bin/sh", "-c"]
args:
- echo "[\"1002\", \"1003\"]" | tee $(results.arrayUid.path)
- name: add-map-uid
- image: mirror.gcr.io/busybox
+ image: busybox
command: ["/bin/sh", "-c"]
args:
- echo -n "{\"uid\":\"1004\"}" | tee $(results.mapUid.path)
@@ -169,19 +169,19 @@ spec:
taskSpec:
steps:
- name: show-str-uid
- image: mirror.gcr.io/busybox
+ image: busybox
command: ["/bin/sh", "-c"]
args:
- echo
- $(tasks.make-uid.results.strUid)
- name: show-array-uid
- image: mirror.gcr.io/busybox
+ image: busybox
command: ["/bin/sh", "-c"]
args:
- echo
- $(tasks.make-uid.results.arrayUid[*])
- name: show-map-uid
- image: mirror.gcr.io/busybox
+ image: busybox
command: ["/bin/sh", "-c"]
args:
- echo
@@ -212,21 +212,21 @@ spec:
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: add-str-uid
- args:
- echo "[\"1002\", \"1003\"]" | tee $(results.arrayUid.path)
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: add-array-uid
- args:
- echo -n "{\"uid\":\"1004\"}" | tee $(results.mapUid.path)
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: add-map-uid
- name: show-uid
taskSpec:
@@ -237,7 +237,7 @@ spec:
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: show-str-uid
- args:
- echo
@@ -245,7 +245,7 @@ spec:
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: show-array-uid
- args:
- echo
@@ -253,7 +253,7 @@ spec:
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: show-map-uid
timeouts:
pipeline: 1h0m0s
@@ -278,21 +278,21 @@ status:
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: add-str-uid
- args:
- echo "[\"1002\", \"1003\"]" | tee $(results.arrayUid.path)
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: add-array-uid
- args:
- echo -n "{\"uid\":\"1004\"}" | tee $(results.mapUid.path)
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: add-map-uid
- name: show-uid
taskSpec:
@@ -303,7 +303,7 @@ status:
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: show-str-uid
- args:
- echo
@@ -311,7 +311,7 @@ status:
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: show-array-uid
- args:
- echo
@@ -319,7 +319,7 @@ status:
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: show-map-uid
`, namespace))
makeUidTaskRun := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
@@ -344,25 +344,24 @@ spec:
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: add-str-uid
- args:
- echo "[\"1002\", \"1003\"]" | tee $(results.arrayUid.path)
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: add-array-uid
- args:
- echo -n "{\"uid\":\"1004\"}" | tee $(results.mapUid.path)
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: add-map-uid
timeout: 1h0m0s
status:
- artifacts: {}
podName: propagated-all-type-results-make-uid-pod
results:
- name: strUid
@@ -402,21 +401,21 @@ status:
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: add-str-uid
- args:
- echo "[\"1002\", \"1003\"]" | tee /tekton/results/arrayUid
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: add-array-uid
- args:
- echo -n "{\"uid\":\"1004\"}" | tee /tekton/results/mapUid
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: add-map-uid
`, namespace))
showUidTaskRun := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
@@ -433,7 +432,7 @@ spec:
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: show-str-uid
- args:
- echo
@@ -442,7 +441,7 @@ spec:
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: show-array-uid
- args:
- echo
@@ -450,11 +449,10 @@ spec:
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: show-map-uid
timeout: 1h0m0s
status:
- artifacts: {}
podName: propagated-all-type-results-show-uid-pod
steps:
- container: step-show-str-uid
@@ -472,7 +470,7 @@ status:
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: show-str-uid
- args:
- echo
@@ -481,7 +479,7 @@ status:
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: show-array-uid
- args:
- echo
@@ -489,7 +487,7 @@ status:
command:
- /bin/sh
- -c
- image: mirror.gcr.io/busybox
+ image: busybox
name: show-map-uid
`, namespace))
return pipelineRun, expectedPipelineRun, []*v1.TaskRun{makeUidTaskRun, showUidTaskRun}
diff --git a/upstream/test/remoteresolution/resolution.go b/upstream/test/remoteresolution/resolution.go
deleted file mode 100644
index 05274a7a182..00000000000
--- a/upstream/test/remoteresolution/resolution.go
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
-Copyright 2024 The Tekton Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package test
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
-
- "github.com/google/go-cmp/cmp"
- pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- resource "github.com/tektoncd/pipeline/pkg/remoteresolution/resource"
- resolution "github.com/tektoncd/pipeline/pkg/resolution/common"
- "github.com/tektoncd/pipeline/test/diff"
-)
-
-var _ resource.Requester = &Requester{}
-var _ resolution.ResolvedResource = &ResolvedResource{}
-
-// NewResolvedResource creates a mock resolved resource that is
-// populated with the given data and annotations or returns the given
-// error from its Data() method.
-func NewResolvedResource(data []byte, annotations map[string]string, source *pipelinev1.RefSource, dataErr error) *ResolvedResource {
- return &ResolvedResource{
- ResolvedData: data,
- ResolvedAnnotations: annotations,
- ResolvedRefSource: source,
- DataErr: dataErr,
- }
-}
-
-// NewRequester creates a mock requester that resolves to the given
-// resource or returns the given error on Submit().
-func NewRequester(resource resolution.ResolvedResource, err error, resolverPayload resource.ResolverPayload) *Requester {
- return &Requester{
- ResolvedResource: resource,
- SubmitErr: err,
- ResolverPayload: resolverPayload,
- }
-}
-
-// Requester implements resolution.Requester and makes it easier
-// to mock the outcome of a remote pipelineRef or taskRef resolution.
-type Requester struct {
- // The resolved resource object to return when a request is
- // submitted.
- ResolvedResource resolution.ResolvedResource
- // An error to return when a request is submitted.
- SubmitErr error
- // ResolverPayload that should match that of the request in order to return the resolved resource
- ResolverPayload resource.ResolverPayload
-}
-
-// Submit implements resolution.Requester, accepting the name of a
-// resolver and a request for a specific remote file, and then returns
-// whatever mock data was provided on initialization.
-func (r *Requester) Submit(ctx context.Context, resolverName resolution.ResolverName, req resource.Request) (resolution.ResolvedResource, error) {
- if (r.ResolverPayload == resource.ResolverPayload{} || r.ResolverPayload.ResolutionSpec == nil || len(r.ResolverPayload.ResolutionSpec.Params) == 0) {
- return r.ResolvedResource, r.SubmitErr
- }
- if r.ResolverPayload.ResolutionSpec.URL == "" {
- return r.ResolvedResource, r.SubmitErr
- }
- reqParams := make(map[string]pipelinev1.ParamValue)
- for _, p := range req.ResolverPayload().ResolutionSpec.Params {
- reqParams[p.Name] = p.Value
- }
-
- var wrongParams []string
- for _, p := range r.ResolverPayload.ResolutionSpec.Params {
- if reqValue, ok := reqParams[p.Name]; !ok {
- wrongParams = append(wrongParams, fmt.Sprintf("expected %s param to be %#v, but was %#v", p.Name, p.Value, reqValue))
- } else if d := cmp.Diff(p.Value, reqValue); d != "" {
- wrongParams = append(wrongParams, fmt.Sprintf("%s param did not match: %s", p.Name, diff.PrintWantGot(d)))
- }
- }
- if len(wrongParams) > 0 {
- return nil, errors.New(strings.Join(wrongParams, "; "))
- }
- if r.ResolverPayload.ResolutionSpec.URL != req.ResolverPayload().ResolutionSpec.URL {
- return nil, fmt.Errorf("Resolution name did not match. Got %s; Want %s", req.ResolverPayload().ResolutionSpec.URL, r.ResolverPayload.ResolutionSpec.URL)
- }
-
- return r.ResolvedResource, r.SubmitErr
-}
-
-// ResolvedResource implements resolution.ResolvedResource and makes
-// it easier to mock the resolved content of a fetched pipeline or task.
-type ResolvedResource struct {
- // The resolved bytes to return when resolution is complete.
- ResolvedData []byte
- // An error to return instead of the resolved bytes after
- // resolution completes.
- DataErr error
- // Annotations to return when resolution is complete.
- ResolvedAnnotations map[string]string
- // ResolvedRefSource to return the source reference of the remote data
- ResolvedRefSource *pipelinev1.RefSource
-}
-
-// Data implements resolution.ResolvedResource and returns the mock
-// data and/or error given to it on initialization.
-func (r *ResolvedResource) Data() ([]byte, error) {
- return r.ResolvedData, r.DataErr
-}
-
-// Annotations implements resolution.ResolvedResource and returns
-// the mock annotations given to it on initialization.
-func (r *ResolvedResource) Annotations() map[string]string {
- return r.ResolvedAnnotations
-}
-
-// RefSource is the source reference of the remote data that records where the remote
-// file came from including the url, digest and the entrypoint.
-func (r *ResolvedResource) RefSource() *pipelinev1.RefSource {
- return r.ResolvedRefSource
-}
-
-// RawRequest stores the raw request data
-type RawRequest struct {
- ResolverPayload resource.ResolverPayload
-}
-
-// Request returns a Request interface based on the RawRequest.
-func (r *RawRequest) Request() resource.Request {
- if r == nil {
- r = &RawRequest{}
- }
- return &Request{
- RawRequest: *r,
- }
-}
-
-// Request implements resolution.Request and makes it easier to mock input for submit
-// Using inline structs is to avoid conflicts between field names and method names.
-type Request struct {
- RawRequest
-}
-
-var _ resource.Request = &Request{}
-
-// NewRequest creates a mock request that is populated with the given name namespace and params
-func NewRequest(resolverPayload resource.ResolverPayload) *Request {
- return &Request{
- RawRequest: RawRequest{
- ResolverPayload: resolverPayload,
- },
- }
-}
-
-// Params implements resolution.Request and returns the mock params given to it on initialization.
-func (r *Request) ResolverPayload() resource.ResolverPayload {
- return r.RawRequest.ResolverPayload
-}
-
-var _ resource.Request = &Request{}
diff --git a/upstream/test/resolution/resolution.go b/upstream/test/resolution.go
similarity index 91%
rename from upstream/test/resolution/resolution.go
rename to upstream/test/resolution.go
index b8b940658f9..514988427f2 100644
--- a/upstream/test/resolution/resolution.go
+++ b/upstream/test/resolution.go
@@ -24,16 +24,16 @@ import (
"github.com/google/go-cmp/cmp"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- common "github.com/tektoncd/pipeline/pkg/resolution/common"
+ resolution "github.com/tektoncd/pipeline/pkg/resolution/common"
"github.com/tektoncd/pipeline/test/diff"
)
-var _ common.Requester = &Requester{}
-var _ common.ResolvedResource = &ResolvedResource{}
+var _ resolution.Requester = &Requester{}
+var _ resolution.ResolvedResource = &ResolvedResource{}
// NewRequester creates a mock requester that resolves to the given
// resource or returns the given error on Submit().
-func NewRequester(resource common.ResolvedResource, err error) *Requester {
+func NewRequester(resource resolution.ResolvedResource, err error) *Requester {
return &Requester{
ResolvedResource: resource,
SubmitErr: err,
@@ -57,7 +57,7 @@ func NewResolvedResource(data []byte, annotations map[string]string, source *pip
type Requester struct {
// The resolved resource object to return when a request is
// submitted.
- ResolvedResource common.ResolvedResource
+ ResolvedResource resolution.ResolvedResource
// An error to return when a request is submitted.
SubmitErr error
// Params that should match those on the request in order to return the resolved resource
@@ -67,7 +67,7 @@ type Requester struct {
// Submit implements resolution.Requester, accepting the name of a
// resolver and a request for a specific remote file, and then returns
// whatever mock data was provided on initialization.
-func (r *Requester) Submit(ctx context.Context, resolverName common.ResolverName, req common.Request) (common.ResolvedResource, error) {
+func (r *Requester) Submit(ctx context.Context, resolverName resolution.ResolverName, req resolution.Request) (resolution.ResolvedResource, error) {
if len(r.Params) == 0 {
return r.ResolvedResource, r.SubmitErr
}
@@ -134,7 +134,7 @@ type RawRequest struct {
}
// Request returns a Request interface based on the RawRequest.
-func (r *RawRequest) Request() common.Request {
+func (r *RawRequest) Request() resolution.Request {
if r == nil {
r = &RawRequest{}
}
@@ -149,7 +149,7 @@ type Request struct {
RawRequest
}
-var _ common.Request = &Request{}
+var _ resolution.Request = &Request{}
// NewRequest creates a mock request that is populated with the given name namespace and params
func NewRequest(name, namespace string, params []pipelinev1.Param) *Request {
diff --git a/upstream/test/resolvers_test.go b/upstream/test/resolvers_test.go
index 813e6b34d47..1979a1703c2 100644
--- a/upstream/test/resolvers_test.go
+++ b/upstream/test/resolvers_test.go
@@ -37,7 +37,7 @@ import (
resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun"
- gitresolution "github.com/tektoncd/pipeline/pkg/resolution/resolver/git"
+ "github.com/tektoncd/pipeline/pkg/resolution/resolver/git"
"github.com/tektoncd/pipeline/test/parse"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -388,7 +388,7 @@ spec:
taskSpec:
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
# Sleep for 10s
@@ -484,24 +484,24 @@ func TestGitResolver_API(t *testing.T) {
resovlerNS := resolverconfig.ResolversNamespace(systemNamespace)
- originalConfigMap, err := c.KubeClient.CoreV1().ConfigMaps(resovlerNS).Get(ctx, gitresolution.ConfigMapName, metav1.GetOptions{})
+ originalConfigMap, err := c.KubeClient.CoreV1().ConfigMaps(resovlerNS).Get(ctx, git.ConfigMapName, metav1.GetOptions{})
if err != nil {
- t.Fatalf("Failed to get ConfigMap `%s`: %s", gitresolution.ConfigMapName, err)
+ t.Fatalf("Failed to get ConfigMap `%s`: %s", git.ConfigMapName, err)
}
originalConfigMapData := originalConfigMap.Data
- t.Logf("Creating ConfigMap %s", gitresolution.ConfigMapName)
+ t.Logf("Creating ConfigMap %s", git.ConfigMapName)
configMapData := map[string]string{
- gitresolution.ServerURLKey: fmt.Sprint("http://", net.JoinHostPort(giteaClusterHostname, "3000")),
- gitresolution.SCMTypeKey: "gitea",
- gitresolution.APISecretNameKey: tokenSecretName,
- gitresolution.APISecretKeyKey: scmTokenSecretKey,
- gitresolution.APISecretNamespaceKey: namespace,
+ git.ServerURLKey: fmt.Sprint("http://", net.JoinHostPort(giteaClusterHostname, "3000")),
+ git.SCMTypeKey: "gitea",
+ git.APISecretNameKey: tokenSecretName,
+ git.APISecretKeyKey: scmTokenSecretKey,
+ git.APISecretNamespaceKey: namespace,
}
- if err := updateConfigMap(ctx, c.KubeClient, resovlerNS, gitresolution.ConfigMapName, configMapData); err != nil {
+ if err := updateConfigMap(ctx, c.KubeClient, resovlerNS, git.ConfigMapName, configMapData); err != nil {
t.Fatal(err)
}
- defer resetConfigMap(ctx, t, c, resovlerNS, gitresolution.ConfigMapName, originalConfigMapData)
+ defer resetConfigMap(ctx, t, c, resovlerNS, git.ConfigMapName, originalConfigMapData)
trName := helpers.ObjectNameForTest(t)
tr := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
@@ -533,70 +533,6 @@ spec:
}
}
-func TestGitResolver_API_Identifier(t *testing.T) {
- ctx := context.Background()
- c, namespace := setup(ctx, t, gitFeatureFlags)
-
- t.Parallel()
-
- knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
- defer tearDown(ctx, t, c, namespace)
-
- giteaClusterHostname, tokenSecretName := setupGitea(ctx, t, c, namespace)
-
- resovlerNS := resolverconfig.ResolversNamespace(systemNamespace)
-
- originalConfigMap, err := c.KubeClient.CoreV1().ConfigMaps(resovlerNS).Get(ctx, gitresolution.ConfigMapName, metav1.GetOptions{})
- if err != nil {
- t.Fatalf("Failed to get ConfigMap `%s`: %s", gitresolution.ConfigMapName, err)
- }
- originalConfigMapData := originalConfigMap.Data
-
- t.Logf("Creating ConfigMap %s", gitresolution.ConfigMapName)
- configMapData := map[string]string{
- "test." + gitresolution.ServerURLKey: fmt.Sprint("http://", net.JoinHostPort(giteaClusterHostname, "3000")),
- "test." + gitresolution.SCMTypeKey: "gitea",
- "test." + gitresolution.APISecretNameKey: tokenSecretName,
- "test." + gitresolution.APISecretKeyKey: scmTokenSecretKey,
- "test." + gitresolution.APISecretNamespaceKey: namespace,
- }
- if err := updateConfigMap(ctx, c.KubeClient, resovlerNS, gitresolution.ConfigMapName, configMapData); err != nil {
- t.Fatal(err)
- }
- defer resetConfigMap(ctx, t, c, resovlerNS, gitresolution.ConfigMapName, originalConfigMapData)
-
- trName := helpers.ObjectNameForTest(t)
- tr := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
-metadata:
- name: %s
- namespace: %s
-spec:
- taskRef:
- resolver: git
- params:
- - name: revision
- value: %s
- - name: pathInRepo
- value: %s
- - name: org
- value: %s
- - name: repo
- value: %s
- - name: configKey
- value: test
-`, trName, namespace, scmRemoteBranch, scmRemoteTaskPath, scmRemoteOrg, scmRemoteRepo))
-
- _, err = c.V1TaskRunClient.Create(ctx, tr, metav1.CreateOptions{})
- if err != nil {
- t.Fatalf("Failed to create TaskRun: %v", err)
- }
-
- t.Logf("Waiting for TaskRun %s in namespace %s to complete", trName, namespace)
- if err := WaitForTaskRunState(ctx, c, trName, TaskRunSucceed(trName), "TaskRunSuccess", v1Version); err != nil {
- t.Fatalf("Error waiting for TaskRun %s to finish: %s", trName, err)
- }
-}
-
// setupGitea reads git-resolver/gitea.yaml, replaces "default" namespace references in "namespace: default" and
// svc.cluster.local hostnames with the test namespace, calls kubectl create, and waits for the gitea-0 pod to be up
// and running. At that point, it'll create a test user and token, create a Secret containing that token, create an org
@@ -652,7 +588,7 @@ spec:
- name: token
type: string
steps:
- - image: docker.io/alpine/curl
+ - image: alpine/curl
script: |
#!/bin/ash
curl -X POST "http://gitea_admin:%s@%s:3000/api/v1/admin/users" -H "accept: application/json" -H "Content-Type: application/json" -d '%s'
diff --git a/upstream/test/resolvers_yaml/pipeline-in-git.yaml b/upstream/test/resolvers_yaml/pipeline-in-git.yaml
index 9006f86af50..bf88408fad2 100644
--- a/upstream/test/resolvers_yaml/pipeline-in-git.yaml
+++ b/upstream/test/resolvers_yaml/pipeline-in-git.yaml
@@ -12,7 +12,7 @@ spec:
app: "example"
steps:
- name: echo
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: |
#!/usr/bin/env bash
echo "Good Morning!"
diff --git a/upstream/test/retry_test.go b/upstream/test/retry_test.go
index fbe8ed8776c..22e388d0ed1 100644
--- a/upstream/test/retry_test.go
+++ b/upstream/test/retry_test.go
@@ -55,7 +55,7 @@ spec:
retries: %d
taskSpec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: exit 1
`, pipelineRunName, numRetries)), metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to create PipelineRun %q: %v", pipelineRunName, err)
diff --git a/upstream/test/serviceaccount_test.go b/upstream/test/serviceaccount_test.go
index 3ee469979db..2e5013ce69c 100644
--- a/upstream/test/serviceaccount_test.go
+++ b/upstream/test/serviceaccount_test.go
@@ -123,17 +123,17 @@ spec:
- name: task1
taskSpec:
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
script: echo task1
- name: task2
taskSpec:
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
script: echo task2
- name: task3
taskSpec:
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
script: echo task3
`, helpers.ObjectNameForTest(t), namespace))
if _, err := c.V1PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil {
@@ -242,7 +242,7 @@ spec:
taskSpec:
metadata: {}
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
script: echo task1
`, helpers.ObjectNameForTest(t), namespace))
if _, err := c.V1PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil {
diff --git a/upstream/test/sidecar_test.go b/upstream/test/sidecar_test.go
index f21b32eb3c1..7e6ccb23a40 100644
--- a/upstream/test/sidecar_test.go
+++ b/upstream/test/sidecar_test.go
@@ -61,13 +61,8 @@ func TestSidecarTaskSupport(t *testing.T) {
t.Parallel()
for _, test := range tests {
+ test := test
t.Run(test.desc, func(t *testing.T) {
- // If Kubernetes Sidecar support is enabled the Pod will terminate and it gets caught as an error though it's expected
- ff := getFeatureFlagsBaseOnAPIFlag(t)
-
- if ff.EnableKubernetesSidecar {
- t.SkipNow()
- }
t.Parallel()
ctx, cancel := context.WithCancel(ctx)
@@ -85,11 +80,11 @@ metadata:
spec:
steps:
- name: %s
- image: mirror.gcr.io/busybox
+ image: busybox
command: [%s]
sidecars:
- name: %s
- image: mirror.gcr.io/busybox
+ image: busybox
command: [%s]
`, sidecarTaskName, namespace, primaryContainerName, stringSliceToYAMLArray(test.stepCommand), sidecarContainerName, stringSliceToYAMLArray(test.sidecarCommand)))
diff --git a/upstream/test/start_time_test.go b/upstream/test/start_time_test.go
index ad2f29db289..48812ae9218 100644
--- a/upstream/test/start_time_test.go
+++ b/upstream/test/start_time_test.go
@@ -56,15 +56,15 @@ metadata:
spec:
taskSpec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: sleep 2
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: sleep 2
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: sleep 2
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: sleep 2
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: sleep 2
`, helpers.ObjectNameForTest(t), namespace)), metav1.CreateOptions{})
if err != nil {
diff --git a/upstream/test/status_test.go b/upstream/test/status_test.go
index 77d249ea42b..6f10b6d8c73 100644
--- a/upstream/test/status_test.go
+++ b/upstream/test/status_test.go
@@ -60,7 +60,7 @@ metadata:
spec:
steps:
- name: foo
- image: mirror.gcr.io/busybox
+ image: busybox
command: ['ls', '-la']`, helpers.ObjectNameForTest(t)))
if _, err := c.V1TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to create Task: %s", err)
@@ -234,7 +234,7 @@ spec:
- name: HELLO
default: "hello world!"
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
script: |
#!/usr/bin/env bash
echo "$(params.HELLO)"
diff --git a/upstream/test/step_when_test.go b/upstream/test/step_when_test.go
deleted file mode 100644
index adc0e8df01f..00000000000
--- a/upstream/test/step_when_test.go
+++ /dev/null
@@ -1,469 +0,0 @@
-//go:build e2e
-// +build e2e
-
-// /*
-// Copyright 2024 The Tekton Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// */
-package test
-
-import (
- "context"
- "fmt"
- "strconv"
- "testing"
-
- "github.com/tektoncd/pipeline/pkg/apis/config"
- v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- "github.com/tektoncd/pipeline/test/parse"
-
- "github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "knative.dev/pkg/system"
- knativetest "knative.dev/pkg/test"
- "knative.dev/pkg/test/helpers"
-)
-
-func TestWhenExpressionsInStep(t *testing.T) {
- tests := []struct {
- desc string
- expected []v1.StepState
- taskTemplate string
- }{
- {
- desc: "single step, when is false, skipped",
- expected: []v1.StepState{{
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- ExitCode: 0,
- Reason: "Completed",
- },
- },
- TerminationReason: "Skipped",
- Name: "foo",
- Container: "step-foo",
- }},
- taskTemplate: `
-metadata:
- name: %s
- namespace: %s
-spec:
- steps:
- - name: foo
- image: busybox
- command: ['/bin/sh']
- args: ['-c', 'echo hello']
- when:
- - input: "foo"
- operator: in
- values: [ "bar" ]
-`,
- },
- {
- desc: "single step, when is true, completed",
- expected: []v1.StepState{{
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- ExitCode: 0,
- Reason: "Completed",
- },
- },
- TerminationReason: "Completed",
- Name: "foo",
- Container: "step-foo",
- }},
- taskTemplate: `
-metadata:
- name: %s
- namespace: %s
-spec:
- steps:
- - name: foo
- image: busybox
- command: ['/bin/sh']
- args: ['-c', 'echo hello']
- when:
- - input: "foo"
- operator: in
- values: [ "foo" ]
-`,
- },
- {
- desc: "two steps, first when is false, skipped and second step complete",
- expected: []v1.StepState{{
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- ExitCode: 0,
- Reason: "Completed",
- },
- },
- TerminationReason: "Skipped",
- Name: "foo",
- Container: "step-foo",
- }, {
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- ExitCode: 0,
- Reason: "Completed",
- },
- },
- TerminationReason: "Completed",
- Name: "bar",
- Container: "step-bar",
- }},
- taskTemplate: `
-metadata:
- name: %s
- namespace: %s
-spec:
- steps:
- - name: foo
- image: busybox
- command: ['/bin/sh']
- args: ['-c', 'echo hello']
- when:
- - input: "foo"
- operator: in
- values: [ "bar" ]
- - name: bar
- image: busybox
- command: ['/bin/sh']
- args: ['-c', 'echo hello']
-`,
- },
- {
- desc: "two steps, when is based on step-results",
- expected: []v1.StepState{{
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- ExitCode: 0,
- Reason: "Completed",
- },
- },
- TerminationReason: "Completed",
- Name: "foo",
- Container: "step-foo",
- Results: []v1.TaskRunStepResult{
- {
- Name: "result1",
- Type: "string",
- Value: v1.ParamValue{Type: "string", StringVal: "bar"},
- },
- },
- }, {
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- ExitCode: 0,
- Reason: "Completed",
- },
- },
- TerminationReason: "Completed",
- Name: "bar",
- Container: "step-bar",
- }},
- taskTemplate: `
-metadata:
- name: %s
- namespace: %s
-spec:
- steps:
- - name: foo
- image: busybox
- results:
- - name: result1
- command: ['/bin/sh']
- args: ['-c', 'echo -n bar >> $(step.results.result1.path)']
- - name: bar
- image: busybox
- command: ['/bin/sh']
- args: ['-c', 'echo hello']
- when:
- - input: "$(steps.foo.results.result1)"
- operator: in
- values: [ "bar" ]
-`,
- },
- }
- for _, tc := range tests {
- t.Run(tc.desc, func(t *testing.T) {
- checkFlagsEnabled := requireAllGates(requireEnableStepActionsGate)
-
- ctx := context.Background()
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
- c, namespace := setup(ctx, t)
- checkFlagsEnabled(ctx, t, c, "")
-
- knativetest.CleanupOnInterrupt(func() {
- tearDown(ctx, t, c, namespace)
- }, t.Logf)
-
- defer tearDown(ctx, t, c, namespace)
-
- taskRunName := helpers.ObjectNameForTest(t)
-
- t.Logf("Creating Task and TaskRun in namespace %s", namespace)
- task := parse.MustParseV1Task(t, fmt.Sprintf(tc.taskTemplate, taskRunName, namespace))
- if _, err := c.V1TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil {
- t.Fatalf("Failed to create Task: %s", err)
- }
- taskRun := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
-metadata:
- name: %s
- namespace: %s
-spec:
- taskRef:
- name: %s
-`, taskRunName, namespace, task.Name))
- if _, err := c.V1TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil {
- t.Fatalf("Failed to create TaskRun: %s", err)
- }
-
- if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunSucceeded", v1Version); err != nil {
- t.Errorf("Error waiting for TaskRun to finish: %s", err)
- }
-
- taskrun, err := c.V1TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{})
- if err != nil {
- t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err)
- }
- var ops cmp.Options
- ops = append(ops, cmpopts.IgnoreFields(corev1.ContainerStateTerminated{}, "StartedAt", "FinishedAt", "ContainerID", "Message"))
- ops = append(ops, cmpopts.IgnoreFields(v1.StepState{}, "ImageID"))
- if d := cmp.Diff(taskrun.Status.Steps, tc.expected, ops); d != "" {
- t.Fatalf("-got, +want: %v", d)
- }
- })
- }
-}
-
-func TestWhenExpressionsCELInStep(t *testing.T) {
- tests := []struct {
- desc string
- expected []v1.StepState
- taskTemplate string
- }{
- {
- desc: "single step, when is false, skipped",
- expected: []v1.StepState{{
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- ExitCode: 0,
- Reason: "Completed",
- },
- },
- TerminationReason: "Skipped",
- Name: "foo",
- Container: "step-foo",
- }},
- taskTemplate: `
-metadata:
- name: %s
- namespace: %s
-spec:
- steps:
- - name: foo
- image: busybox
- command: ['/bin/sh']
- args: ['-c', 'echo hello']
- when:
- - cel: "'foo'=='bar'"
-`,
- },
- {
- desc: "single step, when CEL is true, completed",
- expected: []v1.StepState{{
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- ExitCode: 0,
- Reason: "Completed",
- },
- },
- TerminationReason: "Completed",
- Name: "foo",
- Container: "step-foo",
- }},
- taskTemplate: `
-metadata:
- name: %s
- namespace: %s
-spec:
- steps:
- - name: foo
- image: busybox
- command: ['/bin/sh']
- args: ['-c', 'echo hello']
- when:
- - cel: "'foo'=='foo'"
-`,
- },
- {
- desc: "two steps, first when CEL is false, skipped and second step complete",
- expected: []v1.StepState{{
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- ExitCode: 0,
- Reason: "Completed",
- },
- },
- TerminationReason: "Skipped",
- Name: "foo",
- Container: "step-foo",
- }, {
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- ExitCode: 0,
- Reason: "Completed",
- },
- },
- TerminationReason: "Completed",
- Name: "bar",
- Container: "step-bar",
- }},
- taskTemplate: `
-metadata:
- name: %s
- namespace: %s
-spec:
- steps:
- - name: foo
- image: busybox
- command: ['/bin/sh']
- args: ['-c', 'echo hello']
- when:
- - cel: "'foo'=='bar'"
- - name: bar
- image: busybox
- command: ['/bin/sh']
- args: ['-c', 'echo hello']
-`,
- },
- {
- desc: "two steps, when cel is based on step-results",
- expected: []v1.StepState{{
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- ExitCode: 0,
- Reason: "Completed",
- },
- },
- TerminationReason: "Completed",
- Name: "foo",
- Container: "step-foo",
- Results: []v1.TaskRunStepResult{
- {
- Name: "result1",
- Type: "string",
- Value: v1.ParamValue{Type: "string", StringVal: "bar"},
- },
- },
- }, {
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- ExitCode: 0,
- Reason: "Completed",
- },
- },
- TerminationReason: "Completed",
- Name: "bar",
- Container: "step-bar",
- }},
- taskTemplate: `
-metadata:
- name: %s
- namespace: %s
-spec:
- steps:
- - name: foo
- image: busybox
- results:
- - name: result1
- command: ['/bin/sh']
- args: ['-c', 'echo -n bar >> $(step.results.result1.path)']
- - name: bar
- image: busybox
- command: ['/bin/sh']
- args: ['-c', 'echo hello']
- when:
- - cel: "'$(steps.foo.results.result1)'=='bar'"
-`,
- },
- }
- for _, tc := range tests {
- t.Run(tc.desc, func(t *testing.T) {
- featureFlags := getFeatureFlagsBaseOnAPIFlag(t)
- checkFlagsEnabled := requireAllGates(requireEnableStepActionsGate)
-
- ctx := context.Background()
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
- c, namespace := setup(ctx, t)
- checkFlagsEnabled(ctx, t, c, "")
-
- previous := featureFlags.EnableCELInWhenExpression
- updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{
- config.EnableCELInWhenExpression: "true",
- })
-
- knativetest.CleanupOnInterrupt(func() {
- updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{
- config.EnableCELInWhenExpression: strconv.FormatBool(previous),
- })
- tearDown(ctx, t, c, namespace)
- }, t.Logf)
- defer func() {
- updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), map[string]string{
- config.EnableCELInWhenExpression: strconv.FormatBool(previous),
- })
- tearDown(ctx, t, c, namespace)
- }()
-
- taskRunName := helpers.ObjectNameForTest(t)
-
- t.Logf("Creating Task and TaskRun in namespace %s", namespace)
- task := parse.MustParseV1Task(t, fmt.Sprintf(tc.taskTemplate, taskRunName, namespace))
- if _, err := c.V1TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil {
- t.Fatalf("Failed to create Task: %s", err)
- }
- taskRun := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
-metadata:
- name: %s
- namespace: %s
-spec:
- taskRef:
- name: %s
-`, taskRunName, namespace, task.Name))
- if _, err := c.V1TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil {
- t.Fatalf("Failed to create TaskRun: %s", err)
- }
-
- if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunSucceeded", v1Version); err != nil {
- t.Errorf("Error waiting for TaskRun to finish: %s", err)
- }
-
- taskrun, err := c.V1TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{})
- if err != nil {
- t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err)
- }
- var ops cmp.Options
- ops = append(ops, cmpopts.IgnoreFields(corev1.ContainerStateTerminated{}, "StartedAt", "FinishedAt", "ContainerID", "Message"))
- ops = append(ops, cmpopts.IgnoreFields(v1.StepState{}, "ImageID"))
- if d := cmp.Diff(taskrun.Status.Steps, tc.expected, ops); d != "" {
- t.Fatalf("-got, +want: %v", d)
- }
- })
- }
-}
diff --git a/upstream/test/stepaction_results_test.go b/upstream/test/stepaction_results_test.go
index 43a5702c582..5c3fafa50a2 100644
--- a/upstream/test/stepaction_results_test.go
+++ b/upstream/test/stepaction_results_test.go
@@ -28,7 +28,7 @@ import (
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/tektoncd/pipeline/pkg/apis/config"
v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
- "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+ v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
"github.com/tektoncd/pipeline/test/parse"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/system"
@@ -49,7 +49,7 @@ func TestStepResultsStepActions(t *testing.T) {
type tests struct {
name string
taskRunFunc func(*testing.T, string) (*v1.TaskRun, *v1.TaskRun)
- stepActionFunc func(*testing.T, string) *v1beta1.StepAction
+ stepActionFunc func(*testing.T, string) *v1alpha1.StepAction
}
tds := []tests{{
@@ -59,6 +59,7 @@ func TestStepResultsStepActions(t *testing.T) {
}}
for _, td := range tds {
+ td := td
t.Run(td.name, func(t *testing.T) {
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
@@ -79,7 +80,7 @@ func TestStepResultsStepActions(t *testing.T) {
trName := taskRun.Name
- _, err := c.V1beta1StepActionClient.Create(ctx, stepAction, metav1.CreateOptions{})
+ _, err := c.V1alpha1StepActionClient.Create(ctx, stepAction, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Failed to create StepAction : %s", err)
}
@@ -113,9 +114,9 @@ func TestStepResultsStepActions(t *testing.T) {
}
}
-func getStepAction(t *testing.T, namespace string) *v1beta1.StepAction {
+func getStepAction(t *testing.T, namespace string) *v1alpha1.StepAction {
t.Helper()
- return parse.MustParseV1beta1StepAction(t, fmt.Sprintf(`
+ return parse.MustParseV1alpha1StepAction(t, fmt.Sprintf(`
metadata:
name: step-action
namespace: %s
@@ -123,7 +124,7 @@ spec:
results:
- name: result1
type: string
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo -n step-action >> $(step.results.result1.path)
`, namespace))
@@ -139,7 +140,7 @@ spec:
taskSpec:
steps:
- name: step1
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo -n inlined-step >> $(step.results.result1.path)
results:
@@ -167,7 +168,7 @@ spec:
taskSpec:
steps:
- name: step1
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo -n inlined-step >> $(step.results.result1.path)
results:
@@ -189,18 +190,17 @@ status:
status: "True"
reason: "Succeeded"
podName: step-results-task-run-pod
- artifacts: {}
taskSpec:
steps:
- name: step1
- image: mirror.gcr.io/alpine
+ image: alpine
results:
- name: result1
type: string
script: |
echo -n inlined-step >> /tekton/steps/step-step1/results/result1
- name: step2
- image: mirror.gcr.io/alpine
+ image: alpine
results:
- name: result1
type: string
diff --git a/upstream/test/task_results_from_failed_tasks_test.go b/upstream/test/task_results_from_failed_tasks_test.go
index 16911f40d54..5ecf98ae204 100644
--- a/upstream/test/task_results_from_failed_tasks_test.go
+++ b/upstream/test/task_results_from_failed_tasks_test.go
@@ -52,7 +52,7 @@ spec:
- name: result2
steps:
- name: failing-step
- image: mirror.gcr.io/busybox
+ image: busybox
script: 'echo -n 123 | tee $(results.result1.path); exit 1; echo -n 456 | tee $(results.result2.path)'
finally:
- name: finaltask1
@@ -63,7 +63,7 @@ spec:
params:
- name: param1
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: 'exit 0'
- name: finaltask2
params:
@@ -73,7 +73,7 @@ spec:
params:
- name: param1
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
script: exit 0`, helpers.ObjectNameForTest(t)))
if _, err := c.V1beta1PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}); err != nil {
diff --git a/upstream/test/taskrun_test.go b/upstream/test/taskrun_test.go
index d8382c18331..ee277fc4daa 100644
--- a/upstream/test/taskrun_test.go
+++ b/upstream/test/taskrun_test.go
@@ -61,13 +61,13 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
command: ['/bin/sh']
args: ['-c', 'echo hello']
- - image: mirror.gcr.io/busybox
+ - image: busybox
command: ['/bin/sh']
args: ['-c', 'exit 1']
- - image: mirror.gcr.io/busybox
+ - image: busybox
command: ['/bin/sh']
args: ['-c', 'sleep 30s']
`, helpers.ObjectNameForTest(t), namespace))
@@ -127,33 +127,10 @@ spec:
Name: "unnamed-2",
Container: "step-unnamed-2",
}}
- expectedStepNumber := len(expectedStepState)
- if len(taskrun.Status.Steps) < expectedStepNumber {
- t.Fatalf("expected at least %d steps, got %d", expectedStepNumber, len(taskrun.Status.Steps))
- }
ignoreTerminatedFields := cmpopts.IgnoreFields(corev1.ContainerStateTerminated{}, "StartedAt", "FinishedAt", "ContainerID")
- ignoreStepFields := cmpopts.IgnoreFields(v1.StepState{}, "ImageID", "Running")
- lastStepIndex := len(expectedStepState) - 1
- for i := range lastStepIndex {
- if d := cmp.Diff(taskrun.Status.Steps[i], expectedStepState[i], ignoreTerminatedFields, ignoreStepFields); d != "" {
- t.Fatalf("taskrun.Status.Steps[%d]:\n-got, +want: %v", i, d)
- }
- }
-
- // Sometimes, the state of the last container in the Pod is still running,
- // and the state content of the final step is not skipped.
- // In this case, we should compare the state of the last step with the normal state.
- otherLatestExpectedStepState := v1.StepState{
- Name: "unnamed-2",
- Container: "step-unnamed-2",
- }
-
- if d := cmp.Diff(taskrun.Status.Steps[lastStepIndex], expectedStepState[lastStepIndex], ignoreTerminatedFields, ignoreStepFields); d != "" {
- t.Logf("taskrun.Status.Steps[%d]:\n-got, +want: %v", lastStepIndex, d)
- // try to compare the state of the last step with the other state
- if d := cmp.Diff(taskrun.Status.Steps[lastStepIndex], otherLatestExpectedStepState, ignoreTerminatedFields, ignoreStepFields); d != "" {
- t.Fatalf("taskrun.Status.Steps[%d]:\n-got, +want: %v", lastStepIndex, d)
- }
+ ignoreStepFields := cmpopts.IgnoreFields(v1.StepState{}, "ImageID")
+ if d := cmp.Diff(taskrun.Status.Steps, expectedStepState, ignoreTerminatedFields, ignoreStepFields); d != "" {
+ t.Fatalf("-got, +want: %v", d)
}
releaseAnnotation, ok := taskrun.Annotations[pod.ReleaseAnnotation]
@@ -535,7 +512,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
command: ['/bin/sh']
args: ['-c', 'exit 1']
volumeMounts:
@@ -604,101 +581,3 @@ spec:
t.Fatalf("expected 1 retry status, got %d", len(taskrun.Status.RetriesStatus))
}
}
-
-func TestTaskRunResolveDefaultParameterSubstitutionOnStepAction(t *testing.T) {
- ctx := context.Background()
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
-
- c, namespace := setup(ctx, t, requireAllGates(requireEnableStepActionsGate))
-
- knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
- defer tearDown(ctx, t, c, namespace)
-
- t.Logf("Creating Task and TaskRun in namespace %s", namespace)
- task := parse.MustParseV1Task(t, fmt.Sprintf(`
-metadata:
- name: %s
- namespace: %s
-spec:
- params:
- - name: repository
- type: string
- default: https://github.com/tektoncd/catalog.git
- - name: revision
- type: string
- default: main
- steps:
- - name: clone
- ref:
- resolver: git
- params:
- - name: url
- value: "$(params.repository)"
- - name: pathInRepo
- value: /stepaction/git-clone/0.1/git-clone.yaml
- - name: revision
- value: "$(params.revision)"
- params:
- - name: output-path
- value: "/tmp"
- - name: url
- value: $(params.repository)
- - name: revision
- value: $(params.revision)
-`, helpers.ObjectNameForTest(t), namespace))
- if _, err := c.V1TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil {
- t.Fatalf("Failed to create Task: %s", err)
- }
-
- taskRunName := helpers.ObjectNameForTest(t)
- taskRun := parse.MustParseV1TaskRun(t, fmt.Sprintf(`
-metadata:
- name: %s
- namespace: %s
-spec:
- taskRef:
- name: %s
- retries: 1
-`, taskRunName, namespace, task.Name))
- if _, err := c.V1TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil {
- t.Fatalf("Failed to create TaskRun: %s", err)
- }
-
- t.Logf("Waiting for TaskRun in namespace %s to complete", namespace)
- if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunSucceed", v1Version); err != nil {
- t.Errorf("Error waiting for TaskRun to finish: %s", err)
- }
-
- taskrun, err := c.V1TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{})
- if err != nil {
- t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err)
- }
-
- if !isSuccessful(t, taskrun.GetName(), taskrun.Status.Conditions) {
- t.Fatalf("task should have succeeded")
- }
-
- expectedReason := "Succeeded"
- actualReason := taskrun.Status.GetCondition(apis.ConditionSucceeded).GetReason()
- if actualReason != expectedReason {
- t.Fatalf("expected TaskRun to have failed reason %s, got %s", expectedReason, actualReason)
- }
-
- expectedStepState := []v1.StepState{{
- ContainerState: corev1.ContainerState{
- Terminated: &corev1.ContainerStateTerminated{
- ExitCode: 0,
- Reason: "Completed",
- },
- },
- TerminationReason: "Completed",
- Name: "clone",
- Container: "step-clone",
- }}
- ignoreTerminatedFields := cmpopts.IgnoreFields(corev1.ContainerStateTerminated{}, "StartedAt", "FinishedAt", "ContainerID", "Message")
- ignoreStepFields := cmpopts.IgnoreFields(v1.StepState{}, "ImageID", "Results", "Provenance")
- if d := cmp.Diff(taskrun.Status.Steps, expectedStepState, ignoreTerminatedFields, ignoreStepFields); d != "" {
- t.Fatalf("-got, +want: %v", d)
- }
-}
diff --git a/upstream/test/tektonbundles_test.go b/upstream/test/tektonbundles_test.go
index 39f80f3972f..3515dcaeb41 100644
--- a/upstream/test/tektonbundles_test.go
+++ b/upstream/test/tektonbundles_test.go
@@ -36,7 +36,9 @@ import (
"github.com/google/go-containerregistry/pkg/v1/layout"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/google/go-containerregistry/pkg/v1/tarball"
+ pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+ "github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun"
"github.com/tektoncd/pipeline/test/parse"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -46,11 +48,108 @@ import (
"sigs.k8s.io/yaml"
)
+var bundleFeatureFlags = requireAnyGate(map[string]string{
+ "enable-tekton-oci-bundles": "true",
+ "enable-api-fields": "alpha",
+})
+
var resolverFeatureFlags = requireAllGates(map[string]string{
"enable-bundles-resolver": "true",
"enable-api-fields": "beta",
})
+// TestTektonBundlesSimpleWorkingExample is an integration test which tests a simple, working Tekton bundle using OCI
+// images.
+func TestTektonBundlesSimpleWorkingExample(t *testing.T) {
+ ctx := context.Background()
+ c, namespace := setup(ctx, t, withRegistry, bundleFeatureFlags)
+
+ t.Parallel()
+
+ knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
+ defer tearDown(ctx, t, c, namespace)
+
+ taskName := helpers.ObjectNameForTest(t)
+ pipelineName := helpers.ObjectNameForTest(t)
+ pipelineRunName := helpers.ObjectNameForTest(t)
+ repo := getRegistryServiceIP(ctx, t, c, namespace) + ":5000/tektonbundlessimple"
+ task := parse.MustParseV1beta1Task(t, fmt.Sprintf(`
+metadata:
+ name: %s
+ namespace: %s
+spec:
+ steps:
+ - name: hello
+ image: alpine
+ script: 'echo Hello'
+`, taskName, namespace))
+
+ pipeline := parse.MustParseV1beta1Pipeline(t, fmt.Sprintf(`
+metadata:
+ name: %s
+ namespace: %s
+spec:
+ tasks:
+ - name: hello-world
+ taskRef:
+ name: %s
+ bundle: %s
+`, pipelineName, namespace, taskName, repo))
+
+ setupBundle(ctx, t, c, namespace, repo, task, pipeline)
+
+ // Now generate a PipelineRun to invoke this pipeline and task.
+ pr := parse.MustParseV1beta1PipelineRun(t, fmt.Sprintf(`
+metadata:
+ name: %s
+spec:
+ pipelineRef:
+ name: %s
+ bundle: %s
+`, pipelineRunName, pipelineName, repo))
+ if _, err := c.V1beta1PipelineRunClient.Create(ctx, pr, metav1.CreateOptions{}); err != nil {
+ t.Fatalf("Failed to create PipelineRun: %s", err)
+ }
+
+ t.Logf("Waiting for PipelineRun in namespace %s to finish", namespace)
+ if err := WaitForPipelineRunState(ctx, c, pipelineRunName, timeout, PipelineRunSucceed(pipelineRunName), "PipelineRunCompleted", v1beta1Version); err != nil {
+ t.Errorf("Error waiting for PipelineRun to finish with error: %s", err)
+ }
+
+ trs, err := c.V1beta1TaskRunClient.List(ctx, metav1.ListOptions{})
+ if err != nil {
+ t.Errorf("Error retrieving taskrun: %s", err)
+ }
+ if len(trs.Items) != 1 {
+ t.Fatalf("Expected 1 TaskRun but found %d", len(trs.Items))
+ }
+
+ tr := trs.Items[0]
+ if tr.Status.GetCondition(apis.ConditionSucceeded).IsFalse() {
+ t.Errorf("Expected TaskRun to succeed but instead found condition: %s", tr.Status.GetCondition(apis.ConditionSucceeded))
+ }
+
+ if tr.Status.PodName == "" {
+ t.Fatal("Error getting a PodName (empty)")
+ }
+ p, err := c.KubeClient.CoreV1().Pods(namespace).Get(ctx, tr.Status.PodName, metav1.GetOptions{})
+ if err != nil {
+ t.Fatalf("Error getting pod `%s` in namespace `%s`", tr.Status.PodName, namespace)
+ }
+ for _, stat := range p.Status.ContainerStatuses {
+ if strings.Contains(stat.Name, "step-hello") {
+ req := c.KubeClient.CoreV1().Pods(namespace).GetLogs(p.Name, &corev1.PodLogOptions{Container: stat.Name})
+ logContent, err := req.Do(ctx).Raw()
+ if err != nil {
+ t.Fatalf("Error getting pod logs for pod `%s` and container `%s` in namespace `%s`", tr.Status.PodName, stat.Name, namespace)
+ }
+ if !strings.Contains(string(logContent), "Hello") {
+ t.Fatalf("Expected logs to say hello but received %v", logContent)
+ }
+ }
+ }
+}
+
// TestTektonBundlesResolver is an integration test which tests a simple, working Tekton bundle using OCI
// images using the remote resolution bundles resolver.
func TestTektonBundlesResolver(t *testing.T) {
@@ -74,7 +173,7 @@ metadata:
spec:
steps:
- name: hello
- image: mirror.gcr.io/alpine
+ image: alpine
script: 'echo Hello'
`, taskName, namespace))
@@ -154,6 +253,169 @@ spec:
}
}
+// TestTektonBundlesUsingRegularImage is an integration test which passes a non-Tekton bundle as a task reference.
+func TestTektonBundlesUsingRegularImage(t *testing.T) {
+ ctx := context.Background()
+ c, namespace := setup(ctx, t, withRegistry, bundleFeatureFlags)
+
+ t.Parallel()
+
+ knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
+ defer tearDown(ctx, t, c, namespace)
+
+ taskName := helpers.ObjectNameForTest(t)
+ pipelineName := helpers.ObjectNameForTest(t)
+ pipelineRunName := helpers.ObjectNameForTest(t)
+ repo := getRegistryServiceIP(ctx, t, c, namespace) + ":5000/tektonbundlesregularimage"
+
+ pipeline := parse.MustParseV1beta1Pipeline(t, fmt.Sprintf(`
+metadata:
+ name: %s
+ namespace: %s
+spec:
+ tasks:
+ - name: hello-world
+ taskRef:
+ name: %s
+ bundle: registry
+`, pipelineName, namespace, taskName))
+
+ setupBundle(ctx, t, c, namespace, repo, nil, pipeline)
+
+ // Now generate a PipelineRun to invoke this pipeline and task.
+ pr := parse.MustParseV1beta1PipelineRun(t, fmt.Sprintf(`
+metadata:
+ name: %s
+spec:
+ pipelineRef:
+ name: %s
+ bundle: %s
+`, pipelineRunName, pipelineName, repo))
+ if _, err := c.V1beta1PipelineRunClient.Create(ctx, pr, metav1.CreateOptions{}); err != nil {
+ t.Fatalf("Failed to create PipelineRun: %s", err)
+ }
+
+ t.Logf("Waiting for PipelineRun in namespace %s to finish", namespace)
+ if err := WaitForPipelineRunState(ctx, c, pipelineRunName, timeout,
+ Chain(
+ FailedWithReason(pipelinev1.PipelineRunReasonCouldntGetTask.String(), pipelineRunName),
+ FailedWithMessage("does not contain a dev.tekton.image.apiVersion annotation", pipelineRunName),
+ ), "PipelineRunFailed", v1beta1Version); err != nil {
+ t.Fatalf("Error waiting for PipelineRun to finish with expected error: %s", err)
+ }
+}
+
+// TestTektonBundlesUsingImproperFormat is an integration test which passes an improperly formatted Tekton bundle as a
+// task reference.
+func TestTektonBundlesUsingImproperFormat(t *testing.T) {
+ ctx := context.Background()
+ c, namespace := setup(ctx, t, withRegistry, bundleFeatureFlags)
+
+ t.Parallel()
+
+ knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf)
+ defer tearDown(ctx, t, c, namespace)
+
+ taskName := helpers.ObjectNameForTest(t)
+ pipelineName := helpers.ObjectNameForTest(t)
+ pipelineRunName := helpers.ObjectNameForTest(t)
+ repo := getRegistryServiceIP(ctx, t, c, namespace) + ":5000/tektonbundlesimproperformat"
+
+ ref, err := name.ParseReference(repo)
+ if err != nil {
+ t.Fatalf("Failed to parse %s as an OCI reference: %s", repo, err)
+ }
+
+ task := parse.MustParseV1beta1Task(t, fmt.Sprintf(`
+metadata:
+ name: %s
+ namespace: %s
+spec:
+ steps:
+ - name: hello
+ image: alpine
+ script: 'echo Hello'
+`, taskName, namespace))
+
+ pipeline := parse.MustParseV1beta1Pipeline(t, fmt.Sprintf(`
+metadata:
+ name: %s
+ namespace: %s
+spec:
+ tasks:
+ - name: hello-world
+ taskRef:
+ name: %s
+ bundle: %s
+`, pipelineName, namespace, taskName, repo))
+
+ // Write the pipeline into an image to the registry in the proper format. Write the task using incorrect
+ // annotations.
+ rawTask, err := yaml.Marshal(task)
+ if err != nil {
+ t.Fatalf("Failed to marshal task to yaml: %s", err)
+ }
+
+ rawPipeline, err := yaml.Marshal(pipeline)
+ if err != nil {
+ t.Fatalf("Failed to marshal task to yaml: %s", err)
+ }
+
+ img := empty.Image
+ taskLayer, err := tarball.LayerFromReader(bytes.NewBuffer(rawTask))
+ if err != nil {
+ t.Fatalf("Failed to create oci layer from task: %s", err)
+ }
+ pipelineLayer, err := tarball.LayerFromReader(bytes.NewBuffer(rawPipeline))
+ if err != nil {
+ t.Fatalf("Failed to create oci layer from pipeline: %s", err)
+ }
+ img, err = mutate.Append(img, mutate.Addendum{
+ Layer: taskLayer,
+ Annotations: map[string]string{
+ // intentionally invalid name annotation
+ "org.opencontainers.image.title": taskName,
+ "dev.tekton.image.kind": strings.ToLower(task.Kind),
+ "dev.tekton.image.apiVersion": task.APIVersion,
+ },
+ }, mutate.Addendum{
+ Layer: pipelineLayer,
+ Annotations: map[string]string{
+ "dev.tekton.image.name": pipelineName,
+ "dev.tekton.image.kind": strings.ToLower(pipeline.Kind),
+ "dev.tekton.image.apiVersion": pipeline.APIVersion,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Failed to create an oci image from the task and pipeline layers: %s", err)
+ }
+
+ // Publish this image to the in-cluster registry.
+ publishImg(ctx, t, c, namespace, img, ref)
+
+ // Now generate a PipelineRun to invoke this pipeline and task.
+ pr := parse.MustParseV1beta1PipelineRun(t, fmt.Sprintf(`
+metadata:
+ name: %s
+spec:
+ pipelineRef:
+ name: %s
+ bundle: %s
+`, pipelineRunName, pipelineName, repo))
+ if _, err := c.V1beta1PipelineRunClient.Create(ctx, pr, metav1.CreateOptions{}); err != nil {
+ t.Fatalf("Failed to create PipelineRun: %s", err)
+ }
+
+ t.Logf("Waiting for PipelineRun in namespace %s to finish", namespace)
+ if err := WaitForPipelineRunState(ctx, c, pipelineRunName, timeout,
+ Chain(
+ FailedWithReason(pipelinerun.ReasonCouldntGetPipeline, pipelineRunName),
+ FailedWithMessage("does not contain a dev.tekton.image.name annotation", pipelineRunName),
+ ), "PipelineRunFailed", v1beta1Version); err != nil {
+ t.Fatalf("Error waiting for PipelineRun to finish with expected error: %s", err)
+ }
+}
+
func tarImageInOCIFormat(namespace string, img v1.Image) ([]byte, error) {
// Write the image in the OCI layout and then tar it up.
dir, err := os.MkdirTemp(os.TempDir(), namespace)
diff --git a/upstream/test/timeout_test.go b/upstream/test/timeout_test.go
index 2b407b66890..4b72f38b18f 100644
--- a/upstream/test/timeout_test.go
+++ b/upstream/test/timeout_test.go
@@ -57,7 +57,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
command: ['/bin/sh']
args: ['-c', 'sleep 10']
`, helpers.ObjectNameForTest(t), namespace))
@@ -178,15 +178,15 @@ spec:
taskSpec:
steps:
- name: no-timeout
- image: mirror.gcr.io/busybox
+ image: busybox
script: sleep 1
timeout: 2s
- name: timeout
- image: mirror.gcr.io/busybox
+ image: busybox
script: sleep 1
timeout: 1ms
- name: canceled
- image: mirror.gcr.io/busybox
+ image: busybox
script: sleep 1
`, helpers.ObjectNameForTest(t), namespace))
t.Logf("Creating TaskRun %s in namespace %s", taskRun.Name, namespace)
@@ -242,7 +242,7 @@ spec:
- name: test
steps:
- name: timeout
- image: mirror.gcr.io/busybox
+ image: busybox
script: sleep 1
timeout: 1ms`)
@@ -277,7 +277,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
command: ['/bin/sh']
args: ['-c', 'sleep 3000']
`, helpers.ObjectNameForTest(t), namespace))
@@ -333,7 +333,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
command: ['/bin/sh']
args: ['-c', 'sleep 1s']
`, helpers.ObjectNameForTest(t), namespace))
@@ -343,7 +343,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
command: ['/bin/sh']
args: ['-c', 'sleep 10s']
`, helpers.ObjectNameForTest(t), namespace))
@@ -454,7 +454,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
command: ['/bin/sh']
args: ['-c', 'sleep 30']
`, helpers.ObjectNameForTest(t), namespace))
@@ -469,7 +469,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/busybox
+ - image: busybox
command: ['/bin/sh']
args: ['-c', 'sleep 1']
`, helpers.ObjectNameForTest(t), namespace))
@@ -560,102 +560,3 @@ spec:
}
wg.Wait()
}
-
-// TestPipelineRunTimeoutWithCompletedTaskRuns tests the case where a PipelineRun is timeout and has completed TaskRuns.
-func TestPipelineRunTimeoutWithCompletedTaskRuns(t *testing.T) {
- t.Parallel()
- // cancel the context after we have waited a suitable buffer beyond the given deadline.
- ctx, cancel := context.WithTimeout(context.Background(), timeout+2*time.Minute)
- defer cancel()
- c, namespace := setup(ctx, t)
-
- knativetest.CleanupOnInterrupt(func() { tearDown(context.Background(), t, c, namespace) }, t.Logf)
- defer tearDown(context.Background(), t, c, namespace)
-
- t.Logf("Creating Task in namespace %s", namespace)
- task := parse.MustParseV1Task(t, fmt.Sprintf(`
-metadata:
- name: %s
- namespace: %s
-spec:
- params:
- - name: sleep
- default: "1"
- steps:
- - image: mirror.gcr.io/busybox
- command: ['/bin/sh']
- args: ['-c', 'sleep $(params.sleep)']
-`, helpers.ObjectNameForTest(t), namespace))
- if _, err := c.V1TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil {
- t.Fatalf("Failed to create Task `%s`: %s", task.Name, err)
- }
-
- pipeline := parse.MustParseV1Pipeline(t, fmt.Sprintf(`
-metadata:
- name: %s
- namespace: %s
-spec:
- tasks:
- - name: fast-task
- params:
- - name: sleep
- value: "1"
- taskRef:
- name: %s
- - name: slow-task
- params:
- - name: sleep
- value: "120"
- taskRef:
- name: %s
-`, helpers.ObjectNameForTest(t), namespace, task.Name, task.Name))
- pipelineRun := parse.MustParseV1PipelineRun(t, fmt.Sprintf(`
-metadata:
- name: %s
- namespace: %s
-spec:
- pipelineRef:
- name: %s
- timeouts:
- pipeline: 30s
- tasks: 30s
-`, helpers.ObjectNameForTest(t), namespace, pipeline.Name))
- if _, err := c.V1PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil {
- t.Fatalf("Failed to create Pipeline `%s`: %s", pipeline.Name, err)
- }
- if _, err := c.V1PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}); err != nil {
- t.Fatalf("Failed to create PipelineRun `%s`: %s", pipelineRun.Name, err)
- }
-
- t.Logf("Waiting for PipelineRun %s in namespace %s to be timed out", pipelineRun.Name, namespace)
- if err := WaitForPipelineRunState(ctx, c, pipelineRun.Name, timeout, FailedWithReason(v1.PipelineRunReasonTimedOut.String(), pipelineRun.Name), "PipelineRunTimedOut", v1Version); err != nil {
- t.Errorf("Error waiting for PipelineRun %s to finish: %s", pipelineRun.Name, err)
- }
-
- taskrunList, err := c.V1TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + pipelineRun.Name})
- if err != nil {
- t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", pipelineRun.Name, err)
- }
-
- t.Logf("Waiting for TaskRuns from PipelineRun %s in namespace %s to time out and be cancelled", pipelineRun.Name, namespace)
- var wg sync.WaitGroup
- for _, taskrunItem := range taskrunList.Items {
- wg.Add(1)
- go func(name string) {
- defer wg.Done()
- if strings.Contains(name, "fast-task") {
- // fast-task should have completed, not timed out
- return
- }
- err := WaitForTaskRunState(ctx, c, name, FailedWithReason(v1.TaskRunReasonCancelled.String(), name), v1.TaskRunReasonCancelled.String(), v1Version)
- if err != nil {
- t.Errorf("Error waiting for TaskRun %s to timeout: %s", name, err)
- }
- }(taskrunItem.Name)
- }
- wg.Wait()
-
- if _, err := c.V1PipelineRunClient.Get(ctx, pipelineRun.Name, metav1.GetOptions{}); err != nil {
- t.Fatalf("Failed to get PipelineRun `%s`: %s", pipelineRun.Name, err)
- }
-}
diff --git a/upstream/test/upgrade/simpleResources.yaml b/upstream/test/upgrade/simpleResources.yaml
index 6568f0d56b6..03fa3b4b49f 100644
--- a/upstream/test/upgrade/simpleResources.yaml
+++ b/upstream/test/upgrade/simpleResources.yaml
@@ -10,18 +10,18 @@ spec:
default: "response"
steps:
- name: echo-param
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo "$(params.rsp)"
- name: check-workspace
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
if [ "$(workspaces.workspace.bound)" == "true" ]; then
echo "Workspace provided"
fi
sidecars:
- name: server
- image: mirror.gcr.io/alpine
+ image: alpine:3.12.0
command: ['/bin/bash']
workingDir: /foo
script: echo server
@@ -44,7 +44,7 @@ spec:
type: string
default: "1"
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
name: check-workspace
script: |
if [ "$(workspaces.workspace.bound)" == "true" ]; then
@@ -63,5 +63,5 @@ spec:
default: "Succeeded"
steps:
- name: verify-status
- image: mirror.gcr.io/ubuntu
+ image: ubuntu
script: echo $(params.echoStatus)
diff --git a/upstream/test/upgrade_test.go b/upstream/test/upgrade_test.go
index 3f98955032a..7d30b233fa2 100644
--- a/upstream/test/upgrade_test.go
+++ b/upstream/test/upgrade_test.go
@@ -52,11 +52,11 @@ spec:
default: "response"
steps:
- name: echo-param
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
echo "$(params.rsp)"
- name: check-workspace
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
if [ "$(workspaces.taskWorkspace.bound)" == "true" ]; then
echo "Workspace provided"
@@ -128,12 +128,12 @@ status:
status: "True"
taskSpec:
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
name: echo-param
script: |
echo "response"
- name: check-workspace
- image: mirror.gcr.io/alpine
+ image: alpine
script: |
if [ "true" == "true" ]; then
echo "Workspace provided"
@@ -155,7 +155,6 @@ status:
terminationReason: Completed
terminated:
reason: Completed
- artifacts: {}
`
expectedSimplePipelineRunYaml = `
diff --git a/upstream/test/util.go b/upstream/test/util.go
index e1254e2edb1..99e005a86b4 100644
--- a/upstream/test/util.go
+++ b/upstream/test/util.go
@@ -92,9 +92,9 @@ func header(t *testing.T, text string) {
right := " ###"
txt := left + text + right
bar := strings.Repeat("#", len(txt))
- t.Log(bar)
- t.Log(txt)
- t.Log(bar)
+ t.Logf(bar)
+ t.Logf(txt)
+ t.Logf(bar)
}
func tearDown(ctx context.Context, t *testing.T, cs *clients, namespace string) {
diff --git a/upstream/test/workingdir_test.go b/upstream/test/workingdir_test.go
index 613ef6566c0..5fc67c516e3 100644
--- a/upstream/test/workingdir_test.go
+++ b/upstream/test/workingdir_test.go
@@ -51,7 +51,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
workingDir: /workspace/HELLOMOTO
args: ['-c', 'echo YES']
`, wdTaskName, namespace))
@@ -124,7 +124,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/ubuntu
+ - image: ubuntu
workingDir: /HELLOMOTO
args: ['-c', 'echo YES']
`, wdTaskName, namespace))
diff --git a/upstream/test/workspace_test.go b/upstream/test/workspace_test.go
index db648500ad5..8a03ed1ce4e 100644
--- a/upstream/test/workspace_test.go
+++ b/upstream/test/workspace_test.go
@@ -51,7 +51,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
script: 'echo foo > /workspace/test/file'
workspaces:
- name: test
@@ -129,7 +129,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
script: 'cat /workspace/test/file'
workspaces:
- name: test
@@ -199,7 +199,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
script: 'cat /workspace/test/file'
workspaces:
- name: test
@@ -268,7 +268,7 @@ metadata:
namespace: %s
spec:
steps:
- - image: mirror.gcr.io/alpine
+ - image: alpine
name: foo
command: ['echo']
args: ['$(workspaces.test.volume)']
diff --git a/upstream/tools/go.mod b/upstream/tools/go.mod
index 43067be98c3..fea2b15c1c1 100644
--- a/upstream/tools/go.mod
+++ b/upstream/tools/go.mod
@@ -1,24 +1,21 @@
module github.com/tektoncd/pipeline/tools
-go 1.22.1
+go 1.18
-toolchain go1.22.5
-
-require github.com/golangci/golangci-lint v1.61.0
+require github.com/golangci/golangci-lint v1.57.2
require (
4d63.com/gocheckcompilerdirectives v1.2.1 // indirect
4d63.com/gochecknoglobals v0.2.1 // indirect
- github.com/4meepo/tagalign v1.3.4 // indirect
- github.com/Abirdcfly/dupword v0.1.1 // indirect
- github.com/Antonboom/errname v0.1.13 // indirect
- github.com/Antonboom/nilnil v0.1.9 // indirect
- github.com/Antonboom/testifylint v1.4.3 // indirect
- github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
- github.com/Crocmagnon/fatcontext v0.5.2 // indirect
+ github.com/4meepo/tagalign v1.3.3 // indirect
+ github.com/Abirdcfly/dupword v0.0.14 // indirect
+ github.com/Antonboom/errname v0.1.12 // indirect
+ github.com/Antonboom/nilnil v0.1.7 // indirect
+ github.com/Antonboom/testifylint v1.2.0 // indirect
+ github.com/BurntSushi/toml v1.3.2 // indirect
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
- github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 // indirect
- github.com/Masterminds/semver/v3 v3.3.0 // indirect
+ github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 // indirect
+ github.com/Masterminds/semver v1.5.0 // indirect
github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect
github.com/alecthomas/go-check-sumtype v0.1.4 // indirect
github.com/alexkohler/nakedret/v2 v2.0.4 // indirect
@@ -29,29 +26,29 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/bkielbasa/cyclop v1.2.1 // indirect
github.com/blizzy78/varnamelen v0.8.0 // indirect
- github.com/bombsimon/wsl/v4 v4.4.1 // indirect
+ github.com/bombsimon/wsl/v4 v4.2.1 // indirect
github.com/breml/bidichk v0.2.7 // indirect
github.com/breml/errchkjson v0.3.6 // indirect
github.com/butuzov/ireturn v0.3.0 // indirect
- github.com/butuzov/mirror v1.2.0 // indirect
+ github.com/butuzov/mirror v1.1.0 // indirect
github.com/catenacyber/perfsprint v0.7.1 // indirect
github.com/ccojocar/zxcvbn-go v1.0.2 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/charithe/durationcheck v0.0.10 // indirect
github.com/chavacava/garif v0.1.0 // indirect
- github.com/ckaznocha/intrange v0.2.0 // indirect
+ github.com/ckaznocha/intrange v0.1.1 // indirect
github.com/curioswitch/go-reassign v0.2.0 // indirect
- github.com/daixiang0/gci v0.13.5 // indirect
+ github.com/daixiang0/gci v0.12.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/denis-tingaikin/go-header v0.5.0 // indirect
github.com/ettle/strcase v0.2.0 // indirect
- github.com/fatih/color v1.17.0 // indirect
+ github.com/fatih/color v1.16.0 // indirect
github.com/fatih/structtag v1.2.0 // indirect
- github.com/firefart/nonamedreturns v1.0.5 // indirect
+ github.com/firefart/nonamedreturns v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/fzipp/gocyclo v0.6.0 // indirect
- github.com/ghostiam/protogetter v0.3.6 // indirect
- github.com/go-critic/go-critic v0.11.4 // indirect
+ github.com/ghostiam/protogetter v0.3.5 // indirect
+ github.com/go-critic/go-critic v0.11.2 // indirect
github.com/go-toolsmith/astcast v1.1.0 // indirect
github.com/go-toolsmith/astcopy v1.1.0 // indirect
github.com/go-toolsmith/astequal v1.2.0 // indirect
@@ -59,17 +56,16 @@ require (
github.com/go-toolsmith/astp v1.1.0 // indirect
github.com/go-toolsmith/strparse v1.1.0 // indirect
github.com/go-toolsmith/typep v1.1.0 // indirect
- github.com/go-viper/mapstructure/v2 v2.1.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect
github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect
github.com/gobwas/glob v0.2.3 // indirect
- github.com/gofrs/flock v0.12.1 // indirect
+ github.com/gofrs/flock v0.8.1 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
- github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 // indirect
- github.com/golangci/misspell v0.6.0 // indirect
- github.com/golangci/modinfo v0.3.4 // indirect
+ github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e // indirect
+ github.com/golangci/misspell v0.4.1 // indirect
github.com/golangci/plugin-module-register v0.1.1 // indirect
- github.com/golangci/revgrep v0.5.3 // indirect
+ github.com/golangci/revgrep v0.5.2 // indirect
github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/gordonklaus/ineffassign v0.1.0 // indirect
@@ -77,25 +73,24 @@ require (
github.com/gostaticanalysis/comment v1.4.2 // indirect
github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect
github.com/gostaticanalysis/nilerr v0.1.1 // indirect
- github.com/hashicorp/go-version v1.7.0 // indirect
+ github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hexops/gotextdiff v1.0.3 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jgautheron/goconst v1.7.1 // indirect
github.com/jingyugao/rowserrcheck v1.1.1 // indirect
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect
- github.com/jjti/go-spancheck v0.6.2 // indirect
+ github.com/jjti/go-spancheck v0.5.3 // indirect
github.com/julz/importas v0.1.0 // indirect
- github.com/karamaru-alpha/copyloopvar v1.1.0 // indirect
+ github.com/karamaru-alpha/copyloopvar v1.0.10 // indirect
github.com/kisielk/errcheck v1.7.0 // indirect
github.com/kkHAIKE/contextcheck v1.1.5 // indirect
github.com/kulti/thelper v0.6.3 // indirect
github.com/kunwardeep/paralleltest v1.0.10 // indirect
github.com/kyoh86/exportloopref v0.1.11 // indirect
- github.com/lasiar/canonicalheader v1.1.1 // indirect
github.com/ldez/gomoddirectives v0.2.4 // indirect
github.com/ldez/tagliatelle v0.5.0 // indirect
- github.com/leonklingele/grouper v1.1.2 // indirect
+ github.com/leonklingele/grouper v1.1.1 // indirect
github.com/lufeee/execinquery v1.2.1 // indirect
github.com/macabu/inamedparam v0.1.3 // indirect
github.com/magiconair/properties v1.8.6 // indirect
@@ -106,44 +101,44 @@ require (
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
- github.com/mgechev/revive v1.3.9 // indirect
+ github.com/mgechev/revive v1.3.7 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
- github.com/moricho/tparallel v0.3.2 // indirect
+ github.com/moricho/tparallel v0.3.1 // indirect
github.com/nakabonne/nestif v0.3.1 // indirect
+ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/nishanths/exhaustive v0.12.0 // indirect
github.com/nishanths/predeclared v0.2.2 // indirect
github.com/nunnatsa/ginkgolinter v0.16.2 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
- github.com/pelletier/go-toml/v2 v2.2.3 // indirect
+ github.com/pelletier/go-toml/v2 v2.2.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/polyfloyd/go-errorlint v1.6.0 // indirect
+ github.com/polyfloyd/go-errorlint v1.4.8 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
- github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 // indirect
- github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect
+ github.com/quasilyte/go-ruleguard v0.4.2 // indirect
github.com/quasilyte/gogrep v0.5.0 // indirect
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
- github.com/ryancurrah/gomodguard v1.3.5 // indirect
+ github.com/ryancurrah/gomodguard v1.3.1 // indirect
github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect
github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect
github.com/sashamelentyev/interfacebloat v1.1.0 // indirect
- github.com/sashamelentyev/usestdlibvars v1.27.0 // indirect
- github.com/securego/gosec/v2 v2.21.2 // indirect
+ github.com/sashamelentyev/usestdlibvars v1.25.0 // indirect
+ github.com/securego/gosec/v2 v2.19.0 // indirect
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sivchari/containedctx v1.0.3 // indirect
- github.com/sivchari/tenv v1.10.0 // indirect
+ github.com/sivchari/tenv v1.7.1 // indirect
github.com/sonatard/noctx v0.0.2 // indirect
github.com/sourcegraph/go-diff v0.7.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.5.0 // indirect
- github.com/spf13/cobra v1.8.1 // indirect
+ github.com/spf13/cobra v1.7.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.12.0 // indirect
@@ -152,38 +147,40 @@ require (
github.com/stretchr/objx v0.5.2 // indirect
github.com/stretchr/testify v1.9.0 // indirect
github.com/subosito/gotenv v1.4.1 // indirect
+ github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect
github.com/tdakkota/asciicheck v0.2.0 // indirect
- github.com/tetafro/godot v1.4.17 // indirect
+ github.com/tetafro/godot v1.4.16 // indirect
github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect
github.com/timonwong/loggercheck v0.9.4 // indirect
- github.com/tomarrell/wrapcheck/v2 v2.9.0 // indirect
+ github.com/tomarrell/wrapcheck/v2 v2.8.3 // indirect
github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect
github.com/ultraware/funlen v0.1.0 // indirect
- github.com/ultraware/whitespace v0.1.1 // indirect
- github.com/uudashr/gocognit v1.1.3 // indirect
+ github.com/ultraware/whitespace v0.1.0 // indirect
+ github.com/uudashr/gocognit v1.1.2 // indirect
github.com/xen0n/gosmopolitan v1.2.2 // indirect
github.com/yagipy/maintidx v1.0.0 // indirect
- github.com/yeya24/promlinter v0.3.0 // indirect
+ github.com/yeya24/promlinter v0.2.0 // indirect
github.com/ykadowak/zerologlint v0.1.5 // indirect
- gitlab.com/bosi/decorder v0.4.2 // indirect
- go-simpler.org/musttag v0.12.2 // indirect
- go-simpler.org/sloglint v0.7.2 // indirect
+ gitlab.com/bosi/decorder v0.4.1 // indirect
+ go-simpler.org/musttag v0.9.0 // indirect
+ go-simpler.org/sloglint v0.5.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/automaxprocs v1.5.3 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.24.0 // indirect
- golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e // indirect
+ golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc // indirect
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect
- golang.org/x/mod v0.21.0 // indirect
- golang.org/x/sync v0.8.0 // indirect
- golang.org/x/sys v0.25.0 // indirect
- golang.org/x/text v0.18.0 // indirect
- golang.org/x/tools v0.24.0 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ golang.org/x/mod v0.16.0 // indirect
+ golang.org/x/sync v0.6.0 // indirect
+ golang.org/x/sys v0.18.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
+ golang.org/x/tools v0.19.0 // indirect
+ google.golang.org/protobuf v1.33.0 // indirect
+ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- honnef.co/go/tools v0.5.1 // indirect
- mvdan.cc/gofumpt v0.7.0 // indirect
- mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f // indirect
+ honnef.co/go/tools v0.4.7 // indirect
+ mvdan.cc/gofumpt v0.6.0 // indirect
+ mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14 // indirect
)
diff --git a/upstream/tools/go.sum b/upstream/tools/go.sum
index de6bd9d31a7..e03f5115d77 100644
--- a/upstream/tools/go.sum
+++ b/upstream/tools/go.sum
@@ -35,36 +35,32 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/4meepo/tagalign v1.3.4 h1:P51VcvBnf04YkHzjfclN6BbsopfJR5rxs1n+5zHt+w8=
-github.com/4meepo/tagalign v1.3.4/go.mod h1:M+pnkHH2vG8+qhE5bVc/zeP7HS/j910Fwa9TUSyZVI0=
-github.com/Abirdcfly/dupword v0.1.1 h1:Bsxe0fIw6OwBtXMIncaTxCLHYO5BB+3mcsR5E8VXloY=
-github.com/Abirdcfly/dupword v0.1.1/go.mod h1:B49AcJdTYYkpd4HjgAcutNGG9HZ2JWwKunH9Y2BA6sM=
-github.com/Antonboom/errname v0.1.13 h1:JHICqsewj/fNckzrfVSe+T33svwQxmjC+1ntDsHOVvM=
-github.com/Antonboom/errname v0.1.13/go.mod h1:uWyefRYRN54lBg6HseYCFhs6Qjcy41Y3Jl/dVhA87Ns=
-github.com/Antonboom/nilnil v0.1.9 h1:eKFMejSxPSA9eLSensFmjW2XTgTwJMjZ8hUHtV4s/SQ=
-github.com/Antonboom/nilnil v0.1.9/go.mod h1:iGe2rYwCq5/Me1khrysB4nwI7swQvjclR8/YRPl5ihQ=
-github.com/Antonboom/testifylint v1.4.3 h1:ohMt6AHuHgttaQ1xb6SSnxCeK4/rnK7KKzbvs7DmEck=
-github.com/Antonboom/testifylint v1.4.3/go.mod h1:+8Q9+AOLsz5ZiQiiYujJKs9mNz398+M6UgslP4qgJLA=
+github.com/4meepo/tagalign v1.3.3 h1:ZsOxcwGD/jP4U/aw7qeWu58i7dwYemfy5Y+IF1ACoNw=
+github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE=
+github.com/Abirdcfly/dupword v0.0.14 h1:3U4ulkc8EUo+CaT105/GJ1BQwtgyj6+VaBVbAX11Ba8=
+github.com/Abirdcfly/dupword v0.0.14/go.mod h1:VKDAbxdY8YbKUByLGg8EETzYSuC4crm9WwI6Y3S0cLI=
+github.com/Antonboom/errname v0.1.12 h1:oh9ak2zUtsLp5oaEd/erjB4GPu9w19NyoIskZClDcQY=
+github.com/Antonboom/errname v0.1.12/go.mod h1:bK7todrzvlaZoQagP1orKzWXv59X/x0W0Io2XT1Ssro=
+github.com/Antonboom/nilnil v0.1.7 h1:ofgL+BA7vlA1K2wNQOsHzLJ2Pw5B5DpWRLdDAVvvTow=
+github.com/Antonboom/nilnil v0.1.7/go.mod h1:TP+ScQWVEq0eSIxqU8CbdT5DFWoHp0MbP+KMUO1BKYQ=
+github.com/Antonboom/testifylint v1.2.0 h1:015bxD8zc5iY8QwTp4+RG9I4kIbqwvGX9TrBbb7jGdM=
+github.com/Antonboom/testifylint v1.2.0/go.mod h1:rkmEqjqVnHDRNsinyN6fPSLnoajzFwsCcguJgwADBkw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
-github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
+github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/Crocmagnon/fatcontext v0.5.2 h1:vhSEg8Gqng8awhPju2w7MKHqMlg4/NI+gSDHtR3xgwA=
-github.com/Crocmagnon/fatcontext v0.5.2/go.mod h1:87XhRMaInHP44Q7Tlc7jkgKKB7kZAOPiDkFMdKCC+74=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
-github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 h1:/fTUt5vmbkAcMBt4YQiuC23cV0kEsN1MVMNqeOW43cU=
-github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0/go.mod h1:ONJg5sxcbsdQQ4pOW8TGdTidT2TMAUy/2Xhr8mrYaao=
-github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
-github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
+github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 h1:sATXp1x6/axKxz2Gjxv8MALP0bXaNRfQinEwyfMcx8c=
+github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0/go.mod h1:Nl76DrGNJTA1KJ0LePKBw/vznBX1EHbAZX8mwjR82nI=
+github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
+github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA=
github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ=
github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk=
-github.com/alecthomas/assert/v2 v2.2.2/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ=
github.com/alecthomas/go-check-sumtype v0.1.4 h1:WCvlB3l5Vq5dZQTFmodqL2g68uHiSwwlWcT5a2FGK0c=
github.com/alecthomas/go-check-sumtype v0.1.4/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ=
github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk=
-github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -81,7 +77,6 @@ github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1
github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s=
github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -90,16 +85,16 @@ github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJ
github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM=
github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M=
github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k=
-github.com/bombsimon/wsl/v4 v4.4.1 h1:jfUaCkN+aUpobrMO24zwyAMwMAV5eSziCkOKEauOLdw=
-github.com/bombsimon/wsl/v4 v4.4.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo=
+github.com/bombsimon/wsl/v4 v4.2.1 h1:Cxg6u+XDWff75SIFFmNsqnIOgob+Q9hG6y/ioKbRFiM=
+github.com/bombsimon/wsl/v4 v4.2.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo=
github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY=
github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ=
github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA=
github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U=
github.com/butuzov/ireturn v0.3.0 h1:hTjMqWw3y5JC3kpnC5vXmFJAWI/m31jaCYQqzkS6PL0=
github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA=
-github.com/butuzov/mirror v1.2.0 h1:9YVK1qIjNspaqWutSv8gsge2e/Xpq1eqEkslEUHy5cs=
-github.com/butuzov/mirror v1.2.0/go.mod h1:DqZZDtzm42wIAIyHXeN8W/qb1EPlb9Qn/if9icBOpdQ=
+github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI=
+github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE=
github.com/catenacyber/perfsprint v0.7.1 h1:PGW5G/Kxn+YrN04cRAZKC+ZuvlVwolYMrIyyTJ/rMmc=
github.com/catenacyber/perfsprint v0.7.1/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50=
github.com/ccojocar/zxcvbn-go v1.0.2 h1:na/czXU8RrhXO4EZme6eQJLR4PzcGsahsBOAwU6I3Vg=
@@ -115,15 +110,15 @@ github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+U
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/ckaznocha/intrange v0.2.0 h1:FykcZuJ8BD7oX93YbO1UY9oZtkRbp+1/kJcDjkefYLs=
-github.com/ckaznocha/intrange v0.2.0/go.mod h1:r5I7nUlAAG56xmkOpw4XVr16BXhwYTUdcuRFeevn1oE=
+github.com/ckaznocha/intrange v0.1.1 h1:gHe4LfqCspWkh8KpJFs20fJz3XRHFBFUV9yI7Itu83Q=
+github.com/ckaznocha/intrange v0.1.1/go.mod h1:RWffCw/vKBwHeOEwWdCikAtY0q4gGt8VhJZEEA5n+RE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo=
github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc=
-github.com/daixiang0/gci v0.13.5 h1:kThgmH1yBmZSBCh1EJVxQ7JsHpm5Oms0AMed/0LaH4c=
-github.com/daixiang0/gci v0.13.5/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk=
+github.com/daixiang0/gci v0.12.3 h1:yOZI7VAxAGPQmkb1eqt5g/11SUlwoat1fSblGLmdiQc=
+github.com/daixiang0/gci v0.12.3/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -135,22 +130,21 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q=
github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A=
-github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
-github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
-github.com/firefart/nonamedreturns v1.0.5 h1:tM+Me2ZaXs8tfdDw3X6DOX++wMCOqzYUho6tUTYIdRA=
-github.com/firefart/nonamedreturns v1.0.5/go.mod h1:gHJjDqhGM4WyPt639SOZs+G89Ko7QKH5R5BhnO6xJhw=
-github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
-github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
+github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y=
+github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
-github.com/ghostiam/protogetter v0.3.6 h1:R7qEWaSgFCsy20yYHNIJsU9ZOb8TziSRRxuAOTVKeOk=
-github.com/ghostiam/protogetter v0.3.6/go.mod h1:7lpeDnEJ1ZjL/YtyoN99ljO4z0pd3H0d18/t2dPBxHw=
-github.com/go-critic/go-critic v0.11.4 h1:O7kGOCx0NDIni4czrkRIXTnit0mkyKOCePh3My6OyEU=
-github.com/go-critic/go-critic v0.11.4/go.mod h1:2QAdo4iuLik5S9YG0rT4wcZ8QxwHYkrr6/2MWAiv/vc=
+github.com/ghostiam/protogetter v0.3.5 h1:+f7UiF8XNd4w3a//4DnusQ2SZjPkUjxkMEfjbxOK4Ug=
+github.com/ghostiam/protogetter v0.3.5/go.mod h1:7lpeDnEJ1ZjL/YtyoN99ljO4z0pd3H0d18/t2dPBxHw=
+github.com/go-critic/go-critic v0.11.2 h1:81xH/2muBphEgPtcwH1p6QD+KzXl2tMSi3hXjBSxDnM=
+github.com/go-critic/go-critic v0.11.2/go.mod h1:OePaicfjsf+KPy33yq4gzv6CO7TEQ9Rom6ns1KsJnl8=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -160,13 +154,9 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
-github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
-github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
-github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8=
github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU=
github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s=
@@ -180,20 +170,19 @@ github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlN
github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA=
github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA=
github.com/go-toolsmith/pkgload v1.2.2 h1:0CtmHq/02QhxcF7E9N5LIFcYFsMR5rdovfqTtRKkgIk=
-github.com/go-toolsmith/pkgload v1.2.2/go.mod h1:R2hxLNRKuAsiXCo2i5J6ZQPhnPMOVtU+f0arbFPWCus=
github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw=
github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ=
github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus=
github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig=
-github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w=
-github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c=
+github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U=
github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
-github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
-github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
+github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
+github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -226,18 +215,16 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
-github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 h1:/1322Qns6BtQxUZDTAT4SdcoxknUki7IAoK4SAXr8ME=
-github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9/go.mod h1:Oesb/0uFAyWoaw1U1qS5zyjCg5NP9C9iwjnI4tIsXEE=
-github.com/golangci/golangci-lint v1.61.0 h1:VvbOLaRVWmyxCnUIMTbf1kDsaJbTzH20FAMXTAlQGu8=
-github.com/golangci/golangci-lint v1.61.0/go.mod h1:e4lztIrJJgLPhWvFPDkhiMwEFRrWlmFbrZea3FsJyN8=
-github.com/golangci/misspell v0.6.0 h1:JCle2HUTNWirNlDIAUO44hUsKhOFqGPoC4LZxlaSXDs=
-github.com/golangci/misspell v0.6.0/go.mod h1:keMNyY6R9isGaSAu+4Q8NMBwMPkh15Gtc8UCVoDtAWo=
-github.com/golangci/modinfo v0.3.4 h1:oU5huX3fbxqQXdfspamej74DFX0kyGLkw1ppvXoJ8GA=
-github.com/golangci/modinfo v0.3.4/go.mod h1:wytF1M5xl9u0ij8YSvhkEVPP3M5Mc7XLl1pxH3B2aUM=
+github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e h1:ULcKCDV1LOZPFxGZaA6TlQbiM3J2GCPnkx/bGF6sX/g=
+github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM=
+github.com/golangci/golangci-lint v1.57.2 h1:NNhxfZyL5He1WWDrIvl1a4n5bvWZBcgAqBwlJAAgLTw=
+github.com/golangci/golangci-lint v1.57.2/go.mod h1:ApiG3S3Ca23QyfGp5BmsorTiVxJpr5jGiNS0BkdSidg=
+github.com/golangci/misspell v0.4.1 h1:+y73iSicVy2PqyX7kmUefHusENlrP9YwuHZHPLGQj/g=
+github.com/golangci/misspell v0.4.1/go.mod h1:9mAN1quEo3DlpbaIKKyEvRxK1pwqR9s/Sea1bJCtlNI=
github.com/golangci/plugin-module-register v0.1.1 h1:TCmesur25LnyJkpsVrupv1Cdzo+2f7zX0H6Jkw1Ol6c=
github.com/golangci/plugin-module-register v0.1.1/go.mod h1:TTpqoB6KkwOJMV8u7+NyXMrkwwESJLOkfl9TxR1DGFc=
-github.com/golangci/revgrep v0.5.3 h1:3tL7c1XBMtWHHqVpS5ChmiAAoe4PF/d5+ULzV9sLAzs=
-github.com/golangci/revgrep v0.5.3/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k=
+github.com/golangci/revgrep v0.5.2 h1:EndcWoRhcnfj2NHQ+28hyuXpLMF+dQmCN+YaeeIl4FU=
+github.com/golangci/revgrep v0.5.2/go.mod h1:bjAMA+Sh/QUfTDcHzxfyHxr4xKvllVr/0sCv2e7jJHA=
github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed h1:IURFTjxeTfNFP0hTEi1YKjB/ub8zkpaOqFFMApi2EAs=
github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed/go.mod h1:XLXN8bNw4CGRPaqgl3bv/lhz7bsGPh4/xSaMTbo2vkQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -266,8 +253,7 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA=
-github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
@@ -284,10 +270,9 @@ github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3
github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A=
github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M=
github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY=
-github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU=
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
-github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
@@ -303,8 +288,8 @@ github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjz
github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48=
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
-github.com/jjti/go-spancheck v0.6.2 h1:iYtoxqPMzHUPp7St+5yA8+cONdyXD3ug6KK15n7Pklk=
-github.com/jjti/go-spancheck v0.6.2/go.mod h1:+X7lvIrR5ZdUTkxFYqzJ0abr8Sb5LOo80uOhWNqIrYA=
+github.com/jjti/go-spancheck v0.5.3 h1:vfq4s2IB8T3HvbpiwDTYgVPj1Ze/ZSXrTtaZRTc7CuM=
+github.com/jjti/go-spancheck v0.5.3/go.mod h1:eQdOX1k3T+nAKvZDyLC3Eby0La4dZ+I19iOl5NzSPFE=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -316,8 +301,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY=
github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
-github.com/karamaru-alpha/copyloopvar v1.1.0 h1:x7gNyKcC2vRBO1H2Mks5u1VxQtYvFiym7fCjIP8RPos=
-github.com/karamaru-alpha/copyloopvar v1.1.0/go.mod h1:u7CIfztblY0jZLOQZgH3oYsJzpC2A7S6u/lfgSXHy0k=
+github.com/karamaru-alpha/copyloopvar v1.0.10 h1:8HYDy6KQYqTmD7JuhZMWS1nwPru9889XI24ROd/+WXI=
+github.com/karamaru-alpha/copyloopvar v1.0.10/go.mod h1:u7CIfztblY0jZLOQZgH3oYsJzpC2A7S6u/lfgSXHy0k=
github.com/kisielk/errcheck v1.7.0 h1:+SbscKmWJ5mOK/bO1zS60F5I9WwZDWOfRsC4RwfwRV0=
github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
@@ -328,25 +313,21 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
-github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs=
github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I=
github.com/kunwardeep/paralleltest v1.0.10 h1:wrodoaKYzS2mdNVnc4/w31YaXFtsc21PCTdvWJ/lDDs=
github.com/kunwardeep/paralleltest v1.0.10/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY=
github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ=
github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA=
-github.com/lasiar/canonicalheader v1.1.1 h1:wC+dY9ZfiqiPwAexUApFush/csSPXeIi4QqyxXmng8I=
-github.com/lasiar/canonicalheader v1.1.1/go.mod h1:cXkb3Dlk6XXy+8MVQnF23CYKWlyA7kfQhSw2CcZtZb0=
github.com/ldez/gomoddirectives v0.2.4 h1:j3YjBIjEBbqZ0NKtBNzr8rtMHTOrLPeiwTkfUJZ3alg=
github.com/ldez/gomoddirectives v0.2.4/go.mod h1:oWu9i62VcQDYp9EQ0ONTfqLNh+mDLWWDO+SO0qSQw5g=
github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo=
github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4=
-github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY=
-github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA=
+github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU=
+github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY=
github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM=
github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM=
github.com/macabu/inamedparam v0.1.3 h1:2tk/phHkMlEL/1GNe/Yf6kkR/hkcUdAEY3L0hjYV1Mk=
@@ -370,8 +351,8 @@ github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/Qd
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mgechev/revive v1.3.9 h1:18Y3R4a2USSBF+QZKFQwVkBROUda7uoBlkEuBD+YD1A=
-github.com/mgechev/revive v1.3.9/go.mod h1:+uxEIr5UH0TjXWHTno3xh4u7eg6jDpXKzQccA9UGhHU=
+github.com/mgechev/revive v1.3.7 h1:502QY0vQGe9KtYJ9FpxMz9rL+Fc/P13CI5POL4uHCcE=
+github.com/mgechev/revive v1.3.7/go.mod h1:RJ16jUbF0OWC3co/+XTxmFNgEpUPwnnA0BRllX2aDNA=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@@ -381,12 +362,14 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/moricho/tparallel v0.3.2 h1:odr8aZVFA3NZrNybggMkYO3rgPRcqjeQUlBBFVxKHTI=
-github.com/moricho/tparallel v0.3.2/go.mod h1:OQ+K3b4Ln3l2TZveGCywybl68glfLEwFGqvnjok8b+U=
+github.com/moricho/tparallel v0.3.1 h1:fQKD4U1wRMAYNngDonW5XupoB/ZGJHdpzrWqgyg9krA=
+github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U=
github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhKRf3Swg=
github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs=
github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk=
@@ -395,31 +378,27 @@ github.com/nunnatsa/ginkgolinter v0.16.2 h1:8iLqHIZvN4fTLDC0Ke9tbSZVcyVHoBs0HIbn
github.com/nunnatsa/ginkgolinter v0.16.2/go.mod h1:4tWRinDN1FeJgU+iJANW/kz7xKN5nYRAOfJDQUS9dOQ=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
-github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4=
-github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag=
-github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8=
-github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc=
+github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
+github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo=
github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
-github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
-github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
+github.com/pelletier/go-toml/v2 v2.2.0 h1:QLgLl2yMN7N+ruc31VynXs1vhMZa7CeHHejIeBAsoHo=
+github.com/pelletier/go-toml/v2 v2.2.0/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/polyfloyd/go-errorlint v1.6.0 h1:tftWV9DE7txiFzPpztTAwyoRLKNj9gpVm2cg8/OwcYY=
-github.com/polyfloyd/go-errorlint v1.6.0/go.mod h1:HR7u8wuP1kb1NeN1zqTd1ZMlqUKPPHF+Id4vIPvDqVw=
+github.com/polyfloyd/go-errorlint v1.4.8 h1:jiEjKDH33ouFktyez7sckv6pHWif9B7SuS8cutDXFHw=
+github.com/polyfloyd/go-errorlint v1.4.8/go.mod h1:NNCxFcFjZcw3xNjVdCchERkEM6Oz7wta2XJVxRftwO4=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
-github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
@@ -442,10 +421,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 h1:+Wl/0aFp0hpuHM3H//KMft64WQ1yX9LdJY64Qm/gFCo=
-github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI=
-github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE=
-github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
+github.com/quasilyte/go-ruleguard v0.4.2 h1:htXcXDK6/rO12kiTHKfHuqR4kr3Y4M0J0rOL6CH/BYs=
+github.com/quasilyte/go-ruleguard v0.4.2/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI=
github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo=
github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng=
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU=
@@ -454,10 +431,9 @@ github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4l
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryancurrah/gomodguard v1.3.5 h1:cShyguSwUEeC0jS7ylOiG/idnd1TpJ1LfHGpV3oJmPU=
-github.com/ryancurrah/gomodguard v1.3.5/go.mod h1:MXlEPQRxgfPQa62O8wzK3Ozbkv9Rkqr+wKjSxTdsNJE=
+github.com/ryancurrah/gomodguard v1.3.1 h1:fH+fUg+ngsQO0ruZXXHnA/2aNllWA1whly4a6UvyzGE=
+github.com/ryancurrah/gomodguard v1.3.1/go.mod h1:DGFHzEhi6iJ0oIDfMuo3TgrS+L9gZvrEfmjjuelnRU0=
github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU=
github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ=
github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc=
@@ -466,10 +442,10 @@ github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6Ng
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw=
github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ=
-github.com/sashamelentyev/usestdlibvars v1.27.0 h1:t/3jZpSXtRPRf2xr0m63i32ZrusyurIGT9E5wAvXQnI=
-github.com/sashamelentyev/usestdlibvars v1.27.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8=
-github.com/securego/gosec/v2 v2.21.2 h1:deZp5zmYf3TWwU7A7cR2+SolbTpZ3HQiwFqnzQyEl3M=
-github.com/securego/gosec/v2 v2.21.2/go.mod h1:au33kg78rNseF5PwPnTWhuYBFf534bvJRvOrgZ/bFzU=
+github.com/sashamelentyev/usestdlibvars v1.25.0 h1:IK8SI2QyFzy/2OD2PYnhy84dpfNo9qADrRt6LH8vSzU=
+github.com/sashamelentyev/usestdlibvars v1.25.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8=
+github.com/securego/gosec/v2 v2.19.0 h1:gl5xMkOI0/E6Hxx0XCY2XujA3V7SNSefA8sC+3f1gnk=
+github.com/securego/gosec/v2 v2.19.0/go.mod h1:hOkDcHz9J/XIgIlPDXalxjeVYsHxoWUc5zJSHxcB8YM=
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
@@ -481,8 +457,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE=
github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4=
-github.com/sivchari/tenv v1.10.0 h1:g/hzMA+dBCKqGXgW8AV/1xIWhAvDrx0zFKNR48NFMg0=
-github.com/sivchari/tenv v1.10.0/go.mod h1:tdY24masnVoZFxYrHv/nD6Tc8FbkEtAQEEziXpyMgqY=
+github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak=
+github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg=
github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00=
github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo=
github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0=
@@ -491,8 +467,8 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
-github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
-github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
+github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -521,34 +497,36 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8=
+github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk=
github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM=
github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg=
github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA=
github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag=
github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
-github.com/tetafro/godot v1.4.17 h1:pGzu+Ye7ZUEFx7LHU0dAKmCOXWsPjl7qA6iMGndsjPs=
-github.com/tetafro/godot v1.4.17/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio=
+github.com/tetafro/godot v1.4.16 h1:4ChfhveiNLk4NveAZ9Pu2AN8QZ2nkUGFuadM9lrr5D0=
+github.com/tetafro/godot v1.4.16/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio=
github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M=
github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ=
github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4=
github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg=
-github.com/tomarrell/wrapcheck/v2 v2.9.0 h1:801U2YCAjLhdN8zhZ/7tdjB3EnAoRlJHt/s+9hijLQ4=
-github.com/tomarrell/wrapcheck/v2 v2.9.0/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo=
+github.com/tomarrell/wrapcheck/v2 v2.8.3 h1:5ov+Cbhlgi7s/a42BprYoxsr73CbdMUTzE3bRDFASUs=
+github.com/tomarrell/wrapcheck/v2 v2.8.3/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo=
github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw=
github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI=
github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4=
-github.com/ultraware/whitespace v0.1.1 h1:bTPOGejYFulW3PkcrqkeQwOd6NKOOXvmGD9bo/Gk8VQ=
-github.com/ultraware/whitespace v0.1.1/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8=
-github.com/uudashr/gocognit v1.1.3 h1:l+a111VcDbKfynh+airAy/DJQKaXh2m9vkoysMPSZyM=
-github.com/uudashr/gocognit v1.1.3/go.mod h1:aKH8/e8xbTRBwjbCkwZ8qt4l2EpKXl31KMHgSS+lZ2U=
+github.com/ultraware/whitespace v0.1.0 h1:O1HKYoh0kIeqE8sFqZf1o0qbORXUCOQFrlaQyZsczZw=
+github.com/ultraware/whitespace v0.1.0/go.mod h1:/se4r3beMFNmewJ4Xmz0nMQ941GJt+qmSHGP9emHYe0=
+github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvniI=
+github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k=
github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU=
github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg=
github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM=
github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk=
-github.com/yeya24/promlinter v0.3.0 h1:JVDbMp08lVCP7Y6NP3qHroGAO6z2yGKQtS5JsjqtoFs=
-github.com/yeya24/promlinter v0.3.0/go.mod h1:cDfJQQYv9uYciW60QT0eeHlFodotkYZlL+YcPQN+mW4=
+github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o=
+github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA=
github.com/ykadowak/zerologlint v0.1.5 h1:Gy/fMz1dFQN9JZTPjv1hxEk+sRWm05row04Yoolgdiw=
github.com/ykadowak/zerologlint v0.1.5/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -558,14 +536,13 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-gitlab.com/bosi/decorder v0.4.2 h1:qbQaV3zgwnBZ4zPMhGLW4KZe7A7NwxEhJx39R3shffo=
-gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8=
-go-simpler.org/assert v0.9.0 h1:PfpmcSvL7yAnWyChSjOz6Sp6m9j5lyK8Ok9pEL31YkQ=
-go-simpler.org/assert v0.9.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28=
-go-simpler.org/musttag v0.12.2 h1:J7lRc2ysXOq7eM8rwaTYnNrHd5JwjppzB6mScysB2Cs=
-go-simpler.org/musttag v0.12.2/go.mod h1:uN1DVIasMTQKk6XSik7yrJoEysGtR2GRqvWnI9S7TYM=
-go-simpler.org/sloglint v0.7.2 h1:Wc9Em/Zeuu7JYpl+oKoYOsQSy2X560aVueCW/m6IijY=
-go-simpler.org/sloglint v0.7.2/go.mod h1:US+9C80ppl7VsThQclkM7BkCHQAzuz8kHLsW3ppuluo=
+gitlab.com/bosi/decorder v0.4.1 h1:VdsdfxhstabyhZovHafFw+9eJ6eU0d2CkFNJcZz/NU4=
+gitlab.com/bosi/decorder v0.4.1/go.mod h1:jecSqWUew6Yle1pCr2eLWTensJMmsxHsBwt+PVbkAqA=
+go-simpler.org/assert v0.7.0 h1:OzWWZqfNxt8cLS+MlUp6Tgk1HjPkmgdKBq9qvy8lZsA=
+go-simpler.org/musttag v0.9.0 h1:Dzt6/tyP9ONr5g9h9P3cnYWCxeBFRkd0uJL/w+1Mxos=
+go-simpler.org/musttag v0.9.0/go.mod h1:gA9nThnalvNSKpEoyp3Ko4/vCX2xTpqKoUtNqXOnVR4=
+go-simpler.org/sloglint v0.5.0 h1:2YCcd+YMuYpuqthCgubcF5lBSjb6berc5VMOYUHKrpY=
+go-simpler.org/sloglint v0.5.0/go.mod h1:EUknX5s8iXqf18KQxKnaBHUPVriiPnOrPjjJcsaTcSQ=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -576,7 +553,6 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
-go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
@@ -599,8 +575,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e h1:I88y4caeGeuDQxgdoFPUq097j7kNfw6uvuiNxUBfcBk=
-golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
+golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM=
+golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8=
@@ -633,8 +609,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
-golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
+golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -673,8 +649,7 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
-golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
+golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -694,8 +669,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -741,6 +716,7 @@ golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -749,8 +725,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
-golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -767,8 +743,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
-golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -827,13 +803,14 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
-golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
+golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
+golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -913,14 +890,14 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
@@ -941,12 +918,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I=
-honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs=
-mvdan.cc/gofumpt v0.7.0 h1:bg91ttqXmi9y2xawvkuMXyvAA/1ZGJqYAEGjXuP0JXU=
-mvdan.cc/gofumpt v0.7.0/go.mod h1:txVFJy/Sc/mvaycET54pV8SW8gWxTlUuGHVEcncmNUo=
-mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f h1:lMpcwN6GxNbWtbpI1+xzFLSW8XzX0u72NttUGVFjO3U=
-mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f/go.mod h1:RSLa7mKKCNeTTMHBw5Hsy2rfJmd6O2ivt9Dw9ZqCQpQ=
+honnef.co/go/tools v0.4.7 h1:9MDAWxMoSnB6QoSqiVr7P5mtkT9pOc1kSxchzPCnqJs=
+honnef.co/go/tools v0.4.7/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0=
+mvdan.cc/gofumpt v0.6.0 h1:G3QvahNDmpD+Aek/bNOLrFR2XC6ZAdo62dZu65gmwGo=
+mvdan.cc/gofumpt v0.6.0/go.mod h1:4L0wf+kgIPZtcCWXynNS2e6bhmj73umwnuXSZarixzA=
+mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14 h1:zCr3iRRgdk5eIikZNDphGcM6KGVTx3Yu+/Uu9Es254w=
+mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14/go.mod h1:ZzZjEpJDOmx8TdVU6umamY3Xy0UAQUI2DHbf05USVbI=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/upstream/vendor/cloud.google.com/go/.gitignore b/upstream/vendor/cloud.google.com/go/.gitignore
deleted file mode 100644
index cc7e53b46c0..00000000000
--- a/upstream/vendor/cloud.google.com/go/.gitignore
+++ /dev/null
@@ -1,12 +0,0 @@
-# Editors
-.idea
-.vscode
-*.swp
-.history
-
-# Test files
-*.test
-coverage.txt
-
-# Other
-.DS_Store
diff --git a/upstream/vendor/cloud.google.com/go/.release-please-manifest-individual.json b/upstream/vendor/cloud.google.com/go/.release-please-manifest-individual.json
deleted file mode 100644
index c434cab6bc0..00000000000
--- a/upstream/vendor/cloud.google.com/go/.release-please-manifest-individual.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "aiplatform": "1.67.0",
- "auth": "0.4.0",
- "auth/oauth2adapt": "0.2.2",
- "bigquery": "1.61.0",
- "bigtable": "1.23.0",
- "datastore": "1.17.0",
- "errorreporting": "0.3.0",
- "firestore": "1.15.0",
- "logging": "1.9.0",
- "profiler": "0.4.0",
- "pubsub": "1.38.0",
- "pubsublite": "1.8.1",
- "spanner": "1.61.0",
- "storage": "1.40.0",
- "vertexai": "0.8.0"
-}
diff --git a/upstream/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/upstream/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
deleted file mode 100644
index 81dcc66deb1..00000000000
--- a/upstream/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
+++ /dev/null
@@ -1,145 +0,0 @@
-{
- "accessapproval": "1.7.7",
- "accesscontextmanager": "1.8.7",
- "advisorynotifications": "1.4.1",
- "alloydb": "1.10.2",
- "analytics": "0.23.2",
- "apigateway": "1.6.7",
- "apigeeconnect": "1.6.7",
- "apigeeregistry": "0.8.5",
- "apikeys": "1.1.7",
- "appengine": "1.8.7",
- "apphub": "0.1.1",
- "apps": "0.4.2",
- "area120": "0.8.7",
- "artifactregistry": "1.14.9",
- "asset": "1.19.1",
- "assuredworkloads": "1.11.7",
- "automl": "1.13.7",
- "backupdr": "0.1.1",
- "baremetalsolution": "1.2.6",
- "batch": "1.8.5",
- "beyondcorp": "1.0.6",
- "billing": "1.18.5",
- "binaryauthorization": "1.8.3",
- "certificatemanager": "1.8.1",
- "channel": "1.17.7",
- "chat": "0.1.0",
- "cloudbuild": "1.16.1",
- "cloudcontrolspartner": "0.2.1",
- "clouddms": "1.7.6",
- "cloudprofiler": "0.3.2",
- "cloudquotas": "0.2.1",
- "cloudtasks": "1.12.8",
- "commerce": "0.1.6",
- "compute": "1.26.0",
- "compute/metadata": "0.3.0",
- "confidentialcomputing": "1.5.1",
- "config": "0.4.0",
- "contactcenterinsights": "1.13.2",
- "container": "1.35.1",
- "containeranalysis": "0.11.6",
- "datacatalog": "1.20.1",
- "dataflow": "0.9.7",
- "dataform": "0.9.4",
- "datafusion": "1.7.7",
- "datalabeling": "0.8.7",
- "dataplex": "1.16.0",
- "dataproc": "2.4.2",
- "dataqna": "0.8.7",
- "datastream": "1.10.6",
- "deploy": "1.18.1",
- "dialogflow": "1.53.0",
- "discoveryengine": "1.7.1",
- "dlp": "1.13.0",
- "documentai": "1.28.0",
- "domains": "0.9.7",
- "edgecontainer": "1.2.1",
- "edgenetwork": "0.2.4",
- "essentialcontacts": "1.6.8",
- "eventarc": "1.13.6",
- "filestore": "1.8.3",
- "functions": "1.16.2",
- "gkebackup": "1.4.1",
- "gkeconnect": "0.8.7",
- "gkehub": "0.14.7",
- "gkemulticloud": "1.1.3",
- "grafeas": "0.3.6",
- "gsuiteaddons": "1.6.7",
- "iam": "1.1.8",
- "iap": "1.9.6",
- "identitytoolkit": "0.0.0",
- "ids": "1.4.7",
- "iot": "1.7.7",
- "kms": "1.16.0",
- "language": "1.12.5",
- "lifesciences": "0.9.7",
- "longrunning": "0.5.7",
- "managedidentities": "1.6.7",
- "maps": "1.8.0",
- "mediatranslation": "0.8.7",
- "memcache": "1.10.7",
- "metastore": "1.13.6",
- "migrationcenter": "0.2.6",
- "monitoring": "1.19.0",
- "netapp": "0.2.8",
- "networkconnectivity": "1.14.6",
- "networkmanagement": "1.13.2",
- "networksecurity": "0.9.7",
- "notebooks": "1.11.5",
- "optimization": "1.6.5",
- "orchestration": "1.9.2",
- "orgpolicy": "1.12.3",
- "osconfig": "1.12.7",
- "oslogin": "1.13.3",
- "parallelstore": "0.2.0",
- "phishingprotection": "0.8.7",
- "policysimulator": "0.2.5",
- "policytroubleshooter": "1.10.5",
- "privatecatalog": "0.9.7",
- "rapidmigrationassessment": "1.0.7",
- "recaptchaenterprise": "2.13.0",
- "recommendationengine": "0.8.7",
- "recommender": "1.12.3",
- "redis": "1.14.4",
- "resourcemanager": "1.9.7",
- "resourcesettings": "1.6.7",
- "retail": "1.16.2",
- "run": "1.3.7",
- "scheduler": "1.10.8",
- "secretmanager": "1.13.0",
- "securesourcemanager": "0.1.5",
- "security": "1.16.1",
- "securitycenter": "1.30.0",
- "securitycentermanagement": "0.1.8",
- "securityposture": "0.1.3",
- "servicecontrol": "1.13.2",
- "servicedirectory": "1.11.6",
- "servicehealth": "0.1.4",
- "servicemanagement": "1.9.8",
- "serviceusage": "1.8.6",
- "shell": "1.7.7",
- "shopping": "0.6.0",
- "speech": "1.23.1",
- "storageinsights": "1.0.7",
- "storagetransfer": "1.10.6",
- "streetview": "0.1.0",
- "support": "1.0.6",
- "talent": "1.6.8",
- "telcoautomation": "0.2.2",
- "texttospeech": "1.7.7",
- "tpu": "1.6.7",
- "trace": "1.10.7",
- "translate": "1.10.3",
- "video": "1.20.6",
- "videointelligence": "1.11.7",
- "vision": "2.8.2",
- "visionai": "0.1.2",
- "vmmigration": "1.7.7",
- "vmwareengine": "1.1.3",
- "vpcaccess": "1.7.7",
- "webrisk": "1.9.7",
- "websecurityscanner": "1.6.7",
- "workflows": "1.12.6",
- "workstations": "0.5.6"
-}
diff --git a/upstream/vendor/cloud.google.com/go/.release-please-manifest.json b/upstream/vendor/cloud.google.com/go/.release-please-manifest.json
deleted file mode 100644
index f85d240a3dc..00000000000
--- a/upstream/vendor/cloud.google.com/go/.release-please-manifest.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- ".": "0.113.0"
-}
diff --git a/upstream/vendor/cloud.google.com/go/CHANGES.md b/upstream/vendor/cloud.google.com/go/CHANGES.md
deleted file mode 100644
index bba862161d5..00000000000
--- a/upstream/vendor/cloud.google.com/go/CHANGES.md
+++ /dev/null
@@ -1,2620 +0,0 @@
-# Changes
-
-## [0.113.0](https://github.com/googleapis/google-cloud-go/compare/v0.112.2...v0.113.0) (2024-05-08)
-
-
-### Features
-
-* **civil:** Add Compare method to Date, Time, and DateTime ([#10010](https://github.com/googleapis/google-cloud-go/issues/10010)) ([34455c1](https://github.com/googleapis/google-cloud-go/commit/34455c15d62b089f3281ff4c663245e72b257f37))
-
-
-### Bug Fixes
-
-* **all:** Bump x/net to v0.24.0 ([#10000](https://github.com/googleapis/google-cloud-go/issues/10000)) ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4))
-* **debugger:** Add internaloption.WithDefaultEndpointTemplate ([3b41408](https://github.com/googleapis/google-cloud-go/commit/3b414084450a5764a0248756e95e13383a645f90))
-* **internal/aliasfix:** Handle import paths correctly ([#10097](https://github.com/googleapis/google-cloud-go/issues/10097)) ([fafaf0d](https://github.com/googleapis/google-cloud-go/commit/fafaf0d0a293096559a4655ea61062cb896f1568))
-* **rpcreplay:** Properly unmarshal dynamic message ([#9774](https://github.com/googleapis/google-cloud-go/issues/9774)) ([53ccb20](https://github.com/googleapis/google-cloud-go/commit/53ccb20d925ccb00f861958d9658b55738097dc6)), refs [#9773](https://github.com/googleapis/google-cloud-go/issues/9773)
-
-
-### Documentation
-
-* **testing:** Switch deprecated WithInsecure to WithTransportCredentials ([#10091](https://github.com/googleapis/google-cloud-go/issues/10091)) ([2b576ab](https://github.com/googleapis/google-cloud-go/commit/2b576abd1c3bfca2f962de0e024524f72d3652c0))
-
-## [0.112.2](https://github.com/googleapis/google-cloud-go/compare/v0.112.1...v0.112.2) (2024-03-27)
-
-
-### Bug Fixes
-
-* **all:** Release protobuf dep bump ([#9586](https://github.com/googleapis/google-cloud-go/issues/9586)) ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
-
-## [0.112.1](https://github.com/googleapis/google-cloud-go/compare/v0.112.0...v0.112.1) (2024-02-26)
-
-
-### Bug Fixes
-
-* **internal/postprocessor:** Handle googleapis link in commit body ([#9251](https://github.com/googleapis/google-cloud-go/issues/9251)) ([1dd3515](https://github.com/googleapis/google-cloud-go/commit/1dd35157bff871a2b3e5b0e3cac33502737fd631))
-
-
-### Documentation
-
-* **main:** Add OpenTelemetry-Go compatibility warning to debug.md ([#9268](https://github.com/googleapis/google-cloud-go/issues/9268)) ([18f9bb9](https://github.com/googleapis/google-cloud-go/commit/18f9bb94fbc239255a873b29462fc7c2eac3c0aa)), refs [#9267](https://github.com/googleapis/google-cloud-go/issues/9267)
-
-## [0.112.0](https://github.com/googleapis/google-cloud-go/compare/v0.111.0...v0.112.0) (2024-01-11)
-
-
-### Features
-
-* **internal/trace:** Export internal/trace package constants and vars ([#9242](https://github.com/googleapis/google-cloud-go/issues/9242)) ([941c16f](https://github.com/googleapis/google-cloud-go/commit/941c16f3a2602e9bdc737b139060a7dd8318f9dd))
-
-
-### Documentation
-
-* **main:** Add telemetry discussion to debug.md ([#9074](https://github.com/googleapis/google-cloud-go/issues/9074)) ([90ed12e](https://github.com/googleapis/google-cloud-go/commit/90ed12e1dffe722b42f58556f0e17b808da9714d)), refs [#8655](https://github.com/googleapis/google-cloud-go/issues/8655)
-
-## [0.111.0](https://github.com/googleapis/google-cloud-go/compare/v0.110.10...v0.111.0) (2023-11-29)
-
-
-### Features
-
-* **internal/trace:** Add OpenTelemetry support ([#8655](https://github.com/googleapis/google-cloud-go/issues/8655)) ([7a46b54](https://github.com/googleapis/google-cloud-go/commit/7a46b5428f239871993d66be2c7c667121f60a6f)), refs [#2205](https://github.com/googleapis/google-cloud-go/issues/2205)
-
-
-### Bug Fixes
-
-* **all:** Bump google.golang.org/api to v0.149.0 ([#8959](https://github.com/googleapis/google-cloud-go/issues/8959)) ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880))
-
-## [0.110.10](https://github.com/googleapis/google-cloud-go/compare/v0.110.9...v0.110.10) (2023-10-31)
-
-
-### Bug Fixes
-
-* **all:** Update grpc-go to v1.56.3 ([#8916](https://github.com/googleapis/google-cloud-go/issues/8916)) ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c))
-* **all:** Update grpc-go to v1.59.0 ([#8922](https://github.com/googleapis/google-cloud-go/issues/8922)) ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7))
-* **internal/godocfx:** Fix links to other packages in summary ([#8756](https://github.com/googleapis/google-cloud-go/issues/8756)) ([6220a9a](https://github.com/googleapis/google-cloud-go/commit/6220a9afeb89df3080e9e663e97648939fd4e15f))
-
-## [0.110.9](https://github.com/googleapis/google-cloud-go/compare/v0.110.8...v0.110.9) (2023-10-19)
-
-
-### Bug Fixes
-
-* **all:** Update golang.org/x/net to v0.17.0 ([#8705](https://github.com/googleapis/google-cloud-go/issues/8705)) ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
-* **internal/aliasgen:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
-* **internal/examples/fake:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
-* **internal/gapicgen:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
-* **internal/generated/snippets:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
-* **internal/godocfx:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
-* **internal/postprocessor:** Add ability to override release level ([#8643](https://github.com/googleapis/google-cloud-go/issues/8643)) ([26c608a](https://github.com/googleapis/google-cloud-go/commit/26c608a8204d740767dfebf6aa473cdf1873e5f0))
-* **internal/postprocessor:** Add missing assignment ([#8646](https://github.com/googleapis/google-cloud-go/issues/8646)) ([d8c5746](https://github.com/googleapis/google-cloud-go/commit/d8c5746e6dde1bd34c01a9886804f861c88c0cb7))
-* **internal/postprocessor:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
-
-## [0.110.8](https://github.com/googleapis/google-cloud-go/compare/v0.110.7...v0.110.8) (2023-09-11)
-
-
-### Documentation
-
-* **postprocessor:** Nudge users towards stable clients ([#8513](https://github.com/googleapis/google-cloud-go/issues/8513)) ([05a1484](https://github.com/googleapis/google-cloud-go/commit/05a1484b0752aaa3d6a164d37686d6de070cc78d))
-
-## [0.110.7](https://github.com/googleapis/google-cloud-go/compare/v0.110.6...v0.110.7) (2023-07-31)
-
-
-### Bug Fixes
-
-* **main:** Add more docs to base package ([c401ab4](https://github.com/googleapis/google-cloud-go/commit/c401ab4a576c64ab2b8840a90f7ccd5d031cea57))
-
-## [0.110.6](https://github.com/googleapis/google-cloud-go/compare/v0.110.5...v0.110.6) (2023-07-13)
-
-
-### Bug Fixes
-
-* **httpreplay:** Ignore GCS header by default ([#8260](https://github.com/googleapis/google-cloud-go/issues/8260)) ([b961a1a](https://github.com/googleapis/google-cloud-go/commit/b961a1abe7aeafe420c88eed38035fed0bbf7bbe)), refs [#8233](https://github.com/googleapis/google-cloud-go/issues/8233)
-
-## [0.110.5](https://github.com/googleapis/google-cloud-go/compare/v0.110.4...v0.110.5) (2023-07-07)
-
-
-### Bug Fixes
-
-* **logadmin:** Use consistent filter in paging example ([#8221](https://github.com/googleapis/google-cloud-go/issues/8221)) ([9570159](https://github.com/googleapis/google-cloud-go/commit/95701597b1d709543ea22a4b6ff9b28b14a2d4fc))
-
-## [0.110.4](https://github.com/googleapis/google-cloud-go/compare/v0.110.3...v0.110.4) (2023-07-05)
-
-
-### Bug Fixes
-
-* **internal/retry:** Simplify gRPC status code mapping of retry error ([#8196](https://github.com/googleapis/google-cloud-go/issues/8196)) ([e8b224a](https://github.com/googleapis/google-cloud-go/commit/e8b224a3bcb0ca9430990ef6ae8ddb7b60f5225d))
-
-## [0.110.3](https://github.com/googleapis/google-cloud-go/compare/v0.110.2...v0.110.3) (2023-06-23)
-
-
-### Bug Fixes
-
-* **internal/retry:** Never return nil from GRPCStatus() ([#8128](https://github.com/googleapis/google-cloud-go/issues/8128)) ([005d2df](https://github.com/googleapis/google-cloud-go/commit/005d2dfb6b68bf5a35bfb8db449d3f0084b34d6e))
-
-
-### Documentation
-
-* **v1:** Minor clarifications for TaskGroup and min_cpu_platform ([3382ef8](https://github.com/googleapis/google-cloud-go/commit/3382ef81b6bcefe1c7bfc14aa5ff9bbf25850966))
-
-## [0.110.2](https://github.com/googleapis/google-cloud-go/compare/v0.110.1...v0.110.2) (2023-05-08)
-
-
-### Bug Fixes
-
-* **deps:** Update grpc to v1.55.0 ([#7885](https://github.com/googleapis/google-cloud-go/issues/7885)) ([9fc48a9](https://github.com/googleapis/google-cloud-go/commit/9fc48a921428c94c725ea90415d55ff0c177dd81))
-
-## [0.110.1](https://github.com/googleapis/google-cloud-go/compare/v0.110.0...v0.110.1) (2023-05-03)
-
-
-### Bug Fixes
-
-* **httpreplay:** Add ignore-header flag, fix tests ([#7865](https://github.com/googleapis/google-cloud-go/issues/7865)) ([1829706](https://github.com/googleapis/google-cloud-go/commit/1829706c5ade36cc786b2e6780fda5e7302f965b))
-
-## [0.110.0](https://github.com/googleapis/google-cloud-go/compare/v0.109.0...v0.110.0) (2023-02-15)
-
-
-### Features
-
-* **internal/postprocessor:** Detect and initialize new modules ([#7288](https://github.com/googleapis/google-cloud-go/issues/7288)) ([59ce02c](https://github.com/googleapis/google-cloud-go/commit/59ce02c13f265741a8f1f0f7ad5109bf83e3df82))
-* **internal/postprocessor:** Only regen snippets for changed modules ([#7300](https://github.com/googleapis/google-cloud-go/issues/7300)) ([220f8a5](https://github.com/googleapis/google-cloud-go/commit/220f8a5ad2fd64b75c5a1af531b1ab4597cf17d7))
-
-
-### Bug Fixes
-
-* **internal/postprocessor:** Add scopes without OwlBot api-name feature ([#7404](https://github.com/googleapis/google-cloud-go/issues/7404)) ([f7fe4f6](https://github.com/googleapis/google-cloud-go/commit/f7fe4f68ebf2ca28efd282f3419329dd2c09d245))
-* **internal/postprocessor:** Include module and package in scope ([#7294](https://github.com/googleapis/google-cloud-go/issues/7294)) ([d2c5c84](https://github.com/googleapis/google-cloud-go/commit/d2c5c8449f6939301f0fd506282e8fc73fc84f96))
-
-## [0.109.0](https://github.com/googleapis/google-cloud-go/compare/v0.108.0...v0.109.0) (2023-01-18)
-
-
-### Features
-
-* **internal/postprocessor:** Make OwlBot postprocessor ([#7202](https://github.com/googleapis/google-cloud-go/issues/7202)) ([7a1022e](https://github.com/googleapis/google-cloud-go/commit/7a1022e215261d679c8496cdd35a9cad1f13e527))
-
-## [0.108.0](https://github.com/googleapis/google-cloud-go/compare/v0.107.0...v0.108.0) (2023-01-05)
-
-
-### Features
-
-* **all:** Enable REGAPIC and REST numeric enums ([#6999](https://github.com/googleapis/google-cloud-go/issues/6999)) ([28f3572](https://github.com/googleapis/google-cloud-go/commit/28f3572addb0f563a2a42a76977b4e083191613f))
-* **debugger:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0))
-
-
-### Bug Fixes
-
-* **internal/gapicgen:** Disable rest for non-rest APIs ([#7157](https://github.com/googleapis/google-cloud-go/issues/7157)) ([ab332ce](https://github.com/googleapis/google-cloud-go/commit/ab332ced06f6c07909444e4528c02a8b6a0a70a6))
-
-## [0.107.0](https://github.com/googleapis/google-cloud-go/compare/v0.106.0...v0.107.0) (2022-11-15)
-
-
-### Features
-
-* **routing:** Start generating apiv2 ([#7011](https://github.com/googleapis/google-cloud-go/issues/7011)) ([66e8e27](https://github.com/googleapis/google-cloud-go/commit/66e8e2717b2593f4e5640ecb97344bb1d5e5fc0b))
-
-## [0.106.0](https://github.com/googleapis/google-cloud-go/compare/v0.105.0...v0.106.0) (2022-11-09)
-
-
-### Features
-
-* **debugger:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad))
-
-## [0.104.0](https://github.com/googleapis/google-cloud-go/compare/v0.103.0...v0.104.0) (2022-08-24)
-
-
-### Features
-
-* **godocfx:** add friendlyAPIName ([#6447](https://github.com/googleapis/google-cloud-go/issues/6447)) ([c6d3ba4](https://github.com/googleapis/google-cloud-go/commit/c6d3ba401b7b3ae9b710a8850c6ec5d49c4c1490))
-
-## [0.103.0](https://github.com/googleapis/google-cloud-go/compare/v0.102.1...v0.103.0) (2022-06-29)
-
-
-### Features
-
-* **privateca:** temporarily remove REGAPIC support ([199b725](https://github.com/googleapis/google-cloud-go/commit/199b7250f474b1a6f53dcf0aac0c2966f4987b68))
-
-## [0.102.1](https://github.com/googleapis/google-cloud-go/compare/v0.102.0...v0.102.1) (2022-06-17)
-
-
-### Bug Fixes
-
-* **longrunning:** regapic remove path params duped as query params ([#6183](https://github.com/googleapis/google-cloud-go/issues/6183)) ([c963be3](https://github.com/googleapis/google-cloud-go/commit/c963be301f074779e6bb8c897d8064fa076e9e35))
-
-## [0.102.0](https://github.com/googleapis/google-cloud-go/compare/v0.101.1...v0.102.0) (2022-05-24)
-
-
-### Features
-
-* **civil:** add Before and After methods to civil.Time ([#5703](https://github.com/googleapis/google-cloud-go/issues/5703)) ([7acaaaf](https://github.com/googleapis/google-cloud-go/commit/7acaaafef47668c3e8382b8bc03475598c3db187))
-
-### [0.101.1](https://github.com/googleapis/google-cloud-go/compare/v0.101.0...v0.101.1) (2022-05-03)
-
-
-### Bug Fixes
-
-* **internal/gapicgen:** properly update modules that have no gapic changes ([#5945](https://github.com/googleapis/google-cloud-go/issues/5945)) ([de2befc](https://github.com/googleapis/google-cloud-go/commit/de2befcaa2a886499db9da6d4d04d28398c8d44b))
-
-## [0.101.0](https://github.com/googleapis/google-cloud-go/compare/v0.100.2...v0.101.0) (2022-04-20)
-
-
-### Features
-
-* **all:** bump grpc dep ([#5481](https://github.com/googleapis/google-cloud-go/issues/5481)) ([b12964d](https://github.com/googleapis/google-cloud-go/commit/b12964df5c63c647aaf204e73cfcdfd379d19682))
-* **internal/gapicgen:** change versionClient for gapics ([#5687](https://github.com/googleapis/google-cloud-go/issues/5687)) ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9))
-
-
-### Bug Fixes
-
-* **internal/gapicgen:** add generation of internal/version.go for new client modules ([#5726](https://github.com/googleapis/google-cloud-go/issues/5726)) ([341e0df](https://github.com/googleapis/google-cloud-go/commit/341e0df1e44480706180cc5b07c49b3cee904095))
-* **internal/gapicgen:** don't gen version files for longrunning and debugger ([#5698](https://github.com/googleapis/google-cloud-go/issues/5698)) ([3a81108](https://github.com/googleapis/google-cloud-go/commit/3a81108c74cd8864c56b8ab5939afd864db3c64b))
-* **internal/gapicgen:** don't try to make snippets for non-gapics ([#5919](https://github.com/googleapis/google-cloud-go/issues/5919)) ([c94dddc](https://github.com/googleapis/google-cloud-go/commit/c94dddc60ef83a0584ba8f7dd24589d9db971672))
-* **internal/gapicgen:** move breaking change indicator if present ([#5452](https://github.com/googleapis/google-cloud-go/issues/5452)) ([e712df5](https://github.com/googleapis/google-cloud-go/commit/e712df5ebb45598a1653081d7e11e578bad22ff8))
-* **internal/godocfx:** prevent errors for filtered mods ([#5485](https://github.com/googleapis/google-cloud-go/issues/5485)) ([6cb9b89](https://github.com/googleapis/google-cloud-go/commit/6cb9b89b2d654c695eab00d8fb375cce0cd6e059))
-
-## [0.100.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.99.0...v0.100.0) (2022-01-04)
-
-
-### Features
-
-* **analytics/admin:** add the `AcknowledgeUserDataCollection` operation which acknowledges the terms of user data collection for the specified property feat: add the new resource type `DataStream`, which is planned to eventually replace `WebDataStream`, `IosAppDataStream`, `AndroidAppDataStream` resources fix!: remove `GetEnhancedMeasurementSettings`, `UpdateEnhancedMeasurementSettingsRequest`, `UpdateEnhancedMeasurementSettingsRequest` operations from the API feat: add `CreateDataStream`, `DeleteDataStream`, `UpdateDataStream`, `ListDataStreams` operations to support the new `DataStream` resource feat: add `DISPLAY_VIDEO_360_ADVERTISER_LINK`, `DISPLAY_VIDEO_360_ADVERTISER_LINK_PROPOSAL` fields to `ChangeHistoryResourceType` enum feat: add the `account` field to the `Property` type docs: update the documentation with a new list of valid values for `UserLink.direct_roles` field ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023))
-* **assuredworkloads:** EU Regions and Support With Sovereign Controls ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023))
-* **dialogflow/cx:** added the display name of the current page in webhook requests ([e0833b2](https://www.github.com/googleapis/google-cloud-go/commit/e0833b2853834ba79fd20ca2ae9c613d585dd2a5))
-* **dialogflow/cx:** added the display name of the current page in webhook requests ([e0833b2](https://www.github.com/googleapis/google-cloud-go/commit/e0833b2853834ba79fd20ca2ae9c613d585dd2a5))
-* **dialogflow:** added export documentation method feat: added filter in list documentations request feat: added option to import custom metadata from Google Cloud Storage in reload document request feat: added option to apply partial update to the smart messaging allowlist in reload document request feat: added filter in list knowledge bases request ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023))
-* **dialogflow:** removed OPTIONAL for speech model variant docs: added more docs for speech model variant and improved docs format for participant ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023))
-* **recaptchaenterprise:** add new reCAPTCHA Enterprise fraud annotations ([3dd34a2](https://www.github.com/googleapis/google-cloud-go/commit/3dd34a262edbff63b9aece8faddc2ff0d98ce42a))
-
-
-### Bug Fixes
-
-* **artifactregistry:** fix resource pattern ID segment name ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023))
-* **compute:** add parameter in compute bazel rules ([#692](https://www.github.com/googleapis/google-cloud-go/issues/692)) ([5444809](https://www.github.com/googleapis/google-cloud-go/commit/5444809e0b7cf9f5416645ea2df6fec96f8b9023))
-* **profiler:** refine regular expression for parsing backoff duration in E2E tests ([#5229](https://www.github.com/googleapis/google-cloud-go/issues/5229)) ([4438aeb](https://www.github.com/googleapis/google-cloud-go/commit/4438aebca2ec01d4dbf22287aa651937a381e043))
-* **profiler:** remove certificate expiration workaround ([#5222](https://www.github.com/googleapis/google-cloud-go/issues/5222)) ([2da36c9](https://www.github.com/googleapis/google-cloud-go/commit/2da36c95f44d5f88fd93cd949ab78823cea74fe7))
-
-## [0.99.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.98.0...v0.99.0) (2021-12-06)
-
-
-### Features
-
-* **dialogflow/cx:** added `TelephonyTransferCall` in response message ([fe27098](https://www.github.com/googleapis/google-cloud-go/commit/fe27098e5d429911428821ded57384353e699774))
-
-## [0.98.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.97.0...v0.98.0) (2021-12-03)
-
-
-### Features
-
-* **aiplatform:** add enable_private_service_connect field to Endpoint feat: add id field to DeployedModel feat: add service_attachment field to PrivateEndpoints feat: add endpoint_id to CreateEndpointRequest and method signature to CreateEndpoint feat: add method signature to CreateFeatureStore, CreateEntityType, CreateFeature feat: add network and enable_private_service_connect to IndexEndpoint feat: add service_attachment to IndexPrivateEndpoints feat: add stratified_split field to training_pipeline InputDataConfig ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e))
-* **aiplatform:** add featurestore service to aiplatform v1 feat: add metadata service to aiplatform v1 ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d))
-* **aiplatform:** Adds support for `google.protobuf.Value` pipeline parameters in the `parameter_values` field ([88a1cdb](https://www.github.com/googleapis/google-cloud-go/commit/88a1cdbef3cc337354a61bc9276725bfb9a686d8))
-* **aiplatform:** Tensorboard v1 protos release feat:Exposing a field for v1 CustomJob-Tensorboard integration. ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59))
-* **binaryauthorization:** add new admission rule types to Policy feat: update SignatureAlgorithm enum to match algorithm names in KMS feat: add SystemPolicyV1Beta1 service ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
-* **channel:** add resource type to ChannelPartnerLink ([c206948](https://www.github.com/googleapis/google-cloud-go/commit/c2069487f6af5bcb37d519afeb60e312e35e67d5))
-* **cloudtasks:** add C++ rules for Cloud Tasks ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59))
-* **compute:** Move compute.v1 from googleapis-discovery to googleapis ([#675](https://www.github.com/googleapis/google-cloud-go/issues/675)) ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
-* **compute:** Switch to string enums for compute ([#685](https://www.github.com/googleapis/google-cloud-go/issues/685)) ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7))
-* **contactcenterinsights:** Add ability to update phrase matchers feat: Add issue model stats to time series feat: Add display name to issue model stats ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
-* **contactcenterinsights:** Add WriteDisposition to BigQuery Export API ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e))
-* **contactcenterinsights:** deprecate issue_matches docs: if conversation medium is unspecified, it will default to PHONE_CALL ([1a0720f](https://www.github.com/googleapis/google-cloud-go/commit/1a0720f2f33bb14617f5c6a524946a93209e1266))
-* **contactcenterinsights:** new feature flag disable_issue_modeling docs: fixed formatting issues in the reference documentation ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7))
-* **contactcenterinsights:** remove feature flag disable_issue_modeling ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7))
-* **datacatalog:** Added BigQueryDateShardedSpec.latest_shard_resource field feat: Added SearchCatalogResult.display_name field feat: Added SearchCatalogResult.description field ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
-* **dataproc:** add Dataproc Serverless for Spark Batches API ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d))
-* **dataproc:** Add support for dataproc BatchController service ([8519b94](https://www.github.com/googleapis/google-cloud-go/commit/8519b948fee5dc82d39300c4d96e92c85fe78fe6))
-* **dialogflow/cx:** added API for changelogs docs: clarified semantic of the streaming APIs ([587bba5](https://www.github.com/googleapis/google-cloud-go/commit/587bba5ad792a92f252107aa38c6af50fb09fb58))
-* **dialogflow/cx:** added API for changelogs docs: clarified semantic of the streaming APIs ([587bba5](https://www.github.com/googleapis/google-cloud-go/commit/587bba5ad792a92f252107aa38c6af50fb09fb58))
-* **dialogflow/cx:** added support for comparing between versions docs: clarified security settings API reference ([83b941c](https://www.github.com/googleapis/google-cloud-go/commit/83b941c0983e44fdd18ceee8c6f3e91219d72ad1))
-* **dialogflow/cx:** added support for Deployments with ListDeployments and GetDeployment apis feat: added support for DeployFlow api under Environments feat: added support for TestCasesConfig under Environment docs: added long running operation explanation for several apis fix!: marked resource name of security setting as not-required ([8c5c6cf](https://www.github.com/googleapis/google-cloud-go/commit/8c5c6cf9df046b67998a8608d05595bd9e34feb0))
-* **dialogflow/cx:** allow setting custom CA for generic webhooks and release CompareVersions API docs: clarify DLP template reader usage ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59))
-* **dialogflow:** added support to configure security settings, language code and time zone on conversation profile ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
-* **dialogflow:** support document metadata filter in article suggestion and smart reply model in human agent assistant ([e33350c](https://www.github.com/googleapis/google-cloud-go/commit/e33350cfcabcddcda1a90069383d39c68deb977a))
-* **dlp:** added deidentify replacement dictionaries feat: added field for BigQuery inspect template inclusion lists feat: added field to support infotype versioning ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e))
-* **domains:** added library for Cloud Domains v1 API. Also added methods for the transfer-in flow docs: improved API comments ([8519b94](https://www.github.com/googleapis/google-cloud-go/commit/8519b948fee5dc82d39300c4d96e92c85fe78fe6))
-* **functions:** Secret Manager integration fields 'secret_environment_variables' and 'secret_volumes' added feat: CMEK integration fields 'kms_key_name' and 'docker_repository' added ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
-* **kms:** add OAEP+SHA1 to the list of supported algorithms ([8c5c6cf](https://www.github.com/googleapis/google-cloud-go/commit/8c5c6cf9df046b67998a8608d05595bd9e34feb0))
-* **kms:** add RPC retry information for MacSign, MacVerify, and GenerateRandomBytes Committer: [@bdhess](https://www.github.com/bdhess) ([1a0720f](https://www.github.com/googleapis/google-cloud-go/commit/1a0720f2f33bb14617f5c6a524946a93209e1266))
-* **kms:** add support for Raw PKCS[#1](https://www.github.com/googleapis/google-cloud-go/issues/1) signing keys ([58bea89](https://www.github.com/googleapis/google-cloud-go/commit/58bea89a3d177d5c431ff19310794e3296253353))
-* **monitoring/apiv3:** add CreateServiceTimeSeries RPC ([9e41088](https://www.github.com/googleapis/google-cloud-go/commit/9e41088bb395fbae0e757738277d5c95fa2749c8))
-* **monitoring/dashboard:** Added support for auto-close configurations ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59))
-* **monitoring/metricsscope:** promote apiv1 to GA ([#5135](https://www.github.com/googleapis/google-cloud-go/issues/5135)) ([33c0f63](https://www.github.com/googleapis/google-cloud-go/commit/33c0f63e0e0ce69d9ef6e57b04d1b8cc10ed2b78))
-* **osconfig:** OSConfig: add OS policy assignment rpcs ([83b941c](https://www.github.com/googleapis/google-cloud-go/commit/83b941c0983e44fdd18ceee8c6f3e91219d72ad1))
-* **osconfig:** Update OSConfig API ([e33350c](https://www.github.com/googleapis/google-cloud-go/commit/e33350cfcabcddcda1a90069383d39c68deb977a))
-* **osconfig:** Update osconfig v1 and v1alpha RecurringSchedule.Frequency with DAILY frequency ([59e548a](https://www.github.com/googleapis/google-cloud-go/commit/59e548acc249c7bddd9c884c2af35d582a408c4d))
-* **recaptchaenterprise:** add reCAPTCHA Enterprise account defender API methods ([88a1cdb](https://www.github.com/googleapis/google-cloud-go/commit/88a1cdbef3cc337354a61bc9276725bfb9a686d8))
-* **redis:** [Cloud Memorystore for Redis] Support Multiple Read Replicas when creating Instance ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
-* **redis:** [Cloud Memorystore for Redis] Support Multiple Read Replicas when creating Instance ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
-* **security/privateca:** add IAMPolicy & Locations mix-in support ([1a0720f](https://www.github.com/googleapis/google-cloud-go/commit/1a0720f2f33bb14617f5c6a524946a93209e1266))
-* **securitycenter:** Added a new API method UpdateExternalSystem, which enables updating a finding w/ external system metadata. External systems are a child resource under finding, and are housed on the finding itself, and can also be filtered on in Notifications, the ListFindings and GroupFindings API ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7))
-* **securitycenter:** Added mute related APIs, proto messages and fields ([3e7185c](https://www.github.com/googleapis/google-cloud-go/commit/3e7185c241d97ee342f132ae04bc93bb79a8e897))
-* **securitycenter:** Added resource type and display_name field to the FindingResult, and supported them in the filter for ListFindings and GroupFindings. Also added display_name to the resource which is surfaced in NotificationMessage ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
-* **securitycenter:** Added vulnerability field to the finding feat: Added type field to the resource which is surfaced in NotificationMessage ([090cc3a](https://www.github.com/googleapis/google-cloud-go/commit/090cc3ae0f8747a14cc904fc6d429e2f5379bb03))
-* **servicecontrol:** add C++ rules for many Cloud services ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7))
-* **speech:** add result_end_time to SpeechRecognitionResult ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e))
-* **speech:** added alternative_language_codes to RecognitionConfig feat: WEBM_OPUS codec feat: SpeechAdaptation configuration feat: word confidence feat: spoken punctuation and spoken emojis feat: hint boost in SpeechContext ([a2c0bef](https://www.github.com/googleapis/google-cloud-go/commit/a2c0bef551489c9f1d0d12b973d3bf095354841e))
-* **texttospeech:** update v1 proto ([90e2868](https://www.github.com/googleapis/google-cloud-go/commit/90e2868a3d220aa7f897438f4917013fda7a7c59))
-* **workflows/executions:** add a stack_trace field to the Error messages specifying where the error occured feat: add call_log_level field to Execution messages doc: clarify requirement to escape strings within JSON arguments ([1f5aa78](https://www.github.com/googleapis/google-cloud-go/commit/1f5aa78a4d6633871651c89a6d9c48e3409fecc5))
-
-
-### Bug Fixes
-
-* **accesscontextmanager:** nodejs package name access-context-manager ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d))
-* **aiplatform:** Remove invalid resource annotations ([587bba5](https://www.github.com/googleapis/google-cloud-go/commit/587bba5ad792a92f252107aa38c6af50fb09fb58))
-* **compute/metadata:** return an error when all retries have failed ([#5063](https://www.github.com/googleapis/google-cloud-go/issues/5063)) ([c792a0d](https://www.github.com/googleapis/google-cloud-go/commit/c792a0d13db019c9964efeee5c6bc85b07ca50fa)), refs [#5062](https://www.github.com/googleapis/google-cloud-go/issues/5062)
-* **compute:** make parent_id fields required compute move and insert methods ([#686](https://www.github.com/googleapis/google-cloud-go/issues/686)) ([c8271d4](https://www.github.com/googleapis/google-cloud-go/commit/c8271d4b217a6e6924d9f87eac9468c4b5767ba7))
-* **compute:** Move compute_small protos under its own directory ([#681](https://www.github.com/googleapis/google-cloud-go/issues/681)) ([3e7185c](https://www.github.com/googleapis/google-cloud-go/commit/3e7185c241d97ee342f132ae04bc93bb79a8e897))
-* **internal/gapicgen:** fix a compute filtering ([#5111](https://www.github.com/googleapis/google-cloud-go/issues/5111)) ([77aa19d](https://www.github.com/googleapis/google-cloud-go/commit/77aa19de7fc33a9e831e6b91bd324d6832b44d99))
-* **internal/godocfx:** only put TOC status on mod if all pkgs have same status ([#4974](https://www.github.com/googleapis/google-cloud-go/issues/4974)) ([309b59e](https://www.github.com/googleapis/google-cloud-go/commit/309b59e583d1bf0dd9ffe84223034eb8a2975d47))
-* **internal/godocfx:** replace * with HTML code ([#5049](https://www.github.com/googleapis/google-cloud-go/issues/5049)) ([a8f7c06](https://www.github.com/googleapis/google-cloud-go/commit/a8f7c066e8d97120ae4e12963e3c9acc8b8906c2))
-* **monitoring/apiv3:** Reintroduce deprecated field/enum for backward compatibility docs: Use absolute link targets in comments ([45fd259](https://www.github.com/googleapis/google-cloud-go/commit/45fd2594d99ef70c776df26866f0a3b537e7e69e))
-* **profiler:** workaround certificate expiration issue in integration tests ([#4955](https://www.github.com/googleapis/google-cloud-go/issues/4955)) ([de9e465](https://www.github.com/googleapis/google-cloud-go/commit/de9e465bea8cd0580c45e87d2cbc2b610615b363))
-* **security/privateca:** include mixin protos as input for mixin rpcs ([479c2f9](https://www.github.com/googleapis/google-cloud-go/commit/479c2f90d556a106b25ebcdb1539d231488182da))
-* **security/privateca:** repair service config to enable mixins ([83b941c](https://www.github.com/googleapis/google-cloud-go/commit/83b941c0983e44fdd18ceee8c6f3e91219d72ad1))
-* **video/transcoder:** update nodejs package name to video-transcoder ([30794e7](https://www.github.com/googleapis/google-cloud-go/commit/30794e70050b55ff87d6a80d0b4075065e9d271d))
-
-## [0.97.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.96.0...v0.97.0) (2021-09-29)
-
-
-### Features
-
-* **internal:** add Retry func to testutil from samples repository [#4902](https://github.com/googleapis/google-cloud-go/pull/4902)
-
-## [0.96.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.95.0...v0.96.0) (2021-09-28)
-
-
-### Features
-
-* **civil:** add IsEmpty function to time, date and datetime ([#4728](https://www.github.com/googleapis/google-cloud-go/issues/4728)) ([88bfa64](https://www.github.com/googleapis/google-cloud-go/commit/88bfa64d6df2f3bb7d41e0b8f56717dd3de790e2)), refs [#4727](https://www.github.com/googleapis/google-cloud-go/issues/4727)
-* **internal/godocfx:** detect preview versions ([#4899](https://www.github.com/googleapis/google-cloud-go/issues/4899)) ([9b60844](https://www.github.com/googleapis/google-cloud-go/commit/9b608445ce9ebabbc87a50e85ce6ef89125031d2))
-* **internal:** provide wrapping for retried errors ([#4797](https://www.github.com/googleapis/google-cloud-go/issues/4797)) ([ce5f4db](https://www.github.com/googleapis/google-cloud-go/commit/ce5f4dbab884e847a2d9f1f8f3fcfd7df19a505a))
-
-
-### Bug Fixes
-
-* **internal/gapicgen:** restore fmting proto files ([#4789](https://www.github.com/googleapis/google-cloud-go/issues/4789)) ([5606b54](https://www.github.com/googleapis/google-cloud-go/commit/5606b54b97bb675487c6c138a4081c827218f933))
-* **internal/trace:** use xerrors.As for trace ([#4813](https://www.github.com/googleapis/google-cloud-go/issues/4813)) ([05fe61c](https://www.github.com/googleapis/google-cloud-go/commit/05fe61c5aa4860bdebbbe3e91a9afaba16aa6184))
-
-## [0.95.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.94.1...v0.95.0) (2021-09-21)
-
-### Bug Fixes
-
-* **internal/gapicgen:** add a temporary import ([#4756](https://www.github.com/googleapis/google-cloud-go/issues/4756)) ([4d9c046](https://www.github.com/googleapis/google-cloud-go/commit/4d9c046b66a2dc205e2c14b676995771301440da))
-* **compute/metadata:** remove heavy gax dependency ([#4784](https://www.github.com/googleapis/google-cloud-go/issues/4784)) ([ea00264](https://www.github.com/googleapis/google-cloud-go/commit/ea00264428137471805f2ec67f04f3a5a42928fa))
-
-### [0.94.1](https://www.github.com/googleapis/google-cloud-go/compare/v0.94.0...v0.94.1) (2021-09-02)
-
-
-### Bug Fixes
-
-* **compute/metadata:** fix retry logic to not panic on error ([#4714](https://www.github.com/googleapis/google-cloud-go/issues/4714)) ([75c63b9](https://www.github.com/googleapis/google-cloud-go/commit/75c63b94d2cf86606fffc3611f7e6150b667eedc)), refs [#4713](https://www.github.com/googleapis/google-cloud-go/issues/4713)
-
-## [0.94.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.92.0...v0.94.0) (2021-08-31)
-
-
-### Features
-
-* **aiplatform:** add XAI, model monitoring, and index services to aiplatform v1 ([e385b40](https://www.github.com/googleapis/google-cloud-go/commit/e385b40a1e2ecf81f5fd0910de5c37275951f86b))
-* **analytics/admin:** add `GetDataRetentionSettings`, `UpdateDataRetentionSettings` methods to the API ([8467899](https://www.github.com/googleapis/google-cloud-go/commit/8467899ab6ebf0328c543bfb5fbcddeb2f53a082))
-* **asset:** Release of relationships in v1, Add content type Relationship to support relationship export Committer: lvv@ ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3))
-* **assuredworkloads:** Add Canada Regions And Support compliance regime ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c))
-* **cloudbuild/apiv1:** Add ability to configure BuildTriggers to create Builds that require approval before executing and ApproveBuild API to approve or reject pending Builds ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3))
-* **cloudbuild/apiv1:** add script field to BuildStep message ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c))
-* **cloudbuild/apiv1:** Update cloudbuild proto with the service_account for BYOSA Triggers. ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c))
-* **compute/metadata:** retry error when talking to metadata service ([#4648](https://www.github.com/googleapis/google-cloud-go/issues/4648)) ([81c6039](https://www.github.com/googleapis/google-cloud-go/commit/81c6039503121f8da3de4f4cd957b8488a3ef620)), refs [#4642](https://www.github.com/googleapis/google-cloud-go/issues/4642)
-* **dataproc:** remove apiv1beta2 client ([#4682](https://www.github.com/googleapis/google-cloud-go/issues/4682)) ([2248554](https://www.github.com/googleapis/google-cloud-go/commit/22485541affb1251604df292670a20e794111d3e))
-* **gaming:** support version reporting API ([cd65cec](https://www.github.com/googleapis/google-cloud-go/commit/cd65cecf15c4a01648da7f8f4f4d497772961510))
-* **gkehub:** Add request_id under `DeleteMembershipRequest` and `UpdateMembershipRequest` ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c))
-* **internal/carver:** support carving batches ([#4623](https://www.github.com/googleapis/google-cloud-go/issues/4623)) ([2972d19](https://www.github.com/googleapis/google-cloud-go/commit/2972d194da19bedf16d76fda471c06a965cfdcd6))
-* **kms:** add support for Key Reimport ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123))
-* **metastore:** Added the Backup resource and Backup resource GetIamPolicy/SetIamPolicy to V1 feat: Added the RestoreService method to V1 ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3))
-* **monitoring/dashboard:** Added support for logs-based alerts: https://cloud.google.com/logging/docs/alerting/log-based-alerts feat: Added support for user-defined labels on cloud monitoring's Service and ServiceLevelObjective objects fix!: mark required fields in QueryTimeSeriesRequest as required ([b9226eb](https://www.github.com/googleapis/google-cloud-go/commit/b9226eb0b34473cb6f920c2526ad0d6dacb03f3c))
-* **osconfig:** Update osconfig v1 and v1alpha with WindowsApplication ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123))
-* **speech:** Add transcript normalization ([b31646d](https://www.github.com/googleapis/google-cloud-go/commit/b31646d1e12037731df4b5c0ba9f60b6434d7b9b))
-* **talent:** Add new commute methods in Search APIs feat: Add new histogram type 'publish_time_in_day' feat: Support filtering by requisitionId is ListJobs API ([d4c3340](https://www.github.com/googleapis/google-cloud-go/commit/d4c3340bfc8b6793d6d2c8a3ed8ccdb472e1efd3))
-* **translate:** added v3 proto for online/batch document translation and updated v3beta1 proto for format conversion ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123))
-
-
-### Bug Fixes
-
-* **datastream:** Change a few resource pattern variables from camelCase to snake_case ([bf4378b](https://www.github.com/googleapis/google-cloud-go/commit/bf4378b5b859f7b835946891dbfebfee31c4b123))
-
-## [0.92.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.91.0...v0.92.0) (2021-08-16)
-
-
-### Features
-
-* **all:** remove testing deps ([#4580](https://www.github.com/googleapis/google-cloud-go/issues/4580)) ([15c1eb9](https://www.github.com/googleapis/google-cloud-go/commit/15c1eb9730f0b514edb911161f9c59e8d790a5ec)), refs [#4061](https://www.github.com/googleapis/google-cloud-go/issues/4061)
-* **internal/detect:** add helper to detect projectID from env ([#4582](https://www.github.com/googleapis/google-cloud-go/issues/4582)) ([cc65d94](https://www.github.com/googleapis/google-cloud-go/commit/cc65d945688ac446602bce6ef86a935714dfe2f8)), refs [#1294](https://www.github.com/googleapis/google-cloud-go/issues/1294)
-* **spannertest:** Add validation of duplicated column names ([#4611](https://www.github.com/googleapis/google-cloud-go/issues/4611)) ([84f86a6](https://www.github.com/googleapis/google-cloud-go/commit/84f86a605c809ab36dd3cb4b3ab1df15a5302083))
-
-## [0.91.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.90.0...v0.91.0) (2021-08-11)
-
-
-### Features
-
-* **.github:** support dynamic submodule detection ([#4537](https://www.github.com/googleapis/google-cloud-go/issues/4537)) ([4374b90](https://www.github.com/googleapis/google-cloud-go/commit/4374b907e9f166da6bd23a8ef94399872b00afd6))
-* **dialogflow/cx:** add advanced settings for agent level feat: add rollout config, state and failure reason for experiment feat: add insights export settings for security setting feat: add language code for streaming recognition result and flow versions for query parameters docs: deprecate legacy logging settings ([ed73554](https://www.github.com/googleapis/google-cloud-go/commit/ed735541dc57d0681d84b46853393eac5f7ccec3))
-* **dialogflow/cx:** add advanced settings for agent level feat: add rollout config, state and failure reason for experiment feat: add insights export settings for security setting feat: add language code for streaming recognition result and flow versions for query parameters docs: deprecate legacy logging settings ([ed73554](https://www.github.com/googleapis/google-cloud-go/commit/ed735541dc57d0681d84b46853393eac5f7ccec3))
-* **dialogflow/cx:** added support for DLP templates; expose `Locations` service to get/list available locations of Dialogflow products ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093))
-* **dialogflow/cx:** added support for DLP templates; expose `Locations` service to get/list available locations of Dialogflow products docs: reorder some fields ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093))
-* **dialogflow:** expose `Locations` service to get/list available locations of Dialogflow products; fixed some API annotations ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093))
-* **kms:** add support for HMAC, Variable Key Destruction, and GenerateRandom ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093))
-* **speech:** add total_billed_time response field ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093))
-* **video/transcoder:** Add video cropping feature feat: Add video padding feature feat: Add ttl_after_completion_days field to Job docs: Update proto documentation docs: Indicate v1beta1 deprecation ([5996846](https://www.github.com/googleapis/google-cloud-go/commit/59968462a3870c6289166fa1161f9b6d9c10e093))
-
-
-### Bug Fixes
-
-* **functions:** Updating behavior of source_upload_url during Get/List function calls ([381a494](https://www.github.com/googleapis/google-cloud-go/commit/381a494c29da388977b0bdda2177058328cc4afe))
-
-## [0.90.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.89.0...v0.90.0) (2021-08-03)
-
-
-### ⚠ BREAKING CHANGES
-
-* **compute:** add pagination and an Operation wrapper (#4542)
-
-### Features
-
-* **compute:** add pagination and an Operation wrapper ([#4542](https://www.github.com/googleapis/google-cloud-go/issues/4542)) ([36f4649](https://www.github.com/googleapis/google-cloud-go/commit/36f46494111f6d16d103fb208d49616576dbf91e))
-* **internal/godocfx:** add status to packages and TOCs ([#4547](https://www.github.com/googleapis/google-cloud-go/issues/4547)) ([c6de69c](https://www.github.com/googleapis/google-cloud-go/commit/c6de69c710561bb2a40eff05417df4b9798c258a))
-* **internal/godocfx:** mark status of deprecated items ([#4525](https://www.github.com/googleapis/google-cloud-go/issues/4525)) ([d571c6f](https://www.github.com/googleapis/google-cloud-go/commit/d571c6f4337ec9c4807c230cd77f53b6e7db6437))
-
-
-### Bug Fixes
-
-* **internal/carver:** don't tag commits ([#4518](https://www.github.com/googleapis/google-cloud-go/issues/4518)) ([c355eb8](https://www.github.com/googleapis/google-cloud-go/commit/c355eb8ecb0bb1af0ccf55e6262ca8c0d5c7e352))
-
-## [0.89.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.88.0...v0.89.0) (2021-07-29)
-
-
-### Features
-
-* **assuredworkloads:** Add EU Regions And Support compliance regime ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
-* **datacatalog:** Added support for BigQuery connections entries feat: Added support for BigQuery routines entries feat: Added usage_signal field feat: Added labels field feat: Added ReplaceTaxonomy in Policy Tag Manager Serialization API feat: Added support for public tag templates feat: Added support for rich text tags docs: Documentation improvements ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
-* **datafusion:** start generating apiv1 ([e55a016](https://www.github.com/googleapis/google-cloud-go/commit/e55a01667afaf36ff70807d061ecafb61827ba97))
-* **iap:** start generating apiv1 ([e55a016](https://www.github.com/googleapis/google-cloud-go/commit/e55a01667afaf36ff70807d061ecafb61827ba97))
-* **internal/carver:** add tooling to help carve out sub-modules ([#4417](https://www.github.com/googleapis/google-cloud-go/issues/4417)) ([a7e28f2](https://www.github.com/googleapis/google-cloud-go/commit/a7e28f2557469562ae57e5174b41bdf8fce62b63))
-* **networkconnectivity:** Add files for Network Connectivity v1 API. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
-* **retail:** Add restricted Retail Search features for Retail API v2. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
-* **secretmanager:** In Secret Manager, users can now use filter to customize the output of ListSecrets/ListSecretVersions calls ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
-* **securitycenter:** add finding_class and indicator fields in Finding ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
-* **speech:** add total_billed_time response field. fix!: phrase_set_id is required field in CreatePhraseSetRequest. fix!: custom_class_id is required field in CreateCustomClassRequest. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
-* **storagetransfer:** start generating apiv1 ([#4505](https://www.github.com/googleapis/google-cloud-go/issues/4505)) ([f2d531d](https://www.github.com/googleapis/google-cloud-go/commit/f2d531d2b519efa58e0f23a178bbebe675c203c3))
-
-
-### Bug Fixes
-
-* **internal/gapicgen:** exec Stdout already set ([#4509](https://www.github.com/googleapis/google-cloud-go/issues/4509)) ([41246e9](https://www.github.com/googleapis/google-cloud-go/commit/41246e900aaaea92a9f956e92956c40c86f4cb3a))
-* **internal/gapicgen:** tidy all after dep bump ([#4515](https://www.github.com/googleapis/google-cloud-go/issues/4515)) ([9401be5](https://www.github.com/googleapis/google-cloud-go/commit/9401be509c570c3c55694375065c84139e961857)), refs [#4434](https://www.github.com/googleapis/google-cloud-go/issues/4434)
-
-## [0.88.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.87.0...v0.88.0) (2021-07-22)
-
-
-### ⚠ BREAKING CHANGES
-
-* **cloudbuild/apiv1:** Proto had a prior definitions of WorkerPool resources which were never supported. This change replaces those resources with definitions that are currently supported.
-
-### Features
-
-* **cloudbuild/apiv1:** add a WorkerPools API ([19ea3f8](https://www.github.com/googleapis/google-cloud-go/commit/19ea3f830212582bfee21d9e09f0034f9ce76547))
-* **cloudbuild/apiv1:** Implementation of Build Failure Info: - Added message FailureInfo field ([19ea3f8](https://www.github.com/googleapis/google-cloud-go/commit/19ea3f830212582bfee21d9e09f0034f9ce76547))
-* **osconfig/agentendpoint:** OSConfig AgentEndpoint: add basic os info to RegisterAgentRequest, add WindowsApplication type to Inventory ([8936bc3](https://www.github.com/googleapis/google-cloud-go/commit/8936bc3f2d0fb2f6514f6e019fa247b8f41bd43c))
-* **resourcesettings:** Publish Cloud ResourceSettings v1 API ([43ad3cb](https://www.github.com/googleapis/google-cloud-go/commit/43ad3cb7be981fff9dc5dcf4510f1cd7bea99957))
-
-
-### Bug Fixes
-
-* **internal/godocfx:** set exit code, print cmd output, no go get ... ([#4445](https://www.github.com/googleapis/google-cloud-go/issues/4445)) ([cc70f77](https://www.github.com/googleapis/google-cloud-go/commit/cc70f77ac279a62e24e1b07f6e53fd126b7286b0))
-* **internal:** detect module for properly generating docs URLs ([#4460](https://www.github.com/googleapis/google-cloud-go/issues/4460)) ([1eaba8b](https://www.github.com/googleapis/google-cloud-go/commit/1eaba8bd694f7552a8e3e09b4f164de8b6ca23f0)), refs [#4447](https://www.github.com/googleapis/google-cloud-go/issues/4447)
-* **kms:** Updating WORKSPACE files to use the newest version of the Typescript generator. ([8936bc3](https://www.github.com/googleapis/google-cloud-go/commit/8936bc3f2d0fb2f6514f6e019fa247b8f41bd43c))
-
-## [0.87.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.86.0...v0.87.0) (2021-07-13)
-
-
-### Features
-
-* **container:** allow updating security group on existing clusters ([528ffc9](https://www.github.com/googleapis/google-cloud-go/commit/528ffc9bd63090129a8b1355cd31273f8c23e34c))
-* **monitoring/dashboard:** added validation only mode when writing dashboards feat: added alert chart widget ([652d7c2](https://www.github.com/googleapis/google-cloud-go/commit/652d7c277da2f6774729064ab65d557875c81567))
-* **networkmanagment:** start generating apiv1 ([907592c](https://www.github.com/googleapis/google-cloud-go/commit/907592c576abfc65c01bbcd30c1a6094916cdc06))
-* **secretmanager:** Tune Secret Manager auto retry parameters ([528ffc9](https://www.github.com/googleapis/google-cloud-go/commit/528ffc9bd63090129a8b1355cd31273f8c23e34c))
-* **video/transcoder:** start generating apiv1 ([907592c](https://www.github.com/googleapis/google-cloud-go/commit/907592c576abfc65c01bbcd30c1a6094916cdc06))
-
-
-### Bug Fixes
-
-* **compute:** properly generate PUT requests ([#4426](https://www.github.com/googleapis/google-cloud-go/issues/4426)) ([a7491a5](https://www.github.com/googleapis/google-cloud-go/commit/a7491a533e4ad75eb6d5f89718d4dafb0c5b4167))
-* **internal:** fix relative pathing for generator ([#4397](https://www.github.com/googleapis/google-cloud-go/issues/4397)) ([25e0eae](https://www.github.com/googleapis/google-cloud-go/commit/25e0eaecf9feb1caa97988c5398ac58f6ca17391))
-
-
-### Miscellaneous Chores
-
-* **all:** fix release version ([#4427](https://www.github.com/googleapis/google-cloud-go/issues/4427)) ([2c0d267](https://www.github.com/googleapis/google-cloud-go/commit/2c0d2673ccab7281b6432215ee8279f9efd04a15))
-
-## [0.86.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.85.0...v0.86.0) (2021-07-01)
-
-
-### Features
-
-* **bigquery managedwriter:** schema conversion support ([#4357](https://www.github.com/googleapis/google-cloud-go/issues/4357)) ([f2b20f4](https://www.github.com/googleapis/google-cloud-go/commit/f2b20f493e2ed5a883ce42fa65695c03c574feb5))
-
-## [0.85.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.84.0...v0.85.0) (2021-06-30)
-
-
-### Features
-
-* **dataflow:** start generating apiv1beta3 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273))
-* **datastream:** start generating apiv1alpha1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273))
-* **dialogflow:** added Automated agent reply type and allow cancellation flag for partial response feature. ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0))
-* **documentai:** update document.proto, add the processor management methods. ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0))
-* **eventarc:** start generating apiv1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273))
-* **gkehub:** added v1alpha messages and client for gkehub ([8fb4649](https://www.github.com/googleapis/google-cloud-go/commit/8fb464956f0ca51d30e8e14dc625ff9fa150c437))
-* **internal/godocfx:** add support for other modules ([#4290](https://www.github.com/googleapis/google-cloud-go/issues/4290)) ([d52bae6](https://www.github.com/googleapis/google-cloud-go/commit/d52bae6cd77474174192c46236d309bf967dfa00))
-* **internal/godocfx:** different metadata for different modules ([#4297](https://www.github.com/googleapis/google-cloud-go/issues/4297)) ([598f5b9](https://www.github.com/googleapis/google-cloud-go/commit/598f5b93778b2e2e75265ae54484dd54477433f5))
-* **internal:** add force option for regen ([#4310](https://www.github.com/googleapis/google-cloud-go/issues/4310)) ([de654eb](https://www.github.com/googleapis/google-cloud-go/commit/de654ebfcf23a53b4d1fee0aa48c73999a55c1a6))
-* **servicecontrol:** Added the gRPC service config for the Service Controller v1 API docs: Updated some comments. ([8fb4649](https://www.github.com/googleapis/google-cloud-go/commit/8fb464956f0ca51d30e8e14dc625ff9fa150c437))
-* **workflows/executions:** start generating apiv1 ([cfee361](https://www.github.com/googleapis/google-cloud-go/commit/cfee36161d41e3a0f769e51ab96c25d0967af273))
-
-
-### Bug Fixes
-
-* **internal:** add autogenerated header to snippets ([#4261](https://www.github.com/googleapis/google-cloud-go/issues/4261)) ([2220787](https://www.github.com/googleapis/google-cloud-go/commit/222078722c37c3fdadec7bbbe0bcf81edd105f1a)), refs [#4260](https://www.github.com/googleapis/google-cloud-go/issues/4260)
-* **internal:** fix googleapis-disco regen ([#4354](https://www.github.com/googleapis/google-cloud-go/issues/4354)) ([aeea1ce](https://www.github.com/googleapis/google-cloud-go/commit/aeea1ce1e5dff3acdfe208932327b52c49851b41))
-* **kms:** replace IAMPolicy mixin in service config. ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0))
-* **security/privateca:** Fixed casing of the Ruby namespace ([5a9c6ce](https://www.github.com/googleapis/google-cloud-go/commit/5a9c6ce781fb6a338e29d3dee72367998d834af0))
-
-## [0.84.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.83.0...v0.84.0) (2021-06-09)
-
-
-### Features
-
-* **aiplatform:** start generating apiv1 ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c))
-* **apigeeconnect:** start generating apiv1 ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c))
-* **dialogflow/cx:** support sentiment analysis in bot testing ([7a57aac](https://www.github.com/googleapis/google-cloud-go/commit/7a57aac996f2bae20ee6ddbd02ad9e56e380099b))
-* **dialogflow/cx:** support sentiment analysis in bot testing ([6ad2306](https://www.github.com/googleapis/google-cloud-go/commit/6ad2306f64710ce16059b464342dbc6a98d2d9c2))
-* **documentai:** Move CommonOperationMetadata into a separate proto file for potential reuse. ([9e80ea0](https://www.github.com/googleapis/google-cloud-go/commit/9e80ea0d053b06876418194f65a478045dc4fe6c))
-* **documentai:** Move CommonOperationMetadata into a separate proto file for potential reuse. ([18375e5](https://www.github.com/googleapis/google-cloud-go/commit/18375e50e8f16e63506129b8927a7b62f85e407b))
-* **gkeconnect/gateway:** start generating apiv1beta1 ([#4235](https://www.github.com/googleapis/google-cloud-go/issues/4235)) ([1c3e968](https://www.github.com/googleapis/google-cloud-go/commit/1c3e9689d78670a231a3660db00fd4fd8f5c6345))
-* **lifesciences:** start generating apiv2beta ([be1d729](https://www.github.com/googleapis/google-cloud-go/commit/be1d729fdaa18eb1c782f3b09a6bb8fd6b3a144c))
-* **tpu:** start generating apiv1 ([#4199](https://www.github.com/googleapis/google-cloud-go/issues/4199)) ([cac48ea](https://www.github.com/googleapis/google-cloud-go/commit/cac48eab960cd34cc20732f6a1aeb93c540a036b))
-
-
-### Bug Fixes
-
-* **bttest:** fix race condition in SampleRowKeys ([#4207](https://www.github.com/googleapis/google-cloud-go/issues/4207)) ([5711fb1](https://www.github.com/googleapis/google-cloud-go/commit/5711fb10d25c458807598d736a232bb2210a047a))
-* **documentai:** Fix Ruby gem title of documentai v1 (package not currently published) ([9e80ea0](https://www.github.com/googleapis/google-cloud-go/commit/9e80ea0d053b06876418194f65a478045dc4fe6c))
-
-## [0.83.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.82.0...v0.83.0) (2021-06-02)
-
-
-### Features
-
-* **dialogflow:** added a field in the query result to indicate whether slot filling is cancelled. ([f9cda8f](https://www.github.com/googleapis/google-cloud-go/commit/f9cda8fb6c3d76a062affebe6649f0a43aeb96f3))
-* **essentialcontacts:** start generating apiv1 ([#4118](https://www.github.com/googleapis/google-cloud-go/issues/4118)) ([fe14afc](https://www.github.com/googleapis/google-cloud-go/commit/fe14afcf74e09089b22c4f5221cbe37046570fda))
-* **gsuiteaddons:** start generating apiv1 ([#4082](https://www.github.com/googleapis/google-cloud-go/issues/4082)) ([6de5c99](https://www.github.com/googleapis/google-cloud-go/commit/6de5c99173c4eeaf777af18c47522ca15637d232))
-* **osconfig:** OSConfig: add ExecResourceOutput and per step error message. ([f9cda8f](https://www.github.com/googleapis/google-cloud-go/commit/f9cda8fb6c3d76a062affebe6649f0a43aeb96f3))
-* **osconfig:** start generating apiv1alpha ([#4119](https://www.github.com/googleapis/google-cloud-go/issues/4119)) ([8ad471f](https://www.github.com/googleapis/google-cloud-go/commit/8ad471f26087ec076460df6dcf27769ffe1b8834))
-* **privatecatalog:** start generating apiv1beta1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076))
-* **serviceusage:** start generating apiv1 ([#4120](https://www.github.com/googleapis/google-cloud-go/issues/4120)) ([e4531f9](https://www.github.com/googleapis/google-cloud-go/commit/e4531f93cfeb6388280bb253ef6eb231aba37098))
-* **shell:** start generating apiv1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076))
-* **vpcaccess:** start generating apiv1 ([500c1a6](https://www.github.com/googleapis/google-cloud-go/commit/500c1a6101f624cb6032f0ea16147645a02e7076))
-
-## [0.82.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.81.0...v0.82.0) (2021-05-17)
-
-
-### Features
-
-* **billing/budgets:** Added support for configurable budget time period. fix: Updated some documentation links. ([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab))
-* **billing/budgets:** Added support for configurable budget time period. fix: Updated some documentation links. ([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab))
-* **cloudbuild/apiv1:** Add fields for Pub/Sub triggers ([8b4adbf](https://www.github.com/googleapis/google-cloud-go/commit/8b4adbf9815e1ec229dfbcfb9189d3ea63112e1b))
-* **cloudbuild/apiv1:** Implementation of Source Manifests: - Added message StorageSourceManifest as an option for the Source message - Added StorageSourceManifest field to the SourceProvenance message ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea))
-* **clouddms:** start generating apiv1 ([#4081](https://www.github.com/googleapis/google-cloud-go/issues/4081)) ([29df85c](https://www.github.com/googleapis/google-cloud-go/commit/29df85c40ab64d59e389a980c9ce550077839763))
-* **dataproc:** update the Dataproc V1 API client library ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3))
-* **dialogflow/cx:** add support for service directory webhooks ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea))
-* **dialogflow/cx:** add support for service directory webhooks ([7fd2ccd](https://www.github.com/googleapis/google-cloud-go/commit/7fd2ccd26adec1468e15fe84bf75210255a9dfea))
-* **dialogflow/cx:** support setting current_page to resume sessions; expose transition_route_groups in flows and language_code in webhook ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3))
-* **dialogflow/cx:** support setting current_page to resume sessions; expose transition_route_groups in flows and language_code in webhook ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3))
-* **dialogflow:** added more Environment RPCs feat: added Versions service feat: added Fulfillment service feat: added TextToSpeechSettings. feat: added location in some resource patterns. ([4f73dc1](https://www.github.com/googleapis/google-cloud-go/commit/4f73dc19c2e05ad6133a8eac3d62ddb522314540))
-* **documentai:** add confidence field to the PageAnchor.PageRef in document.proto. ([d089dda](https://www.github.com/googleapis/google-cloud-go/commit/d089dda0089acb9aaef9b3da40b219476af9fc06))
-* **documentai:** add confidence field to the PageAnchor.PageRef in document.proto. ([07fdcd1](https://www.github.com/googleapis/google-cloud-go/commit/07fdcd12499eac26f9b5fae01d6c1282c3e02b7c))
-* **internal/gapicgen:** only update relevant gapic files ([#4066](https://www.github.com/googleapis/google-cloud-go/issues/4066)) ([5948bee](https://www.github.com/googleapis/google-cloud-go/commit/5948beedbadd491601bdee6a006cf685e94a85f4))
-* **internal/gensnippets:** add license header and region tags ([#3924](https://www.github.com/googleapis/google-cloud-go/issues/3924)) ([e9ff7a0](https://www.github.com/googleapis/google-cloud-go/commit/e9ff7a0f9bb1cc67f5d0de47934811960429e72c))
-* **internal/gensnippets:** initial commit ([#3922](https://www.github.com/googleapis/google-cloud-go/issues/3922)) ([3fabef0](https://www.github.com/googleapis/google-cloud-go/commit/3fabef032388713f732ab4dbfc51624cdca0f481))
-* **internal:** auto-generate snippets ([#3949](https://www.github.com/googleapis/google-cloud-go/issues/3949)) ([b70e0fc](https://www.github.com/googleapis/google-cloud-go/commit/b70e0fccdc86813e0d97ff63b585822d4deafb38))
-* **internal:** generate region tags for snippets ([#3962](https://www.github.com/googleapis/google-cloud-go/issues/3962)) ([ef2b90e](https://www.github.com/googleapis/google-cloud-go/commit/ef2b90ea6d47e27744c98a1a9ae0c487c5051808))
-* **metastore:** start generating apiv1 ([#4083](https://www.github.com/googleapis/google-cloud-go/issues/4083)) ([661610a](https://www.github.com/googleapis/google-cloud-go/commit/661610afa6a9113534884cafb138109536724310))
-* **security/privateca:** start generating apiv1 ([#4023](https://www.github.com/googleapis/google-cloud-go/issues/4023)) ([08aa83a](https://www.github.com/googleapis/google-cloud-go/commit/08aa83a5371bb6485bc3b19b3ed5300f807ce69f))
-* **securitycenter:** add canonical_name and folder fields ([5c5ca08](https://www.github.com/googleapis/google-cloud-go/commit/5c5ca08c637a23cfa3e3a051fea576e1feb324fd))
-* **securitycenter:** add canonical_name and folder fields ([5c5ca08](https://www.github.com/googleapis/google-cloud-go/commit/5c5ca08c637a23cfa3e3a051fea576e1feb324fd))
-* **speech:** add webm opus support. ([d089dda](https://www.github.com/googleapis/google-cloud-go/commit/d089dda0089acb9aaef9b3da40b219476af9fc06))
-* **speech:** Support for spoken punctuation and spoken emojis. ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3))
-
-
-### Bug Fixes
-
-* **binaryauthorization:** add Java options to Binaryauthorization protos ([9a459d5](https://www.github.com/googleapis/google-cloud-go/commit/9a459d5d149b9c3b02a35d4245d164b899ff09b3))
-* **internal/gapicgen:** filter out internal directory changes ([#4085](https://www.github.com/googleapis/google-cloud-go/issues/4085)) ([01473f6](https://www.github.com/googleapis/google-cloud-go/commit/01473f6d8db26c6e18969ace7f9e87c66e94ad9e))
-* **internal/gapicgen:** use correct region tags for gensnippets ([#4022](https://www.github.com/googleapis/google-cloud-go/issues/4022)) ([8ccd689](https://www.github.com/googleapis/google-cloud-go/commit/8ccd689cab08f016008ca06a939a4828817d4a25))
-* **internal/gensnippets:** run goimports ([#3931](https://www.github.com/googleapis/google-cloud-go/issues/3931)) ([10050f0](https://www.github.com/googleapis/google-cloud-go/commit/10050f05c20c226547d87c08168fa4bc551395c5))
-* **internal:** append a new line to comply with go fmt ([#4028](https://www.github.com/googleapis/google-cloud-go/issues/4028)) ([a297278](https://www.github.com/googleapis/google-cloud-go/commit/a2972783c4af806199d1c67c9f63ad9677f20f34))
-* **internal:** make sure formatting is run on snippets ([#4039](https://www.github.com/googleapis/google-cloud-go/issues/4039)) ([130dfc5](https://www.github.com/googleapis/google-cloud-go/commit/130dfc535396e98fc009585b0457e3bc48ead941)), refs [#4037](https://www.github.com/googleapis/google-cloud-go/issues/4037)
-* **metastore:** increase metastore lro polling timeouts ([83b1b3b](https://www.github.com/googleapis/google-cloud-go/commit/83b1b3b648c6d9225f07f00e8c0cdabc3d1fc1ab))
-
-
-### Miscellaneous Chores
-
-* **all:** fix release version ([#4040](https://www.github.com/googleapis/google-cloud-go/issues/4040)) ([4c991a9](https://www.github.com/googleapis/google-cloud-go/commit/4c991a928665d9be93691decce0c653f430688b7))
-
-## [0.81.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.80.0...v0.81.0) (2021-04-02)
-
-
-### Features
-
-* **datacatalog:** Policy Tag Manager v1 API service feat: new RenameTagTemplateFieldEnumValue API feat: adding fully_qualified_name in lookup and search feat: added DATAPROC_METASTORE integrated system along with new entry types: DATABASE and SERVICE docs: Documentation improvements ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7))
-* **dialogflow/cx:** include original user query in WebhookRequest; add GetTestCaseResult API. doc: clarify resource format for session response. ([a0b1f6f](https://www.github.com/googleapis/google-cloud-go/commit/a0b1f6faae77d014fdee166ab018ddcd6f846ab4))
-* **dialogflow/cx:** include original user query in WebhookRequest; add GetTestCaseResult API. doc: clarify resource format for session response. ([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3))
-* **dialogflow:** expose MP3_64_KBPS and MULAW for output audio encodings. ([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3))
-* **secretmanager:** Rotation for Secrets ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7))
-
-
-### Bug Fixes
-
-* **internal/godocfx:** filter out non-Cloud ([#3878](https://www.github.com/googleapis/google-cloud-go/issues/3878)) ([625aef9](https://www.github.com/googleapis/google-cloud-go/commit/625aef9b47181cf627587cc9cde9e400713c6678))
-
-## [0.80.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.79.0...v0.80.0) (2021-03-23)
-
-
-### ⚠ BREAKING CHANGES
-
-* **all:** This is a breaking change in dialogflow
-
-### Features
-
-* **appengine:** added vm_liveness, search_api_available, network_settings, service_account, build_env_variables, kms_key_reference to v1 API ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493))
-* **assuredworkloads:** Add 'resource_settings' field to provide custom properties (ids) for the provisioned projects. ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524))
-* **assuredworkloads:** add HIPAA and HITRUST compliance regimes ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524))
-* **dialogflow/cx:** added fallback option when restoring an agent docs: clarified experiment length ([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86))
-* **dialogflow/cx:** start generating apiv3 ([#3850](https://www.github.com/googleapis/google-cloud-go/issues/3850)) ([febbdcf](https://www.github.com/googleapis/google-cloud-go/commit/febbdcf13fcea3f5d8186c3d3dface1c0d27ef9e)), refs [#3634](https://www.github.com/googleapis/google-cloud-go/issues/3634)
-* **documentai:** add EVAL_SKIPPED value to the Provenance.OperationType enum in document.proto. ([cb43066](https://www.github.com/googleapis/google-cloud-go/commit/cb4306683926843f6e977f207fa6070bb9242a61))
-* **documentai:** start generating apiv1 ([#3853](https://www.github.com/googleapis/google-cloud-go/issues/3853)) ([d68e604](https://www.github.com/googleapis/google-cloud-go/commit/d68e604c953eea90489f6134e71849b24dd0fcbf))
-* **internal/godocfx:** add prettyprint class to code blocks ([#3819](https://www.github.com/googleapis/google-cloud-go/issues/3819)) ([6e49f21](https://www.github.com/googleapis/google-cloud-go/commit/6e49f2148b116ee439c8a882dcfeefb6e7647c57))
-* **internal/godocfx:** handle Markdown content ([#3816](https://www.github.com/googleapis/google-cloud-go/issues/3816)) ([56d5d0a](https://www.github.com/googleapis/google-cloud-go/commit/56d5d0a900197fb2de46120a0eda649f2c17448f))
-* **kms:** Add maxAttempts to retry policy for KMS gRPC service config feat: Add Bazel exports_files entry for KMS gRPC service config ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493))
-* **resourcesettings:** start generating apiv1 ([#3854](https://www.github.com/googleapis/google-cloud-go/issues/3854)) ([3b288b4](https://www.github.com/googleapis/google-cloud-go/commit/3b288b4fa593c6cb418f696b5b26768967c20b9e))
-* **speech:** Support output transcript to GCS for LongRunningRecognize. ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493))
-* **speech:** Support output transcript to GCS for LongRunningRecognize. ([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86))
-* **speech:** Support output transcript to GCS for LongRunningRecognize. ([35a8706](https://www.github.com/googleapis/google-cloud-go/commit/35a870662df8bf63c4ec10a0233d1d7a708007ee))
-
-
-### Miscellaneous Chores
-
-* **all:** auto-regenerate gapics ([#3837](https://www.github.com/googleapis/google-cloud-go/issues/3837)) ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524))
-
-## [0.79.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.78.0...v0.79.0) (2021-03-10)
-
-
-### Features
-
-* **apigateway:** start generating apiv1 ([#3726](https://www.github.com/googleapis/google-cloud-go/issues/3726)) ([66046da](https://www.github.com/googleapis/google-cloud-go/commit/66046da2a4be5971ce2655dc6a5e1fadb08c3d1f))
-* **channel:** addition of billing_account field on Plan. docs: clarification that valid address lines are required for all customers. ([d4246aa](https://www.github.com/googleapis/google-cloud-go/commit/d4246aad4da3c3ef12350385f229bb908e3fb215))
-* **dialogflow/cx:** allow to disable webhook invocation per request ([d4246aa](https://www.github.com/googleapis/google-cloud-go/commit/d4246aad4da3c3ef12350385f229bb908e3fb215))
-* **dialogflow/cx:** allow to disable webhook invocation per request ([44c6bf9](https://www.github.com/googleapis/google-cloud-go/commit/44c6bf986f39a3c9fddf46788ae63bfbb3739441))
-* **dialogflow:** Add CCAI API ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
-* **documentai:** remove the translation fields in document.proto. ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
-* **documentai:** Update documentai/v1beta3 protos: add support for boolean normalized value ([529925b](https://www.github.com/googleapis/google-cloud-go/commit/529925ba79f4d3191ef80a13e566d86210fe4d25))
-* **internal/godocfx:** keep some cross links on same domain ([#3767](https://www.github.com/googleapis/google-cloud-go/issues/3767)) ([77f76ed](https://www.github.com/googleapis/google-cloud-go/commit/77f76ed09cb07a090ba9054063a7c002a35bca4e))
-* **internal:** add ability to regenerate one module's docs ([#3777](https://www.github.com/googleapis/google-cloud-go/issues/3777)) ([dc15995](https://www.github.com/googleapis/google-cloud-go/commit/dc15995521bd065da4cfaae95642588919a8c548))
-* **metastore:** added support for release channels when creating service ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
-* **metastore:** Publish Dataproc Metastore v1alpha API ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
-* **metastore:** start generating apiv1alpha ([#3747](https://www.github.com/googleapis/google-cloud-go/issues/3747)) ([359312a](https://www.github.com/googleapis/google-cloud-go/commit/359312ad6d4f61fb341d41ffa35fc0634979e650))
-* **metastore:** start generating apiv1beta ([#3788](https://www.github.com/googleapis/google-cloud-go/issues/3788)) ([2977095](https://www.github.com/googleapis/google-cloud-go/commit/297709593ad32f234c0fbcfa228cffcfd3e591f4))
-* **secretmanager:** added topic field to Secret ([f1323b1](https://www.github.com/googleapis/google-cloud-go/commit/f1323b10a3c7cc1d215730cefd3062064ef54c01))
-
-
-### Bug Fixes
-
-* **analytics/admin:** add `https://www.googleapis.com/auth/analytics.edit` OAuth2 scope to the list of acceptable scopes for all read only methods of the Admin API docs: update the documentation of the `update_mask` field used by Update() methods ([f1323b1](https://www.github.com/googleapis/google-cloud-go/commit/f1323b10a3c7cc1d215730cefd3062064ef54c01))
-* **apigateway:** Provide resource definitions for service management and IAM resources ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
-* **functions:** Fix service namespace in grpc_service_config. ([7811a34](https://www.github.com/googleapis/google-cloud-go/commit/7811a34ef64d722480c640810251bb3a0d65d495))
-* **internal/godocfx:** prevent index out of bounds when pkg == mod ([#3768](https://www.github.com/googleapis/google-cloud-go/issues/3768)) ([3d80b4e](https://www.github.com/googleapis/google-cloud-go/commit/3d80b4e93b0f7e857d6e9681d8d6a429750ecf80))
-* **internal/godocfx:** use correct anchor links ([#3738](https://www.github.com/googleapis/google-cloud-go/issues/3738)) ([919039a](https://www.github.com/googleapis/google-cloud-go/commit/919039a01a006c41e720218bd55f83ce98a5edef))
-* **internal:** fix Bash syntax ([#3779](https://www.github.com/googleapis/google-cloud-go/issues/3779)) ([3dd245d](https://www.github.com/googleapis/google-cloud-go/commit/3dd245dbdbfa84f0bbe5a476412d8463fe3e700c))
-* **tables:** use area120tables_v1alpha1.yaml as api-service-config ([#3759](https://www.github.com/googleapis/google-cloud-go/issues/3759)) ([b130ec0](https://www.github.com/googleapis/google-cloud-go/commit/b130ec0aa946b1a1eaa4d5a7c33e72353ac1612e))
-
-## [0.78.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.77.0...v0.78.0) (2021-02-22)
-
-
-### Features
-
-* **area120/tables:** Added ListWorkspaces, GetWorkspace, BatchDeleteRows APIs. ([16597fa](https://www.github.com/googleapis/google-cloud-go/commit/16597fa1ce549053c7183e8456e23f554a5501de))
-* **area120/tables:** Added ListWorkspaces, GetWorkspace, BatchDeleteRows APIs. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
-* **dialogflow:** add additional_bindings to Dialogflow v2 ListIntents API docs: update copyrights and session docs ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
-* **documentai:** Update documentai/v1beta3 protos ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4))
-* **gkehub:** Update Membership API v1beta1 proto ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4))
-* **servicecontrol:** Update the ruby_cloud_gapic_library rules for the libraries published to google-cloud-ruby to the form that works with build_gen (separate parameters for ruby_cloud_title and ruby_cloud_description). chore: Update Bazel-Ruby rules version. chore: Update build_gen version. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
-* **speech:** Support Model Adaptation. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
-
-
-### Bug Fixes
-
-* **dialogflow/cx:** RunTestCase http template. PHP REST client lib can be generated. feat: Support transition route group coverage for Test Cases. ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4))
-* **errorreporting:** Fixes ruby gem build ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113))
-
-## [0.77.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.76.0...v0.77.0) (2021-02-16)
-
-
-### Features
-
-* **channel:** Add Pub/Sub endpoints for Cloud Channel API. ([1aea7c8](https://www.github.com/googleapis/google-cloud-go/commit/1aea7c87d39eed87620b488ba0dd60b88ff26c04))
-* **dialogflow/cx:** supports SentimentAnalysisResult in webhook request docs: minor updates in wording ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
-* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
-* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8))
-* **gkehub:** start generating apiv1beta1 ([#3698](https://www.github.com/googleapis/google-cloud-go/issues/3698)) ([8aed3bd](https://www.github.com/googleapis/google-cloud-go/commit/8aed3bd1bbbe983e4891c813e4c5dc9b3aa1b9b2))
-* **internal/docfx:** full cross reference linking ([#3656](https://www.github.com/googleapis/google-cloud-go/issues/3656)) ([fcb7318](https://www.github.com/googleapis/google-cloud-go/commit/fcb7318eb338bf3828ac831ed06ca630e1876418))
-* **memcache:** added ApplySoftwareUpdate API docs: various clarifications, new documentation for ApplySoftwareUpdate chore: update proto annotations ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
-* **networkconnectivity:** Add state field in resources docs: Minor changes ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a))
-* **networkconnectivity:** Add state field in resources docs: Minor changes ([b4b5898](https://www.github.com/googleapis/google-cloud-go/commit/b4b58987368f80494bbc7f651f50e9123200fb3f))
-* **recommendationengine:** start generating apiv1beta1 ([#3686](https://www.github.com/googleapis/google-cloud-go/issues/3686)) ([8f4e130](https://www.github.com/googleapis/google-cloud-go/commit/8f4e13009444d88a5a56144129f055623a2205ac))
-
-
-### Bug Fixes
-
-* **errorreporting:** Remove dependency on AppEngine's proto definitions. This also removes the source_references field. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
-* **errorreporting:** Update bazel builds for ER client libraries. ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a))
-* **internal/godocfx:** use exact list of top-level decls ([#3665](https://www.github.com/googleapis/google-cloud-go/issues/3665)) ([3cd2961](https://www.github.com/googleapis/google-cloud-go/commit/3cd2961bd7b9c29d82a21ba8850eff00c7c332fd))
-* **kms:** do not retry on 13 INTERNAL ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
-* **orgpolicy:** Fix constraint resource pattern annotation ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8))
-* **orgpolicy:** Fix constraint resource pattern annotation ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a))
-* **profiler:** make sure retries use the most up-to-date copy of the trailer ([#3660](https://www.github.com/googleapis/google-cloud-go/issues/3660)) ([3ba9ebc](https://www.github.com/googleapis/google-cloud-go/commit/3ba9ebcee2b8b43cdf2c8f8a3d810516a604b363))
-* **vision:** sync vision v1 protos to get extra FaceAnnotation Landmark Types ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663))
-
-## [0.76.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.75.0...v0.76.0) (2021-02-02)
-
-
-### Features
-
-* **accessapproval:** Migrate the Bazel rules for the libraries published to google-cloud-ruby to use the gapic-generator-ruby instead of the monolith generator. ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e))
-* **all:** auto-regenerate gapics ([#3526](https://www.github.com/googleapis/google-cloud-go/issues/3526)) ([ab2af0b](https://www.github.com/googleapis/google-cloud-go/commit/ab2af0b32630dd97f44800f4e273184f887375db))
-* **all:** auto-regenerate gapics ([#3539](https://www.github.com/googleapis/google-cloud-go/issues/3539)) ([84d4d8a](https://www.github.com/googleapis/google-cloud-go/commit/84d4d8ae2d3fbf34a4a312a0a2e4062d18caaa3d))
-* **all:** auto-regenerate gapics ([#3546](https://www.github.com/googleapis/google-cloud-go/issues/3546)) ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7))
-* **all:** auto-regenerate gapics ([#3563](https://www.github.com/googleapis/google-cloud-go/issues/3563)) ([102112a](https://www.github.com/googleapis/google-cloud-go/commit/102112a4e9285a16645aabc89789f613d4f47c9e))
-* **all:** auto-regenerate gapics ([#3576](https://www.github.com/googleapis/google-cloud-go/issues/3576)) ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e))
-* **all:** auto-regenerate gapics ([#3580](https://www.github.com/googleapis/google-cloud-go/issues/3580)) ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1))
-* **all:** auto-regenerate gapics ([#3587](https://www.github.com/googleapis/google-cloud-go/issues/3587)) ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f))
-* **all:** auto-regenerate gapics ([#3598](https://www.github.com/googleapis/google-cloud-go/issues/3598)) ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
-* **appengine:** start generating apiv1 ([#3561](https://www.github.com/googleapis/google-cloud-go/issues/3561)) ([2b6a3b4](https://www.github.com/googleapis/google-cloud-go/commit/2b6a3b4609e389da418a83eb60a8ae3710d646d7))
-* **assuredworkloads:** updated google.cloud.assuredworkloads.v1beta1.AssuredWorkloadsService service. Clients can now create workloads with US_REGIONAL_ACCESS compliance regime ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
-* **binaryauthorization:** start generating apiv1beta1 ([#3562](https://www.github.com/googleapis/google-cloud-go/issues/3562)) ([56e18a6](https://www.github.com/googleapis/google-cloud-go/commit/56e18a64836ab9482528b212eb139f649f7a35c3))
-* **channel:** Add Pub/Sub endpoints for Cloud Channel API. ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8))
-* **cloudtasks:** introducing field: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats, Task.pull_message and introducing messages: QueueStats PullMessage docs: updates to max burst size description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
-* **cloudtasks:** introducing fields: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats and introducing messages: QueueStats docs: updates to AppEngineHttpRequest description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
-* **datalabeling:** start generating apiv1beta1 ([#3582](https://www.github.com/googleapis/google-cloud-go/issues/3582)) ([d8a7fee](https://www.github.com/googleapis/google-cloud-go/commit/d8a7feef51d3344fa7e258aba1d9fbdab56dadcf))
-* **dataqna:** start generating apiv1alpha ([#3586](https://www.github.com/googleapis/google-cloud-go/issues/3586)) ([24c5b8f](https://www.github.com/googleapis/google-cloud-go/commit/24c5b8f4f45f8cd8b3001b1ca5a8d80e9f3b39d5))
-* **dialogflow/cx:** Add new Experiment service docs: minor doc update on redact field in intent.proto and page.proto ([0959f27](https://www.github.com/googleapis/google-cloud-go/commit/0959f27e85efe94d39437ceef0ff62ddceb8e7a7))
-* **dialogflow/cx:** added support for test cases and agent validation ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
-* **dialogflow/cx:** added support for test cases and agent validation ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f))
-* **dialogflow:** add C++ targets for DialogFlow ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7))
-* **documentai:** start generating apiv1beta3 ([#3595](https://www.github.com/googleapis/google-cloud-go/issues/3595)) ([5ae21fa](https://www.github.com/googleapis/google-cloud-go/commit/5ae21fa1cfb8b8dacbcd0fc43eee430f7db63102))
-* **domains:** start generating apiv1beta1 ([#3632](https://www.github.com/googleapis/google-cloud-go/issues/3632)) ([b8ada6f](https://www.github.com/googleapis/google-cloud-go/commit/b8ada6f197e680d0bb26aa031e6431bc099a3149))
-* **godocfx:** include alt documentation link ([#3530](https://www.github.com/googleapis/google-cloud-go/issues/3530)) ([806cdd5](https://www.github.com/googleapis/google-cloud-go/commit/806cdd56fb6fdddd7a6c1354e55e0d1259bd6c8b))
-* **internal/gapicgen:** change commit formatting to match standard ([#3500](https://www.github.com/googleapis/google-cloud-go/issues/3500)) ([d1e3d46](https://www.github.com/googleapis/google-cloud-go/commit/d1e3d46c47c425581e2b149c07f8e27ffc373c7e))
-* **internal/godocfx:** xref function declarations ([#3615](https://www.github.com/googleapis/google-cloud-go/issues/3615)) ([2bdbb87](https://www.github.com/googleapis/google-cloud-go/commit/2bdbb87a682d799cf5e262a61a3ef1faf41151af))
-* **mediatranslation:** start generating apiv1beta1 ([#3636](https://www.github.com/googleapis/google-cloud-go/issues/3636)) ([4129469](https://www.github.com/googleapis/google-cloud-go/commit/412946966cf7f53c51deff1b1cc1a12d62ed0279))
-* **memcache:** start generating apiv1 ([#3579](https://www.github.com/googleapis/google-cloud-go/issues/3579)) ([eabf7cf](https://www.github.com/googleapis/google-cloud-go/commit/eabf7cfde7b3a3cc1b35c320ba52e07be9926359))
-* **networkconnectivity:** initial generation of apiv1alpha1 ([#3567](https://www.github.com/googleapis/google-cloud-go/issues/3567)) ([adf489a](https://www.github.com/googleapis/google-cloud-go/commit/adf489a536292e3196677621477eae0d52761e7f))
-* **orgpolicy:** start generating apiv2 ([#3652](https://www.github.com/googleapis/google-cloud-go/issues/3652)) ([c103847](https://www.github.com/googleapis/google-cloud-go/commit/c1038475779fda3589aa9659d4ad0b703036b531))
-* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8))
-* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9af529c](https://www.github.com/googleapis/google-cloud-go/commit/9af529c21e98b62c4617f7a7191c307659cf8bb8))
-* **recommender:** add bindings for folder/org type resources for protos in recommendations, insights and recommender_service to enable v1 api for folder/org ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
-* **recommender:** auto generated cl for enabling v1beta1 folder/org APIs and integration test ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
-* **resourcemanager:** start generating apiv2 ([#3575](https://www.github.com/googleapis/google-cloud-go/issues/3575)) ([93d0ebc](https://www.github.com/googleapis/google-cloud-go/commit/93d0ebceb4270351518a13958005bb68f0cace60))
-* **secretmanager:** added expire_time and ttl fields to Secret ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1))
-* **secretmanager:** added expire_time and ttl fields to Secret ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e))
-* **servicecontrol:** start generating apiv1 ([#3644](https://www.github.com/googleapis/google-cloud-go/issues/3644)) ([f84938b](https://www.github.com/googleapis/google-cloud-go/commit/f84938bb4042a5629fd66bda42de028fd833648a))
-* **servicemanagement:** start generating apiv1 ([#3614](https://www.github.com/googleapis/google-cloud-go/issues/3614)) ([b96134f](https://www.github.com/googleapis/google-cloud-go/commit/b96134fe91c182237359000cd544af5fec60d7db))
-
-
-### Bug Fixes
-
-* **datacatalog:** Update PHP package name casing to match the PHP namespace in the proto files ([c7ecf0f](https://www.github.com/googleapis/google-cloud-go/commit/c7ecf0f3f454606b124e52d20af2545b2c68646f))
-* **internal/godocfx:** add TOC element for module root package ([#3599](https://www.github.com/googleapis/google-cloud-go/issues/3599)) ([1d6eb23](https://www.github.com/googleapis/google-cloud-go/commit/1d6eb238206fcf8815d88981527ef176851afd7a))
-* **profiler:** Force gax to retry in case of certificate errors ([#3178](https://www.github.com/googleapis/google-cloud-go/issues/3178)) ([35dcd72](https://www.github.com/googleapis/google-cloud-go/commit/35dcd725dcd03266ed7439de40c277376b38cd71))
-
-## [0.75.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.74.0...v0.75.0) (2021-01-11)
-
-
-### Features
-
-* **all:** auto-regenerate gapics , refs [#3514](https://www.github.com/googleapis/google-cloud-go/issues/3514) [#3501](https://www.github.com/googleapis/google-cloud-go/issues/3501) [#3497](https://www.github.com/googleapis/google-cloud-go/issues/3497) [#3455](https://www.github.com/googleapis/google-cloud-go/issues/3455) [#3448](https://www.github.com/googleapis/google-cloud-go/issues/3448)
-* **channel:** start generating apiv1 ([#3517](https://www.github.com/googleapis/google-cloud-go/issues/3517)) ([2cf3b3c](https://www.github.com/googleapis/google-cloud-go/commit/2cf3b3cf7d99f2efd6868a710fad9e935fc87965))
-
-
-### Bug Fixes
-
-* **internal/gapicgen:** don't regen files that have been deleted ([#3471](https://www.github.com/googleapis/google-cloud-go/issues/3471)) ([112ca94](https://www.github.com/googleapis/google-cloud-go/commit/112ca9416cc8a2502b32547dc8d789655452f84a))
-
-## [0.74.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.73.0...v0.74.0) (2020-12-10)
-
-
-### Features
-
-* **all:** auto-regenerate gapics , refs [#3440](https://www.github.com/googleapis/google-cloud-go/issues/3440) [#3436](https://www.github.com/googleapis/google-cloud-go/issues/3436) [#3394](https://www.github.com/googleapis/google-cloud-go/issues/3394) [#3391](https://www.github.com/googleapis/google-cloud-go/issues/3391) [#3374](https://www.github.com/googleapis/google-cloud-go/issues/3374)
-* **internal/gapicgen:** support generating only gapics with genlocal ([#3383](https://www.github.com/googleapis/google-cloud-go/issues/3383)) ([eaa742a](https://www.github.com/googleapis/google-cloud-go/commit/eaa742a248dc7d93c019863248f28e37f88aae84))
-* **servicedirectory:** start generating apiv1 ([#3382](https://www.github.com/googleapis/google-cloud-go/issues/3382)) ([2774925](https://www.github.com/googleapis/google-cloud-go/commit/2774925925909071ebc585cf7400373334c156ba))
-
-
-### Bug Fixes
-
-* **internal/gapicgen:** don't create genproto pr as draft ([#3379](https://www.github.com/googleapis/google-cloud-go/issues/3379)) ([517ab0f](https://www.github.com/googleapis/google-cloud-go/commit/517ab0f25e544498c5374b256354bc41ba936ad5))
-
-## [0.73.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.72.0...v0.73.0) (2020-12-04)
-
-
-### Features
-
-* **all:** auto-regenerate gapics , refs [#3335](https://www.github.com/googleapis/google-cloud-go/issues/3335) [#3294](https://www.github.com/googleapis/google-cloud-go/issues/3294) [#3250](https://www.github.com/googleapis/google-cloud-go/issues/3250) [#3229](https://www.github.com/googleapis/google-cloud-go/issues/3229) [#3211](https://www.github.com/googleapis/google-cloud-go/issues/3211) [#3217](https://www.github.com/googleapis/google-cloud-go/issues/3217) [#3212](https://www.github.com/googleapis/google-cloud-go/issues/3212) [#3209](https://www.github.com/googleapis/google-cloud-go/issues/3209) [#3206](https://www.github.com/googleapis/google-cloud-go/issues/3206) [#3199](https://www.github.com/googleapis/google-cloud-go/issues/3199)
-* **artifactregistry:** start generating apiv1beta2 ([#3352](https://www.github.com/googleapis/google-cloud-go/issues/3352)) ([2e6f20b](https://www.github.com/googleapis/google-cloud-go/commit/2e6f20b0ab438b0b366a1a3802fc64d1a0e66fff))
-* **internal:** copy pubsub Message and PublishResult to internal/pubsub ([#3351](https://www.github.com/googleapis/google-cloud-go/issues/3351)) ([82521ee](https://www.github.com/googleapis/google-cloud-go/commit/82521ee5038735c1663525658d27e4df00ec90be))
-* **internal/gapicgen:** support adding context to regen ([#3174](https://www.github.com/googleapis/google-cloud-go/issues/3174)) ([941ab02](https://www.github.com/googleapis/google-cloud-go/commit/941ab029ba6f7f33e8b2e31e3818aeb68312a999))
-* **internal/kokoro:** add ability to regen all DocFX YAML ([#3191](https://www.github.com/googleapis/google-cloud-go/issues/3191)) ([e12046b](https://www.github.com/googleapis/google-cloud-go/commit/e12046bc4431d33aee72c324e6eb5cc907a4214a))
-
-
-### Bug Fixes
-
-* **internal/godocfx:** filter out test packages from other modules ([#3197](https://www.github.com/googleapis/google-cloud-go/issues/3197)) ([1d397aa](https://www.github.com/googleapis/google-cloud-go/commit/1d397aa8b41f8f980cba1d3dcc50f11e4d4f4ca0))
-
-## [0.72.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.71.0...v0.72.0) (2020-11-10)
-
-
-### Features
-
-* **all:** auto-regenerate gapics , refs [#3177](https://www.github.com/googleapis/google-cloud-go/issues/3177) [#3164](https://www.github.com/googleapis/google-cloud-go/issues/3164) [#3149](https://www.github.com/googleapis/google-cloud-go/issues/3149) [#3142](https://www.github.com/googleapis/google-cloud-go/issues/3142) [#3136](https://www.github.com/googleapis/google-cloud-go/issues/3136) [#3130](https://www.github.com/googleapis/google-cloud-go/issues/3130) [#3121](https://www.github.com/googleapis/google-cloud-go/issues/3121) [#3119](https://www.github.com/googleapis/google-cloud-go/issues/3119)
-
-
-### Bug Fixes
-
-* **all:** Update hand-written clients to not use WithEndpoint override ([#3111](https://www.github.com/googleapis/google-cloud-go/issues/3111)) ([f0cfd05](https://www.github.com/googleapis/google-cloud-go/commit/f0cfd0532f5204ff16f7bae406efa72603d16f44))
-* **internal/godocfx:** rename README files to pkg-readme ([#3185](https://www.github.com/googleapis/google-cloud-go/issues/3185)) ([d3a8571](https://www.github.com/googleapis/google-cloud-go/commit/d3a85719be411b692aede3331abb29b5a7b3da9a))
-
-
-## [0.71.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.70.0...v0.71.0) (2020-10-30)
-
-
-### Features
-
-* **all:** auto-regenerate gapics , refs [#3115](https://www.github.com/googleapis/google-cloud-go/issues/3115) [#3106](https://www.github.com/googleapis/google-cloud-go/issues/3106) [#3102](https://www.github.com/googleapis/google-cloud-go/issues/3102) [#3083](https://www.github.com/googleapis/google-cloud-go/issues/3083) [#3073](https://www.github.com/googleapis/google-cloud-go/issues/3073) [#3057](https://www.github.com/googleapis/google-cloud-go/issues/3057) [#3044](https://www.github.com/googleapis/google-cloud-go/issues/3044)
-* **billing/budgets:** start generating apiv1 ([#3099](https://www.github.com/googleapis/google-cloud-go/issues/3099)) ([e760c85](https://www.github.com/googleapis/google-cloud-go/commit/e760c859de88a6e79b6dffc653dbf75f1630d8e3))
-* **internal:** auto-run godocfx on new mods ([#3069](https://www.github.com/googleapis/google-cloud-go/issues/3069)) ([49f497e](https://www.github.com/googleapis/google-cloud-go/commit/49f497eab80ce34dfb4ca41f033a5c0429ff5e42))
-* **pubsublite:** Added Pub/Sub Lite clients and routing headers ([#3105](https://www.github.com/googleapis/google-cloud-go/issues/3105)) ([98668fa](https://www.github.com/googleapis/google-cloud-go/commit/98668fa5457d26ed34debee708614f027020e5bc))
-* **pubsublite:** Message type and message routers ([#3077](https://www.github.com/googleapis/google-cloud-go/issues/3077)) ([179fc55](https://www.github.com/googleapis/google-cloud-go/commit/179fc550b545a5344358a243da7007ffaa7b5171))
-* **pubsublite:** Pub/Sub Lite admin client ([#3036](https://www.github.com/googleapis/google-cloud-go/issues/3036)) ([749473e](https://www.github.com/googleapis/google-cloud-go/commit/749473ead30bf1872634821d3238d1299b99acc6))
-* **pubsublite:** Publish settings and errors ([#3075](https://www.github.com/googleapis/google-cloud-go/issues/3075)) ([9eb9fcb](https://www.github.com/googleapis/google-cloud-go/commit/9eb9fcb79f17ad7c08c77c455ba3e8d89e3bdbf2))
-* **pubsublite:** Retryable stream wrapper ([#3068](https://www.github.com/googleapis/google-cloud-go/issues/3068)) ([97cfd45](https://www.github.com/googleapis/google-cloud-go/commit/97cfd4587f2f51996bd685ff486308b70eb51900))
-
-
-### Bug Fixes
-
-* **internal/kokoro:** remove unnecessary cd ([#3071](https://www.github.com/googleapis/google-cloud-go/issues/3071)) ([c1a4c3e](https://www.github.com/googleapis/google-cloud-go/commit/c1a4c3eaffcdc3cffe0e223fcfa1f60879cd23bb))
-* **pubsublite:** Disable integration tests for project id ([#3087](https://www.github.com/googleapis/google-cloud-go/issues/3087)) ([a0982f7](https://www.github.com/googleapis/google-cloud-go/commit/a0982f79d6461feabdf31363f29fed7dc5677fe7))
-
-## [0.70.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.69.0...v0.70.0) (2020-10-19)
-
-
-### Features
-
-* **all:** auto-regenerate gapics , refs [#3047](https://www.github.com/googleapis/google-cloud-go/issues/3047) [#3035](https://www.github.com/googleapis/google-cloud-go/issues/3035) [#3025](https://www.github.com/googleapis/google-cloud-go/issues/3025)
-* **managedidentities:** start generating apiv1 ([#3032](https://www.github.com/googleapis/google-cloud-go/issues/3032)) ([10ccca2](https://www.github.com/googleapis/google-cloud-go/commit/10ccca238074d24fea580a4cd8e64478818b0b44))
-* **pubsublite:** Types for resource paths and topic/subscription configs ([#3026](https://www.github.com/googleapis/google-cloud-go/issues/3026)) ([6f7fa86](https://www.github.com/googleapis/google-cloud-go/commit/6f7fa86ed906258f98d996aab40184f3a46f9714))
-
-## [0.69.1](https://www.github.com/googleapis/google-cloud-go/compare/v0.69.0...v0.69.1) (2020-10-14)
-
-This is an empty release that was created solely to aid in pubsublite's module
-carve out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
-
-## [0.69.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.68.0...v0.69.0) (2020-10-14)
-
-
-### Features
-
-* **accessapproval:** start generating apiv1 ([#3002](https://www.github.com/googleapis/google-cloud-go/issues/3002)) ([709d6e7](https://www.github.com/googleapis/google-cloud-go/commit/709d6e76393e6ac00ff488efd83bfe873173b045))
-* **all:** auto-regenerate gapics , refs [#3010](https://www.github.com/googleapis/google-cloud-go/issues/3010) [#3005](https://www.github.com/googleapis/google-cloud-go/issues/3005) [#2993](https://www.github.com/googleapis/google-cloud-go/issues/2993) [#2989](https://www.github.com/googleapis/google-cloud-go/issues/2989) [#2981](https://www.github.com/googleapis/google-cloud-go/issues/2981) [#2976](https://www.github.com/googleapis/google-cloud-go/issues/2976) [#2968](https://www.github.com/googleapis/google-cloud-go/issues/2968) [#2958](https://www.github.com/googleapis/google-cloud-go/issues/2958)
-* **cmd/go-cloud-debug-agent:** mark as deprecated ([#2964](https://www.github.com/googleapis/google-cloud-go/issues/2964)) ([276ec88](https://www.github.com/googleapis/google-cloud-go/commit/276ec88b05852c33a3ba437e18d072f7ffd8fd33))
-* **godocfx:** add nesting to TOC ([#2972](https://www.github.com/googleapis/google-cloud-go/issues/2972)) ([3a49b2d](https://www.github.com/googleapis/google-cloud-go/commit/3a49b2d142a353f98429235c3f380431430b4dbf))
-* **internal/godocfx:** HTML-ify package summary ([#2986](https://www.github.com/googleapis/google-cloud-go/issues/2986)) ([9e64b01](https://www.github.com/googleapis/google-cloud-go/commit/9e64b018255bd8d9b31d60e8f396966251de946b))
-* **internal/kokoro:** make publish_docs VERSION optional ([#2979](https://www.github.com/googleapis/google-cloud-go/issues/2979)) ([76e35f6](https://www.github.com/googleapis/google-cloud-go/commit/76e35f689cb60bd5db8e14b8c8d367c5902bcb0e))
-* **websecurityscanner:** start generating apiv1 ([#3006](https://www.github.com/googleapis/google-cloud-go/issues/3006)) ([1d92e20](https://www.github.com/googleapis/google-cloud-go/commit/1d92e2062a13f62d7a96be53a7354c0cacca6a85))
-
-
-### Bug Fixes
-
-* **godocfx:** make extra files optional, filter out third_party ([#2985](https://www.github.com/googleapis/google-cloud-go/issues/2985)) ([f268921](https://www.github.com/googleapis/google-cloud-go/commit/f2689214a24b2e325d3e8f54441bb11fbef925f0))
-
-## [0.68.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.67.0...v0.68.0) (2020-10-02)
-
-
-### Features
-
-* **all:** auto-regenerate gapics , refs [#2952](https://www.github.com/googleapis/google-cloud-go/issues/2952) [#2944](https://www.github.com/googleapis/google-cloud-go/issues/2944) [#2935](https://www.github.com/googleapis/google-cloud-go/issues/2935)
-
-## [0.67.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.66.0...v0.67.0) (2020-09-29)
-
-
-### Features
-
-* **all:** auto-regenerate gapics , refs [#2933](https://www.github.com/googleapis/google-cloud-go/issues/2933) [#2919](https://www.github.com/googleapis/google-cloud-go/issues/2919) [#2913](https://www.github.com/googleapis/google-cloud-go/issues/2913) [#2910](https://www.github.com/googleapis/google-cloud-go/issues/2910) [#2899](https://www.github.com/googleapis/google-cloud-go/issues/2899) [#2897](https://www.github.com/googleapis/google-cloud-go/issues/2897) [#2886](https://www.github.com/googleapis/google-cloud-go/issues/2886) [#2877](https://www.github.com/googleapis/google-cloud-go/issues/2877) [#2869](https://www.github.com/googleapis/google-cloud-go/issues/2869) [#2864](https://www.github.com/googleapis/google-cloud-go/issues/2864)
-* **assuredworkloads:** start generating apiv1beta1 ([#2866](https://www.github.com/googleapis/google-cloud-go/issues/2866)) ([7598c4d](https://www.github.com/googleapis/google-cloud-go/commit/7598c4dd2462e8270a2c7b1f496af58ca81ff568))
-* **dialogflow/cx:** start generating apiv3beta1 ([#2875](https://www.github.com/googleapis/google-cloud-go/issues/2875)) ([37ca93a](https://www.github.com/googleapis/google-cloud-go/commit/37ca93ad69eda363d956f0174d444ed5914f5a72))
-* **docfx:** add support for examples ([#2884](https://www.github.com/googleapis/google-cloud-go/issues/2884)) ([0cc0de3](https://www.github.com/googleapis/google-cloud-go/commit/0cc0de300d58be6d3b7eeb2f1baebfa6df076830))
-* **godocfx:** include README in output ([#2927](https://www.github.com/googleapis/google-cloud-go/issues/2927)) ([f084690](https://www.github.com/googleapis/google-cloud-go/commit/f084690a2ea08ce73bafaaced95ad271fd01e11e))
-* **talent:** start generating apiv4 ([#2871](https://www.github.com/googleapis/google-cloud-go/issues/2871)) ([5c98071](https://www.github.com/googleapis/google-cloud-go/commit/5c98071b03822c58862d1fa5442ff36d627f1a61))
-
-
-### Bug Fixes
-
-* **godocfx:** filter out other modules, sort pkgs ([#2894](https://www.github.com/googleapis/google-cloud-go/issues/2894)) ([868db45](https://www.github.com/googleapis/google-cloud-go/commit/868db45e2e6f4e9ad48432be86c849f335e1083d))
-* **godocfx:** shorten function names ([#2880](https://www.github.com/googleapis/google-cloud-go/issues/2880)) ([48a0217](https://www.github.com/googleapis/google-cloud-go/commit/48a0217930750c1f4327f2622b0f2a3ec8afc0b7))
-* **translate:** properly name examples ([#2892](https://www.github.com/googleapis/google-cloud-go/issues/2892)) ([c19e141](https://www.github.com/googleapis/google-cloud-go/commit/c19e1415e6fa76b7ea66a7fc67ad3ba22670a2ba)), refs [#2883](https://www.github.com/googleapis/google-cloud-go/issues/2883)
-
-## [0.66.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.65.0...v0.66.0) (2020-09-15)
-
-
-### Features
-
-* **all:** auto-regenerate gapics , refs [#2849](https://www.github.com/googleapis/google-cloud-go/issues/2849) [#2843](https://www.github.com/googleapis/google-cloud-go/issues/2843) [#2841](https://www.github.com/googleapis/google-cloud-go/issues/2841) [#2819](https://www.github.com/googleapis/google-cloud-go/issues/2819) [#2816](https://www.github.com/googleapis/google-cloud-go/issues/2816) [#2809](https://www.github.com/googleapis/google-cloud-go/issues/2809) [#2801](https://www.github.com/googleapis/google-cloud-go/issues/2801) [#2795](https://www.github.com/googleapis/google-cloud-go/issues/2795) [#2791](https://www.github.com/googleapis/google-cloud-go/issues/2791) [#2788](https://www.github.com/googleapis/google-cloud-go/issues/2788) [#2781](https://www.github.com/googleapis/google-cloud-go/issues/2781)
-* **analytics/data:** start generating apiv1alpha ([#2796](https://www.github.com/googleapis/google-cloud-go/issues/2796)) ([e93132c](https://www.github.com/googleapis/google-cloud-go/commit/e93132c77725de3c80c34d566df269eabfcfde93))
-* **area120/tables:** start generating apiv1alpha1 ([#2807](https://www.github.com/googleapis/google-cloud-go/issues/2807)) ([9e5a4d0](https://www.github.com/googleapis/google-cloud-go/commit/9e5a4d0dee0d83be0c020797a2f579d9e42ef521))
-* **cloudbuild:** Start generating apiv1/v3 ([#2830](https://www.github.com/googleapis/google-cloud-go/issues/2830)) ([358a536](https://www.github.com/googleapis/google-cloud-go/commit/358a5368da64cf4868551652e852ceb453504f64))
-* **godocfx:** create Go DocFX YAML generator ([#2854](https://www.github.com/googleapis/google-cloud-go/issues/2854)) ([37c70ac](https://www.github.com/googleapis/google-cloud-go/commit/37c70acd91768567106ff3b2b130835998d974c5))
-* **security/privateca:** start generating apiv1beta1 ([#2806](https://www.github.com/googleapis/google-cloud-go/issues/2806)) ([f985141](https://www.github.com/googleapis/google-cloud-go/commit/f9851412183989dc69733a7e61ad39a9378cd893))
-* **video/transcoder:** start generating apiv1beta1 ([#2797](https://www.github.com/googleapis/google-cloud-go/issues/2797)) ([390dda8](https://www.github.com/googleapis/google-cloud-go/commit/390dda8ff2c526e325e434ad0aec778b7aa97ea4))
-* **workflows:** start generating apiv1beta ([#2799](https://www.github.com/googleapis/google-cloud-go/issues/2799)) ([0e39665](https://www.github.com/googleapis/google-cloud-go/commit/0e39665ccb788caec800e2887d433ca6e0cf9901))
-* **workflows/executions:** start generating apiv1beta ([#2800](https://www.github.com/googleapis/google-cloud-go/issues/2800)) ([7eaa0d1](https://www.github.com/googleapis/google-cloud-go/commit/7eaa0d184c6a2141d8bf4514b3fd20715b50a580))
-
-
-### Bug Fixes
-
-* **internal/kokoro:** install the right version of docuploader ([#2861](https://www.github.com/googleapis/google-cloud-go/issues/2861)) ([d8489c1](https://www.github.com/googleapis/google-cloud-go/commit/d8489c141b8b02e83d6426f4baebd3658ae11639))
-* **internal/kokoro:** remove extra dash in doc tarball ([#2862](https://www.github.com/googleapis/google-cloud-go/issues/2862)) ([690ddcc](https://www.github.com/googleapis/google-cloud-go/commit/690ddccc5202b5a70f1afa5c518dca37b6a0861c))
-* **profiler:** do not collect disabled profile types ([#2836](https://www.github.com/googleapis/google-cloud-go/issues/2836)) ([faeb498](https://www.github.com/googleapis/google-cloud-go/commit/faeb4985bf6afdcddba4553efa874642bf7f08ed)), refs [#2835](https://www.github.com/googleapis/google-cloud-go/issues/2835)
-
-
-### Reverts
-
-* **cloudbuild): "feat(cloudbuild:** Start generating apiv1/v3" ([#2840](https://www.github.com/googleapis/google-cloud-go/issues/2840)) ([3aaf755](https://www.github.com/googleapis/google-cloud-go/commit/3aaf755476dfea1700986fc086f53fc1ab756557))
-
-## [0.65.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.64.0...v0.65.0) (2020-08-27)
-
-
-### Announcements
-
-The following changes will be included in an upcoming release and are not
-included in this one.
-
-#### Default Deadlines
-
-By default, non-streaming methods, like Create or Get methods, will have a
-default deadline applied to the context provided at call time, unless a context
-deadline is already set. Streaming methods have no default deadline and will run
-indefinitely, unless the context provided at call time contains a deadline.
-
-To opt-out of this behavior, set the environment variable
-`GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE` to `true` prior to
-initializing a client. This opt-out mechanism will be removed in a later
-release, with a notice similar to this one ahead of its removal.
-
-
-### Features
-
-* **all:** auto-regenerate gapics , refs [#2774](https://www.github.com/googleapis/google-cloud-go/issues/2774) [#2764](https://www.github.com/googleapis/google-cloud-go/issues/2764)
-
-
-### Bug Fixes
-
-* **all:** correct minor typos ([#2756](https://www.github.com/googleapis/google-cloud-go/issues/2756)) ([03d78b5](https://www.github.com/googleapis/google-cloud-go/commit/03d78b5627819cb64d1f3866f90043f709e825e1))
-* **compute/metadata:** remove leading slash for Get suffix ([#2760](https://www.github.com/googleapis/google-cloud-go/issues/2760)) ([f0d605c](https://www.github.com/googleapis/google-cloud-go/commit/f0d605ccf32391a9da056a2c551158bd076c128d))
-
-## [0.64.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.63.0...v0.64.0) (2020-08-18)
-
-
-### Features
-
-* **all:** auto-regenerate gapics , refs [#2734](https://www.github.com/googleapis/google-cloud-go/issues/2734) [#2731](https://www.github.com/googleapis/google-cloud-go/issues/2731) [#2730](https://www.github.com/googleapis/google-cloud-go/issues/2730) [#2725](https://www.github.com/googleapis/google-cloud-go/issues/2725) [#2722](https://www.github.com/googleapis/google-cloud-go/issues/2722) [#2706](https://www.github.com/googleapis/google-cloud-go/issues/2706)
-* **pubsublite:** start generating v1 ([#2700](https://www.github.com/googleapis/google-cloud-go/issues/2700)) ([d2e777f](https://www.github.com/googleapis/google-cloud-go/commit/d2e777f56e08146646b3ffb7a78856795094ab4e))
-
-## [0.63.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.62.0...v0.63.0) (2020-08-05)
-
-
-### Features
-
-* **all:** auto-regenerate gapics ([#2682](https://www.github.com/googleapis/google-cloud-go/issues/2682)) ([63bfd63](https://www.github.com/googleapis/google-cloud-go/commit/63bfd638da169e0f1f4fa4a5125da2955022dc04))
-* **analytics/admin:** start generating apiv1alpha ([#2670](https://www.github.com/googleapis/google-cloud-go/issues/2670)) ([268199e](https://www.github.com/googleapis/google-cloud-go/commit/268199e5350a64a83ecf198e0e0fa4863f00fa6c))
-* **functions/metadata:** Special-case marshaling ([#2669](https://www.github.com/googleapis/google-cloud-go/issues/2669)) ([d8d7fc6](https://www.github.com/googleapis/google-cloud-go/commit/d8d7fc66cbc42f79bec25fb0daaf53d926e3645b))
-* **gaming:** start generating apiv1 ([#2681](https://www.github.com/googleapis/google-cloud-go/issues/2681)) ([1adfd0a](https://www.github.com/googleapis/google-cloud-go/commit/1adfd0aed6b2c0e1dd0c575a5ec0f49388fa5601))
-* **internal/kokoro:** add script to test compatibility with samples ([#2637](https://www.github.com/googleapis/google-cloud-go/issues/2637)) ([f2aa76a](https://www.github.com/googleapis/google-cloud-go/commit/f2aa76a0058e86c1c33bb634d2c084b58f77ab32))
-
-## v0.62.0
-
-### Announcements
-
-- There was a breaking change to `cloud.google.com/go/dataproc/apiv1` that was
- merged in [this PR](https://github.com/googleapis/google-cloud-go/pull/2606).
- This fixed a broken API response for `DiagnoseCluster`. When polling on the
- Long Running Operation(LRO), the API now returns
- `(*dataprocpb.DiagnoseClusterResults, error)` whereas it only returned an
- `error` before.
-
-### Changes
-
-- all:
- - Updated all direct dependencies.
- - Updated contributing guidelines to suggest allowing edits from maintainers.
-- billing/budgets:
- - Start generating client for apiv1beta1.
-- functions:
- - Start generating client for apiv1.
-- notebooks:
- - Start generating client apiv1beta1.
-- profiler:
- - update proftest to support parsing floating-point backoff durations.
- - Fix the regexp used to parse backoff duration.
-- Various updates to autogenerated clients.
-
-## v0.61.0
-
-### Changes
-
-- all:
- - Update all direct dependencies.
-- dashboard:
- - Start generating client for apiv1.
-- policytroubleshooter:
- - Start generating client for apiv1.
-- profiler:
- - Disable OpenCensus Telemetry for requests made by the profiler package by default. You can re-enable it using `profiler.Config.EnableOCTelemetry`.
-- Various updates to autogenerated clients.
-
-## v0.60.0
-
-### Changes
-
-- all:
- - Refactored examples to reduce module dependencies.
- - Update sub-modules to use cloud.google.com/go v0.59.0.
-- internal:
- - Start generating client for gaming apiv1beta.
-- Various updates to autogenerated clients.
-
-## v0.59.0
-
-### Announcements
-
-googleapis/google-cloud-go has moved its source of truth to GitHub and is no longer a mirror. This means that our
-contributing process has changed a bit. We will now be conducting all code reviews on GitHub which means we now accept
-pull requests! If you have a version of the codebase previously checked out you may wish to update your git remote to
-point to GitHub.
-
-### Changes
-
-- all:
- - Remove dependency on honnef.co/go/tools.
- - Update our contributing instructions now that we use GitHub for reviews.
- - Remove some un-inclusive terminology.
-- compute/metadata:
- - Pass cancelable context to DNS lookup.
-- .github:
- - Update templates issue/PR templates.
-- internal:
- - Bump several clients to GA.
- - Fix GoDoc badge source.
- - Several automation changes related to the move to GitHub.
- - Start generating a client for asset v1p5beta1.
-- Various updates to autogenerated clients.
-
-## v0.58.0
-
-### Deprecation notice
-
-- `cloud.google.com/go/monitoring/apiv3` has been deprecated due to breaking
- changes in the API. Please migrate to `cloud.google.com/go/monitoring/apiv3/v2`.
-
-### Changes
-
-- all:
- - The remaining uses of gtransport.Dial have been removed.
- - The `genproto` dependency has been updated to a version that makes use of
- new `protoreflect` library. For more information on these protobuf changes
- please see the following post from the official Go blog:
- https://blog.golang.org/protobuf-apiv2.
-- internal:
- - Started generation of datastore admin v1 client.
-  - Updated protobuf version used for generation to 3.12.X.
- - Update the release levels for several APIs.
- - Generate clients with protoc-gen-go@v1.4.1.
-- monitoring:
- - Re-enable generation of monitoring/apiv3 under v2 directory (see deprecation
- notice above).
-- profiler:
- - Fixed flakiness in tests.
-- Various updates to autogenerated clients.
-
-## v0.57.0
-
-- all:
- - Update module dependency `google.golang.org/api` to `v0.21.0`.
-- errorreporting:
- - Add exported SetGoogleClientInfo wrappers to manual file.
-- expr/v1alpha1:
- - Deprecate client. This client will be removed in a future release.
-- internal:
- - Fix possible data race in TestTracer.
- - Pin versions of tools used for generation.
- - Correct the release levels for BigQuery APIs.
- - Start generation osconfig v1.
-- longrunning:
- - Add exported SetGoogleClientInfo wrappers to manual file.
-- monitoring:
- - Stop generation of monitoring/apiv3 because of incoming breaking change.
-- trace:
- - Add exported SetGoogleClientInfo wrappers to manual file.
-- Various updates to autogenerated clients.
-
-## v0.56.0
-
-- secretmanager:
- - add IAM helper
-- profiler:
- - try all us-west1 zones for integration tests
-- internal:
- - add config to generate webrisk v1
- - add repo and commit to buildcop invocation
- - add recaptchaenterprise v1 generation config
- - update microgenerator to v0.12.5
- - add datacatalog client
- - start generating security center settings v1beta
- - start generating osconfig agentendpoint v1
- - setup generation for bigquery/connection/v1beta1
-- all:
-  - increase continuous testing timeout to 45m
- - various updates to autogenerated clients.
-
-## v0.55.0
-
-- Various updates to autogenerated clients.
-
-## v0.54.0
-
-- all:
- - remove unused golang.org/x/exp from mod file
- - update godoc.org links to pkg.go.dev
-- compute/metadata:
- - use defaultClient when http.Client is nil
- - remove subscribeClient
-- iam:
- - add support for v3 policy and IAM conditions
-- Various updates to autogenerated clients.
-
-## v0.53.0
-
-- all: most clients now use transport/grpc.DialPool rather than Dial (see #1777 for outliers).
- - Connection pooling now does not use the deprecated (and soon to be removed) gRPC load balancer API.
-- profiler: remove symbolization (drops support for go1.10)
-- Various updates to autogenerated clients.
-
-## v0.52.0
-
-- internal/gapicgen: multiple improvements related to library generation.
-- compute/metadata: unset ResponseHeaderTimeout in defaultClient
-- docs: fix link to KMS in README.md
-- Various updates to autogenerated clients.
-
-## v0.51.0
-
-- secretmanager:
- - add IAM helper for generic resource IAM handle
-- cloudbuild:
- - migrate to microgen in a major version
-- Various updates to autogenerated clients.
-
-## v0.50.0
-
-- profiler:
- - Support disabling CPU profile collection.
- - Log when a profile creation attempt begins.
-- compute/metadata:
- - Fix panic on malformed URLs.
- - InstanceName returns actual instance name.
-- Various updates to autogenerated clients.
-
-## v0.49.0
-
-- functions/metadata:
- - Handle string resources in JSON unmarshaller.
-- Various updates to autogenerated clients.
-
-## v0.48.0
-
-- Various updates to autogenerated clients
-
-## v0.47.0
-
-This release drops support for Go 1.9 and Go 1.10: we continue to officially
-support Go 1.11, Go 1.12, and Go 1.13.
-
-- Various updates to autogenerated clients.
-- Add cloudbuild/apiv1 client.
-
-## v0.46.3
-
-This is an empty release that was created solely to aid in storage's module
-carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
-
-## v0.46.2
-
-This is an empty release that was created solely to aid in spanner's module
-carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
-
-## v0.46.1
-
-This is an empty release that was created solely to aid in firestore's module
-carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
-
-## v0.46.0
-
-- spanner:
- - Retry "Session not found" for read-only transactions.
- - Retry aborted PDMLs.
-- spanner/spannertest:
- - Fix a bug that was causing 0X-prefixed number to be parsed incorrectly.
-- storage:
- - Add HMACKeyOptions.
- - Remove *REGIONAL from StorageClass documentation. Using MULTI_REGIONAL,
- DURABLE_REDUCED_AVAILABILITY, and REGIONAL are no longer best practice
- StorageClasses but they are still acceptable values.
-- trace:
- - Remove cloud.google.com/go/trace. Package cloud.google.com/go/trace has been
- marked OBSOLETE for several years: it is now no longer provided. If you
- relied on this package, please vendor it or switch to using
- https://cloud.google.com/trace/docs/setup/go (which obsoleted it).
-
-## v0.45.1
-
-This is an empty release that was created solely to aid in pubsub's module
-carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
-
-## v0.45.0
-
-- compute/metadata:
- - Add Email method.
-- storage:
- - Fix duplicated retry logic.
- - Add ReaderObjectAttrs.StartOffset.
- - Support reading last N bytes of a file when a negative range is given, such
- as `obj.NewRangeReader(ctx, -10, -1)`.
- - Add HMACKey listing functionality.
-- spanner/spannertest:
- - Support primary keys with no columns.
- - Fix MinInt64 parsing.
- - Implement deletion of key ranges.
- - Handle reads during a read-write transaction.
- - Handle returning DATE values.
-- pubsub:
- - Fix Ack/Modack request size calculation.
-- logging:
- - Add auto-detection of monitored resources on GAE Standard.
-
-## v0.44.3
-
-This is an empty release that was created solely to aid in bigtable's module
-carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
-
-## v0.44.2
-
-This is an empty release that was created solely to aid in bigquery's module
-carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
-
-## v0.44.1
-
-This is an empty release that was created solely to aid in datastore's module
-carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
-
-## v0.44.0
-
-- datastore:
- - Interface elements whose underlying types are supported, are now supported.
- - Reduce time to initial retry from 1s to 100ms.
-- firestore:
- - Add Increment transformation.
-- storage:
- - Allow emulator with STORAGE_EMULATOR_HOST.
- - Add methods for HMAC key management.
-- pubsub:
- - Add PublishCount and PublishLatency measurements.
- - Add DefaultPublishViews and DefaultSubscribeViews for convenience of
- importing all views.
-  - Add Subscription.PushConfig.AuthenticationMethod.
-- spanner:
- - Allow emulator usage with SPANNER_EMULATOR_HOST.
- - Add cloud.google.com/go/spanner/spannertest, a spanner emulator.
- - Add cloud.google.com/go/spanner/spansql which contains types and a parser
- for the Cloud Spanner SQL dialect.
-- asset:
- - Add apiv1p2beta1 client.
-
-## v0.43.0
-
-This is an empty release that was created solely to aid in logging's module
-carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
-
-## v0.42.0
-
-- bigtable:
- - Add an admin method to update an instance and clusters.
- - Fix bttest regex matching behavior for alternations (things like `|a`).
- - Expose BlockAllFilter filter.
-- bigquery:
- - Add Routines API support.
-- storage:
- - Add read-only Bucket.LocationType.
-- logging:
- - Add TraceSampled to Entry.
- - Fix to properly extract {Trace, Span}Id from X-Cloud-Trace-Context.
-- pubsub:
- - Add Cloud Key Management to TopicConfig.
- - Change ExpirationPolicy to optional.Duration.
-- automl:
- - Add apiv1beta1 client.
-- iam:
- - Fix compilation problem with iam/credentials/apiv1.
-
-## v0.41.0
-
-- bigtable:
- - Check results from PredicateFilter in bttest, which fixes certain false matches.
-- profiler:
- - debugLog checks user defined logging options before logging.
-- spanner:
- - PartitionedUpdates respect query parameters.
- - StartInstance allows specifying cloud API access scopes.
-- bigquery:
- - Use empty slice instead of nil for ValueSaver, fixing an issue with zero-length, repeated, nested fields causing panics.
-- firestore:
- - Return same number of snapshots as doc refs (in the form of duplicate records) during GetAll.
-- replay:
- - Change references to IPv4 addresses to localhost, making replay compatible with IPv6.
-
-## v0.40.0
-
-- all:
- - Update to protobuf-golang v1.3.1.
-- datastore:
- - Attempt to decode GAE-encoded keys if initial decoding attempt fails.
- - Support integer time conversion.
-- pubsub:
- - Add PublishSettings.BundlerByteLimit. If users receive pubsub.ErrOverflow,
- this value should be adjusted higher.
- - Use IPv6 compatible target in testutil.
-- bigtable:
- - Fix Latin-1 regexp filters in bttest, allowing \C.
- - Expose PassAllFilter.
-- profiler:
- - Add log messages for slow path in start.
- - Fix start to allow retry until success.
-- firestore:
- - Add admin client.
-- containeranalysis:
- - Add apiv1 client.
-- grafeas:
- - Add apiv1 client.
-
-## 0.39.0
-
-- bigtable:
- - Implement DeleteInstance in bttest.
- - Return an error on invalid ReadRowsRequest.RowRange key ranges in bttest.
-- bigquery:
- - Move RequirePartitionFilter outside of TimePartitioning.
- - Expose models API.
-- firestore:
- - Allow array values in create and update calls.
- - Add CollectionGroup method.
-- pubsub:
- - Add ExpirationPolicy to Subscription.
-- storage:
- - Add V4 signing.
-- rpcreplay:
- - Match streams by first sent request. This further improves rpcreplay's
- ability to distinguish streams.
-- httpreplay:
- - Set up Man-In-The-Middle config only once. This should improve proxy
- creation when multiple proxies are used in a single process.
- - Remove error on empty Content-Type, allowing requests with no Content-Type
- header but a non-empty body.
-- all:
- - Fix an edge case bug in auto-generated library pagination by properly
- propagating pagetoken.
-
-## 0.38.0
-
-This update includes a substantial reduction in our transitive dependency list
-by way of updating to opencensus@v0.21.0.
-
-- spanner:
- - Error implements GRPCStatus, allowing status.Convert.
-- bigtable:
- - Fix a bug in bttest that prevents single column queries returning results
- that match other filters.
- - Remove verbose retry logging.
-- logging:
- - Ensure RequestUrl has proper UTF-8, removing the need for users to wrap and
- rune replace manually.
-- recaptchaenterprise:
- - Add v1beta1 client.
-- phishingprotection:
- - Add v1beta1 client.
-
-## 0.37.4
-
-This patch release re-builds the go.sum. This was not possible in the
-previous release.
-
-- firestore:
- - Add sentinel value DetectProjectID for auto-detecting project ID.
- - Add OpenCensus tracing for public methods.
- - Marked stable. All future changes come with a backwards compatibility
- guarantee.
- - Removed firestore/apiv1beta1. All users relying on this low-level library
- should migrate to firestore/apiv1. Note that most users should use the
- high-level firestore package instead.
-- pubsub:
- - Allow large messages in synchronous pull case.
- - Cap bundler byte limit. This should prevent OOM conditions when there are
- a very large number of message publishes occurring.
-- storage:
- - Add ETag to BucketAttrs and ObjectAttrs.
-- datastore:
- - Removed some non-sensical OpenCensus traces.
-- webrisk:
- - Add v1 client.
-- asset:
- - Add v1 client.
-- cloudtasks:
- - Add v2 client.
-
-## 0.37.3
-
-This patch release removes github.com/golang/lint from the transitive
-dependency list, resolving `go get -u` problems.
-
-Note: this release intentionally has a broken go.sum. Please use v0.37.4.
-
-## 0.37.2
-
-This patch release is mostly intended to bring in v0.3.0 of
-google.golang.org/api, which fixes a GCF deployment issue.
-
-Note: to date, we had accidentally marked Redis as stable. In this release, we've
-fixed it by downgrading its documentation to alpha, as it is in other languages
-and docs.
-
-- all:
- - Document context in generated libraries.
-
-## 0.37.1
-
-Small go.mod version bumps to bring in v0.2.0 of google.golang.org/api, which
-introduces a new oauth2 url.
-
-## 0.37.0
-
-- spanner:
- - Add BatchDML method.
- - Reduced initial time between retries.
-- bigquery:
- - Produce better error messages for InferSchema.
- - Add logical type control for avro loads.
- - Add support for the GEOGRAPHY type.
-- datastore:
- - Add sentinel value DetectProjectID for auto-detecting project ID.
- - Allow flatten tag on struct pointers.
- - Fixed a bug that caused queries to panic with invalid queries. Instead they
- will now return an error.
-- profiler:
- - Add ability to override GCE zone and instance.
-- pubsub:
- - BEHAVIOR CHANGE: Refactor error code retry logic. RPCs should now more
- consistently retry specific error codes based on whether they're idempotent
- or non-idempotent.
-- httpreplay: Fixed a bug when a non-GET request had a zero-length body causing
- the Content-Length header to be dropped.
-- iot:
- - Add new apiv1 client.
-- securitycenter:
- - Add new apiv1 client.
-- cloudscheduler:
- - Add new apiv1 client.
-
-## 0.36.0
-
-- spanner:
- - Reduce minimum retry backoff from 1s to 100ms. This makes time between
- retries much faster and should improve latency.
-- storage:
- - Add support for Bucket Policy Only.
-- kms:
- - Add ResourceIAM helper method.
- - Deprecate KeyRingIAM and CryptoKeyIAM. Please use ResourceIAM.
-- firestore:
- - Switch from v1beta1 API to v1 API.
- - Allow emulator with FIRESTORE_EMULATOR_HOST.
-- bigquery:
- - Add NumLongTermBytes to Table.
- - Add TotalBytesProcessedAccuracy to QueryStatistics.
-- irm:
- - Add new v1alpha2 client.
-- talent:
- - Add new v4beta1 client.
-- rpcreplay:
- - Fix connection to work with grpc >= 1.17.
- - It is now required for an actual gRPC server to be running for Dial to
- succeed.
-
-## 0.35.1
-
-- spanner:
- - Adds OpenCensus views back to public API.
-
-## v0.35.0
-
-- all:
- - Add go.mod and go.sum.
- - Switch usage of gax-go to gax-go/v2.
-- bigquery:
- - Fix bug where time partitioning could not be removed from a table.
- - Fix panic that occurred with empty query parameters.
-- bttest:
- - Fix bug where deleted rows were returned by ReadRows.
-- bigtable/emulator:
- - Configure max message size to 256 MiB.
-- firestore:
- - Allow non-transactional queries in transactions.
- - Allow StartAt/EndBefore on direct children at any depth.
- - QuerySnapshotIterator.Stop may be called in an error state.
- - Fix bug that prevented reset of transaction write state in between retries.
-- functions/metadata:
- - Make Metadata.Resource a pointer.
-- logging:
- - Make SpanID available in logging.Entry.
-- metadata:
- - Wrap !200 error code in a typed err.
-- profiler:
- - Add function to check if function name is within a particular file in the
- profile.
- - Set parent field in create profile request.
- - Return kubernetes client to start cluster, so client can be used to poll
- cluster.
- - Add function for checking if filename is in profile.
-- pubsub:
- - Fix bug where messages expired without an initial modack in
- synchronous=true mode.
- - Receive does not retry ResourceExhausted errors.
-- spanner:
- - client.Close now cancels existing requests and should be much faster for
- large amounts of sessions.
- - Correctly allow MinOpened sessions to be spun up.
-
-## v0.34.0
-
-- functions/metadata:
- - Switch to using JSON in context.
- - Make Resource a value.
-- vision: Fix ProductSearch return type.
-- datastore: Add an example for how to handle MultiError.
-
-## v0.33.1
-
-- compute: Removes an erroneously added go.mod.
-- logging: Populate source location in fromLogEntry.
-
-## v0.33.0
-
-- bttest:
- - Add support for apply_label_transformer.
-- expr:
- - Add expr library.
-- firestore:
- - Support retrieval of missing documents.
-- kms:
- - Add IAM methods.
-- pubsub:
- - Clarify extension documentation.
-- scheduler:
- - Add v1beta1 client.
-- vision:
- - Add product search helper.
- - Add new product search client.
-
-## v0.32.0
-
-Note: This release is the last to support Go 1.6 and 1.8.
-
-- bigquery:
- - Add support for removing an expiration.
- - Ignore NeverExpire in Table.Create.
- - Validate table expiration time.
-- cbt:
- - Add note about not supporting arbitrary bytes.
-- datastore:
- - Align key checks.
-- firestore:
- - Return an error when using Start/End without providing values.
-- pubsub:
- - Add pstest Close method.
- - Clarify MaxExtension documentation.
-- securitycenter:
- - Add v1beta1 client.
-- spanner:
- - Allow nil in mutations.
- - Improve doc of SessionPoolConfig.MaxOpened.
- - Increase session deletion timeout from 5s to 15s.
-
-## v0.31.0
-
-- bigtable:
- - Group mutations across multiple requests.
-- bigquery:
- - Link to bigquery troubleshooting errors page in bigquery.Error comment.
-- cbt:
- - Fix go generate command.
- - Document usage of both maxage + maxversions.
-- datastore:
- - Passing nil keys results in ErrInvalidKey.
-- firestore:
- - Clarify what Document.DataTo does with untouched struct fields.
-- profile:
- - Validate service name in agent.
-- pubsub:
- - Fix deadlock with pstest and ctx.Cancel.
- - Fix a possible deadlock in pstest.
-- trace:
- - Update doc URL with new fragment.
-
-Special thanks to @fastest963 for going above and beyond helping us to debug
-hard-to-reproduce Pub/Sub issues.
-
-## v0.30.0
-
-- spanner: DML support added. See https://godoc.org/cloud.google.com/go/spanner#hdr-DML_and_Partitioned_DML for more information.
-- bigtable: bttest supports row sample filter.
-- functions: metadata package added for accessing Cloud Functions resource metadata.
-
-## v0.29.0
-
-- bigtable:
- - Add retry to all idempotent RPCs.
- - cbt supports complex GC policies.
- - Emulator supports arbitrary bytes in regex filters.
-- firestore: Add ArrayUnion and ArrayRemove.
-- logging: Add the ContextFunc option to supply the context used for
- asynchronous RPCs.
-- profiler: Ignore NotDefinedError when fetching the instance name
-- pubsub:
- - BEHAVIOR CHANGE: Receive doesn't retry if an RPC returns codes.Cancelled.
- - BEHAVIOR CHANGE: Receive retries on Unavailable instead of returning.
- - Fix deadlock.
- - Restore Ack/Nack/Modacks metrics.
- - Improve context handling in iterator.
- - Implement synchronous mode for Receive.
- - pstest: add Pull.
-- spanner: Add a metric for the number of sessions currently opened.
-- storage:
- - Canceling the context releases all resources.
- - Add additional RetentionPolicy attributes.
-- vision/apiv1: Add LocalizeObjects method.
-
-## v0.28.0
-
-- bigtable:
- - Emulator returns Unimplemented for snapshot RPCs.
-- bigquery:
- - Support zero-length repeated, nested fields.
-- cloud assets:
- - Add v1beta client.
-- datastore:
- - Don't nil out transaction ID on retry.
-- firestore:
- - BREAKING CHANGE: When watching a query with Query.Snapshots, QuerySnapshotIterator.Next
- returns a QuerySnapshot which contains read time, result size, change list and the DocumentIterator
- (previously, QuerySnapshotIterator.Next returned just the DocumentIterator). See: https://godoc.org/cloud.google.com/go/firestore#Query.Snapshots.
- - Add array-contains operator.
-- IAM:
- - Add iam/credentials/apiv1 client.
-- pubsub:
- - Canceling the context passed to Subscription.Receive causes Receive to return when
- processing finishes on all messages currently in progress, even if new messages are arriving.
-- redis:
- - Add redis/apiv1 client.
-- storage:
- - Add Reader.Attrs.
- - Deprecate several Reader getter methods: please use Reader.Attrs for these instead.
- - Add ObjectHandle.Bucket and ObjectHandle.Object methods.
-
-## v0.27.0
-
-- bigquery:
- - Allow modification of encryption configuration and partitioning options to a table via the Update call.
- - Add a SchemaFromJSON function that converts a JSON table schema.
-- bigtable:
- - Restore cbt count functionality.
-- containeranalysis:
- - Add v1beta client.
-- spanner:
- - Fix a case where an iterator might not be closed correctly.
-- storage:
- - Add ServiceAccount method https://godoc.org/cloud.google.com/go/storage#Client.ServiceAccount.
- - Add a method to Reader that returns the parsed value of the Last-Modified header.
-
-## v0.26.0
-
-- bigquery:
- - Support filtering listed jobs by min/max creation time.
- - Support data clustering (https://godoc.org/cloud.google.com/go/bigquery#Clustering).
- - Include job creator email in Job struct.
-- bigtable:
- - Add `RowSampleFilter`.
- - emulator: BREAKING BEHAVIOR CHANGE: Regexps in row, family, column and value filters
- must match the entire target string to succeed. Previously, the emulator was
- succeeding on partial matches.
- NOTE: As of this release, this change only affects the emulator when run
- from this repo (bigtable/cmd/emulator/cbtemulator.go). The version launched
- from `gcloud` will be updated in a subsequent `gcloud` release.
-- dataproc: Add apiv1beta2 client.
-- datastore: Save non-nil pointer fields on omitempty.
-- logging: populate Entry.Trace from the HTTP X-Cloud-Trace-Context header.
-- logging/logadmin: Support writer_identity and include_children.
-- pubsub:
- - Support labels on topics and subscriptions.
- - Support message storage policy for topics.
- - Use the distribution of ack times to determine when to extend ack deadlines.
- The only user-visible effect of this change should be that programs that
- call only `Subscription.Receive` need no IAM permissions other than `Pub/Sub
- Subscriber`.
-- storage:
- - Support predefined ACLs.
- - Support additional ACL fields other than Entity and Role.
- - Support bucket websites.
- - Support bucket logging.
-
-
-## v0.25.0
-
-- Added [Code of Conduct](https://github.com/googleapis/google-cloud-go/blob/master/CODE_OF_CONDUCT.md)
-- bigtable:
- - cbt: Support a GC policy of "never".
-- errorreporting:
- - Support User.
- - Close now calls Flush.
- - Use OnError (previously ignored).
- - Pass through the RPC error as-is to OnError.
-- httpreplay: A tool for recording and replaying HTTP requests
- (for the bigquery and storage clients in this repo).
-- kms: v1 client added
-- logging: add SourceLocation to Entry.
-- storage: improve CRC checking on read.
-
-## v0.24.0
-
-- bigquery: Support for the NUMERIC type.
-- bigtable:
- - cbt: Optionally specify columns for read/lookup
- - Support instance-level administration.
-- oslogin: New client for the OS Login API.
-- pubsub:
- - The package is now stable. There will be no further breaking changes.
- - Internal changes to improve Subscription.Receive behavior.
-- storage: Support updating bucket lifecycle config.
-- spanner: Support struct-typed parameter bindings.
-- texttospeech: New client for the Text-to-Speech API.
-
-## v0.23.0
-
-- bigquery: Add DDL stats to query statistics.
-- bigtable:
- - cbt: Add cells-per-column limit for row lookup.
- - cbt: Make it possible to combine read filters.
-- dlp: v2beta2 client removed. Use the v2 client instead.
-- firestore, spanner: Fix compilation errors due to protobuf changes.
-
-## v0.22.0
-
-- bigtable:
- - cbt: Support cells per column limit for row read.
- - bttest: Correctly handle empty RowSet.
- - Fix ReadModifyWrite operation in emulator.
- - Fix API path in GetCluster.
-
-- bigquery:
- - BEHAVIOR CHANGE: Retry on 503 status code.
- - Add dataset.DeleteWithContents.
- - Add SchemaUpdateOptions for query jobs.
- - Add Timeline to QueryStatistics.
- - Add more stats to ExplainQueryStage.
- - Support Parquet data format.
-
-- datastore:
- - Support omitempty for times.
-
-- dlp:
- - **BREAKING CHANGE:** Remove v1beta1 client. Please migrate to the v2 client,
- which is now out of beta.
- - Add v2 client.
-
-- firestore:
- - BEHAVIOR CHANGE: Treat set({}, MergeAll) as valid.
-
-- iam:
- - Support JWT signing via SignJwt callopt.
-
-- profiler:
- - BEHAVIOR CHANGE: PollForSerialOutput returns an error when context.Done.
- - BEHAVIOR CHANGE: Increase the initial backoff to 1 minute.
- - Avoid returning empty serial port output.
-
-- pubsub:
- - BEHAVIOR CHANGE: Don't backoff during next retryable error once stream is healthy.
- - BEHAVIOR CHANGE: Don't backoff on EOF.
- - pstest: Support Acknowledge and ModifyAckDeadline RPCs.
-
-- redis:
- - Add v1 beta Redis client.
-
-- spanner:
- - Support SessionLabels.
-
-- speech:
- - Add api v1 beta1 client.
-
-- storage:
- - BEHAVIOR CHANGE: Retry reads when retryable error occurs.
- - Fix delete of object in requester-pays bucket.
- - Support KMS integration.
-
-## v0.21.0
-
-- bigquery:
- - Add OpenCensus tracing.
-
-- firestore:
- - **BREAKING CHANGE:** If a document does not exist, return a DocumentSnapshot
- whose Exists method returns false. DocumentRef.Get and Transaction.Get
- return the non-nil DocumentSnapshot in addition to a NotFound error.
- **DocumentRef.GetAll and Transaction.GetAll return a non-nil
- DocumentSnapshot instead of nil.**
- - Add DocumentIterator.Stop. **Call Stop whenever you are done with a
- DocumentIterator.**
- - Added Query.Snapshots and DocumentRef.Snapshots, which provide realtime
- notification of updates. See https://cloud.google.com/firestore/docs/query-data/listen.
- - Canceling an RPC now always returns a grpc.Status with codes.Canceled.
-
-- spanner:
- - Add `CommitTimestamp`, which supports inserting the commit timestamp of a
- transaction into a column.
-
-## v0.20.0
-
-- bigquery: Support SchemaUpdateOptions for load jobs.
-
-- bigtable:
- - Add SampleRowKeys.
- - cbt: Support union, intersection GCPolicy.
- - Retry admin RPCS.
- - Add trace spans to retries.
-
-- datastore: Add OpenCensus tracing.
-
-- firestore:
- - Fix queries involving Null and NaN.
- - Allow Timestamp protobuffers for time values.
-
-- logging: Add a WriteTimeout option.
-
-- spanner: Support Batch API.
-
-- storage: Add OpenCensus tracing.
-
-## v0.19.0
-
-- bigquery:
- - Support customer-managed encryption keys.
-
-- bigtable:
- - Improved emulator support.
- - Support GetCluster.
-
-- datastore:
- - Add general mutations.
- - Support pointer struct fields.
- - Support transaction options.
-
-- firestore:
- - Add Transaction.GetAll.
- - Support document cursors.
-
-- logging:
- - Support concurrent RPCs to the service.
- - Support per-entry resources.
-
-- profiler:
- - Add config options to disable heap and thread profiling.
- - Read the project ID from $GOOGLE_CLOUD_PROJECT when it's set.
-
-- pubsub:
- - BEHAVIOR CHANGE: Release flow control after ack/nack (instead of after the
- callback returns).
- - Add SubscriptionInProject.
- - Add OpenCensus instrumentation for streaming pull.
-
-- storage:
- - Support CORS.
-
-## v0.18.0
-
-- bigquery:
- - Marked stable.
- - Schema inference of nullable fields supported.
- - Added TimePartitioning to QueryConfig.
-
-- firestore: Data provided to DocumentRef.Set with a Merge option can contain
- Delete sentinels.
-
-- logging: Clients can accept parent resources other than projects.
-
-- pubsub:
- - pubsub/pstest: A lightweight fake for pubsub. Experimental; feedback welcome.
- - Support updating more subscription metadata: AckDeadline,
- RetainAckedMessages and RetentionDuration.
-
-- oslogin/apiv1beta: New client for the Cloud OS Login API.
-
-- rpcreplay: A package for recording and replaying gRPC traffic.
-
-- spanner:
- - Add a ReadWithOptions that supports a row limit, as well as an index.
- - Support query plan and execution statistics.
- - Added [OpenCensus](http://opencensus.io) support.
-
-- storage: Clarify checksum validation for gzipped files (it is not validated
- when the file is served uncompressed).
-
-
-## v0.17.0
-
-- firestore BREAKING CHANGES:
- - Remove UpdateMap and UpdateStruct; rename UpdatePaths to Update.
- Change
- `docref.UpdateMap(ctx, map[string]interface{}{"a.b", 1})`
- to
- `docref.Update(ctx, []firestore.Update{{Path: "a.b", Value: 1}})`
-
- Change
- `docref.UpdateStruct(ctx, []string{"Field"}, aStruct)`
- to
- `docref.Update(ctx, []firestore.Update{{Path: "Field", Value: aStruct.Field}})`
- - Rename MergePaths to Merge; require args to be FieldPaths
- - A value stored as an integer can be read into a floating-point field, and vice versa.
-- bigtable/cmd/cbt:
- - Support deleting a column.
- - Add regex option for row read.
-- spanner: Mark stable.
-- storage:
- - Add Reader.ContentEncoding method.
- - Fix handling of SignedURL headers.
-- bigquery:
- - If Uploader.Put is called with no rows, it returns nil without making a
- call.
- - Schema inference supports the "nullable" option in struct tags for
- non-required fields.
- - TimePartitioning supports "Field".
-
-
-## v0.16.0
-
-- Other bigquery changes:
- - `JobIterator.Next` returns `*Job`; removed `JobInfo` (BREAKING CHANGE).
- - UseStandardSQL is deprecated; set UseLegacySQL to true if you need
- Legacy SQL.
- - Uploader.Put will generate a random insert ID if you do not provide one.
- - Support time partitioning for load jobs.
- - Support dry-run queries.
- - A `Job` remembers its last retrieved status.
- - Support retrieving job configuration.
- - Support labels for jobs and tables.
- - Support dataset access lists.
- - Improve support for external data sources, including data from Bigtable and
- Google Sheets, and tables with external data.
- - Support updating a table's view configuration.
- - Fix uploading civil times with nanoseconds.
-
-- storage:
- - Support PubSub notifications.
- - Support Requester Pays buckets.
-
-- profiler: Support goroutine and mutex profile types.
-
-## v0.15.0
-
-- firestore: beta release. See the
- [announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html).
-
-- errorreporting: The existing package has been redesigned.
-
-- errors: This package has been removed. Use errorreporting.
-
-
-## v0.14.0
-
-- bigquery BREAKING CHANGES:
- - Standard SQL is the default for queries and views.
- - `Table.Create` takes `TableMetadata` as a second argument, instead of
- options.
- - `Dataset.Create` takes `DatasetMetadata` as a second argument.
- - `DatasetMetadata` field `ID` renamed to `FullID`
- - `TableMetadata` field `ID` renamed to `FullID`
-
-- Other bigquery changes:
- - The client will append a random suffix to a provided job ID if you set
- `AddJobIDSuffix` to true in a job config.
- - Listing jobs is supported.
- - Better retry logic.
-
-- vision, language, speech: clients are now stable
-
-- monitoring: client is now beta
-
-- profiler:
- - Rename InstanceName to Instance, ZoneName to Zone
- - Auto-detect service name and version on AppEngine.
-
-## v0.13.0
-
-- bigquery: UseLegacySQL options for CreateTable and QueryConfig. Use these
- options to continue using Legacy SQL after the client switches its default
- to Standard SQL.
-
-- bigquery: Support for updating dataset labels.
-
-- bigquery: Set DatasetIterator.ProjectID to list datasets in a project other
- than the client's. DatasetsInProject is no longer needed and is deprecated.
-
-- bigtable: Fail ListInstances when any zones fail.
-
-- spanner: support decoding of slices of basic types (e.g. []string, []int64,
- etc.)
-
-- logging/logadmin: UpdateSink no longer creates a sink if it is missing
- (actually a change to the underlying service, not the client)
-
-- profiler: Service and ServiceVersion replace Target in Config.
-
-## v0.12.0
-
-- pubsub: Subscription.Receive now uses streaming pull.
-
-- pubsub: add Client.TopicInProject to access topics in a different project
- than the client.
-
-- errors: renamed errorreporting. The errors package will be removed shortly.
-
-- datastore: improved retry behavior.
-
-- bigquery: support updates to dataset metadata, with etags.
-
-- bigquery: add etag support to Table.Update (BREAKING: etag argument added).
-
-- bigquery: generate all job IDs on the client.
-
-- storage: support bucket lifecycle configurations.
-
-
-## v0.11.0
-
-- Clients for spanner, pubsub and video are now in beta.
-
-- New client for DLP.
-
-- spanner: performance and testing improvements.
-
-- storage: requester-pays buckets are supported.
-
-- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements.
-
-- pubsub: bug fixes and other minor improvements
-
-## v0.10.0
-
-- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update.
-
-- pubsub: Subscription.Receive now runs concurrently for higher throughput.
-
-- vision: cloud.google.com/go/vision is deprecated. Use
-cloud.google.com/go/vision/apiv1 instead.
-
-- translation: now stable.
-
-- trace: several changes to the surface. See the link below.
-
-### Code changes required from v0.9.0
-
-- pubsub: Replace
-
- ```
- sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"})
- ```
-
- with
-
- ```
- sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
- PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"},
- })
- ```
-
-- trace: traceGRPCServerInterceptor will be provided from *trace.Client.
-Given an initialized `*trace.Client` named `tc`, instead of
-
- ```
- s := grpc.NewServer(grpc.UnaryInterceptor(trace.GRPCServerInterceptor(tc)))
- ```
-
- write
-
- ```
- s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor()))
- ```
-
-- trace: trace.GRPCClientInterceptor will also be provided from *trace.Client.
-Instead of
-
- ```
- conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(trace.GRPCClientInterceptor()))
- ```
-
- write
-
- ```
- conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))
- ```
-
-- trace: We removed the deprecated `trace.EnableGRPCTracing`. Use the gRPC
-interceptor as a dial option as shown below when initializing Cloud package
-clients:
-
- ```
- c, err := pubsub.NewClient(ctx, "project-id", option.WithGRPCDialOption(grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())))
- if err != nil {
- ...
- }
- ```
-
-
-## v0.9.0
-
-- Breaking changes to some autogenerated clients.
-- rpcreplay package added.
-
-## v0.8.0
-
-- profiler package added.
-- storage:
- - Retry Objects.Insert call.
- - Add ProgressFunc to Writer.
-- pubsub: breaking changes:
- - Publish is now asynchronous ([announcement](https://groups.google.com/d/topic/google-api-go-announce/aaqRDIQ3rvU/discussion)).
- - Subscription.Pull replaced by Subscription.Receive, which takes a callback ([announcement](https://groups.google.com/d/topic/google-api-go-announce/8pt6oetAdKc/discussion)).
- - Message.Done replaced with Message.Ack and Message.Nack.
-
-## v0.7.0
-
-- Release of a client library for Spanner. See
-the
-[blog
-post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html).
-Note that although the Spanner service is beta, the Go client library is alpha.
-
-## v0.6.0
-
-- Beta release of BigQuery, DataStore, Logging and Storage. See the
-[blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html).
-
-- bigquery:
- - struct support. Read a row directly into a struct with
-`RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`.
-You can also use field tags. See the [package documentation][cloud-bigquery-ref]
-for details.
-
- - The `ValueList` type was removed. It is no longer necessary. Instead of
- ```go
- var v ValueList
- ... it.Next(&v) ..
- ```
- use
-
- ```go
- var v []Value
- ... it.Next(&v) ...
- ```
-
- - Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or
- `ValueList` would append to the slice. Now each call resets the size to zero first.
-
- - Schema inference will infer the SQL type BYTES for a struct field of
- type []byte. Previously it inferred STRING.
-
- - The types `uint`, `uint64` and `uintptr` are no longer supported in schema
- inference. BigQuery's integer type is INT64, and those types may hold values
- that are not correctly represented in a 64-bit signed integer.
-
-## v0.5.0
-
-- bigquery:
- - The SQL types DATE, TIME and DATETIME are now supported. They correspond to
- the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil`
- package.
- - Support for query parameters.
- - Support deleting a dataset.
- - Values from INTEGER columns will now be returned as int64, not int. This
- will avoid errors arising from large values on 32-bit systems.
-- datastore:
- - Nested Go structs encoded as Entity values, instead of a
-flattened list of the embedded struct's fields. This means that you may now have twice-nested slices, eg.
- ```go
- type State struct {
- Cities []struct{
- Populations []int
- }
- }
- ```
- See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for
-more details.
- - Contexts no longer hold namespaces; instead you must set a key's namespace
- explicitly. Also, key functions have been changed and renamed.
- - The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method:
- ```go
- q := datastore.NewQuery("Kind").Namespace("ns")
- ```
- - All the fields of Key are exported. That means you can construct any Key with a struct literal:
- ```go
- k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"}
- ```
- - As a result of the above, the Key methods Kind, ID, Name, Parent, SetParent and Namespace have been removed.
- - `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace
- ```go
- NewIncompleteKey(ctx, kind, parent)
- ```
- with
- ```go
- IncompleteKey(kind, parent)
- ```
- and if you do use namespaces, make sure you set the namespace on the returned key.
- - `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace
- ```go
- NewKey(ctx, kind, name, 0, parent)
- NewKey(ctx, kind, "", id, parent)
- ```
- with
- ```go
- NameKey(kind, name, parent)
- IDKey(kind, id, parent)
- ```
- and if you do use namespaces, make sure you set the namespace on the returned key.
- - The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`.
- - The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection.
- - See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for
-more details.
-
-## v0.4.0
-
-- bigquery:
- - `NewGCSReference` is now a function, not a method on `Client`.
- - `Table.LoaderFrom` now accepts a `ReaderSource`, enabling
- loading data into a table from a file or any `io.Reader`.
- * Client.Table and Client.OpenTable have been removed.
- Replace
- ```go
- client.OpenTable("project", "dataset", "table")
- ```
- with
- ```go
- client.DatasetInProject("project", "dataset").Table("table")
- ```
-
- * Client.CreateTable has been removed.
- Replace
- ```go
- client.CreateTable(ctx, "project", "dataset", "table")
- ```
- with
- ```go
- client.DatasetInProject("project", "dataset").Table("table").Create(ctx)
- ```
-
- * Dataset.ListTables have been replaced with Dataset.Tables.
- Replace
- ```go
- tables, err := ds.ListTables(ctx)
- ```
- with
- ```go
- it := ds.Tables(ctx)
- for {
- table, err := it.Next()
- if err == iterator.Done {
- break
- }
- if err != nil {
- // TODO: Handle error.
- }
- // TODO: use table.
- }
- ```
-
- * Client.Read has been replaced with Job.Read, Table.Read and Query.Read.
- Replace
- ```go
- it, err := client.Read(ctx, job)
- ```
- with
- ```go
- it, err := job.Read(ctx)
- ```
- and similarly for reading from tables or queries.
-
- * The iterator returned from the Read methods is now named RowIterator. Its
- behavior is closer to the other iterators in these libraries. It no longer
- supports the Schema method; see the next item.
- Replace
- ```go
- for it.Next(ctx) {
- var vals ValueList
- if err := it.Get(&vals); err != nil {
- // TODO: Handle error.
- }
- // TODO: use vals.
- }
- if err := it.Err(); err != nil {
- // TODO: Handle error.
- }
- ```
- with
- ```
- for {
- var vals ValueList
- err := it.Next(&vals)
- if err == iterator.Done {
- break
- }
- if err != nil {
- // TODO: Handle error.
- }
- // TODO: use vals.
- }
- ```
- Instead of the `RecordsPerRequest(n)` option, write
- ```go
- it.PageInfo().MaxSize = n
- ```
- Instead of the `StartIndex(i)` option, write
- ```go
- it.StartIndex = i
- ```
-
- * ValueLoader.Load now takes a Schema in addition to a slice of Values.
- Replace
- ```go
- func (vl *myValueLoader) Load(v []bigquery.Value)
- ```
- with
- ```go
- func (vl *myValueLoader) Load(v []bigquery.Value, s bigquery.Schema)
- ```
-
-
- * Table.Patch is replace by Table.Update.
- Replace
- ```go
- p := table.Patch()
- p.Description("new description")
- metadata, err := p.Apply(ctx)
- ```
- with
- ```go
- metadata, err := table.Update(ctx, bigquery.TableMetadataToUpdate{
- Description: "new description",
- })
- ```
-
- * Client.Copy is replaced by separate methods for each of its four functions.
- All options have been replaced by struct fields.
-
- * To load data from Google Cloud Storage into a table, use Table.LoaderFrom.
-
- Replace
- ```go
- client.Copy(ctx, table, gcsRef)
- ```
- with
- ```go
- table.LoaderFrom(gcsRef).Run(ctx)
- ```
- Instead of passing options to Copy, set fields on the Loader:
- ```go
- loader := table.LoaderFrom(gcsRef)
- loader.WriteDisposition = bigquery.WriteTruncate
- ```
-
- * To extract data from a table into Google Cloud Storage, use
- Table.ExtractorTo. Set fields on the returned Extractor instead of
- passing options.
-
- Replace
- ```go
- client.Copy(ctx, gcsRef, table)
- ```
- with
- ```go
- table.ExtractorTo(gcsRef).Run(ctx)
- ```
-
- * To copy data into a table from one or more other tables, use
- Table.CopierFrom. Set fields on the returned Copier instead of passing options.
-
- Replace
- ```go
- client.Copy(ctx, dstTable, srcTable)
- ```
- with
- ```go
- dst.Table.CopierFrom(srcTable).Run(ctx)
- ```
-
- * To start a query job, create a Query and call its Run method. Set fields
- on the query instead of passing options.
-
- Replace
- ```go
- client.Copy(ctx, table, query)
- ```
- with
- ```go
- query.Run(ctx)
- ```
-
- * Table.NewUploader has been renamed to Table.Uploader. Instead of options,
- configure an Uploader by setting its fields.
- Replace
- ```go
- u := table.NewUploader(bigquery.UploadIgnoreUnknownValues())
- ```
- with
- ```go
- u := table.NewUploader(bigquery.UploadIgnoreUnknownValues())
- u.IgnoreUnknownValues = true
- ```
-
-- pubsub: remove `pubsub.Done`. Use `iterator.Done` instead, where `iterator` is the package
-`google.golang.org/api/iterator`.
-
-## v0.3.0
-
-- storage:
- * AdminClient replaced by methods on Client.
- Replace
- ```go
- adminClient.CreateBucket(ctx, bucketName, attrs)
- ```
- with
- ```go
- client.Bucket(bucketName).Create(ctx, projectID, attrs)
- ```
-
- * BucketHandle.List replaced by BucketHandle.Objects.
- Replace
- ```go
- for query != nil {
- objs, err := bucket.List(d.ctx, query)
- if err != nil { ... }
- query = objs.Next
- for _, obj := range objs.Results {
- fmt.Println(obj)
- }
- }
- ```
- with
- ```go
- iter := bucket.Objects(d.ctx, query)
- for {
- obj, err := iter.Next()
- if err == iterator.Done {
- break
- }
- if err != nil { ... }
- fmt.Println(obj)
- }
- ```
- (The `iterator` package is at `google.golang.org/api/iterator`.)
-
- Replace `Query.Cursor` with `ObjectIterator.PageInfo().Token`.
-
- Replace `Query.MaxResults` with `ObjectIterator.PageInfo().MaxSize`.
-
-
- * ObjectHandle.CopyTo replaced by ObjectHandle.CopierFrom.
- Replace
- ```go
- attrs, err := src.CopyTo(ctx, dst, nil)
- ```
- with
- ```go
- attrs, err := dst.CopierFrom(src).Run(ctx)
- ```
-
- Replace
- ```go
- attrs, err := src.CopyTo(ctx, dst, &storage.ObjectAttrs{ContextType: "text/html"})
- ```
- with
- ```go
- c := dst.CopierFrom(src)
- c.ContextType = "text/html"
- attrs, err := c.Run(ctx)
- ```
-
- * ObjectHandle.ComposeFrom replaced by ObjectHandle.ComposerFrom.
- Replace
- ```go
- attrs, err := dst.ComposeFrom(ctx, []*storage.ObjectHandle{src1, src2}, nil)
- ```
- with
- ```go
- attrs, err := dst.ComposerFrom(src1, src2).Run(ctx)
- ```
-
- * ObjectHandle.Update's ObjectAttrs argument replaced by ObjectAttrsToUpdate.
- Replace
- ```go
- attrs, err := obj.Update(ctx, &storage.ObjectAttrs{ContextType: "text/html"})
- ```
- with
- ```go
- attrs, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{ContextType: "text/html"})
- ```
-
- * ObjectHandle.WithConditions replaced by ObjectHandle.If.
- Replace
- ```go
- obj.WithConditions(storage.Generation(gen), storage.IfMetaGenerationMatch(mgen))
- ```
- with
- ```go
- obj.Generation(gen).If(storage.Conditions{MetagenerationMatch: mgen})
- ```
-
- Replace
- ```go
- obj.WithConditions(storage.IfGenerationMatch(0))
- ```
- with
- ```go
- obj.If(storage.Conditions{DoesNotExist: true})
- ```
-
- * `storage.Done` replaced by `iterator.Done` (from package `google.golang.org/api/iterator`).
-
-- Package preview/logging deleted. Use logging instead.
-
-## v0.2.0
-
-- Logging client replaced with preview version (see below).
-
-- New clients for some of Google's Machine Learning APIs: Vision, Speech, and
-Natural Language.
-
-- Preview version of a new [Stackdriver Logging][cloud-logging] client in
-[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging).
-This client uses gRPC as its transport layer, and supports log reading, sinks
-and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly.
diff --git a/upstream/vendor/cloud.google.com/go/CONTRIBUTING.md b/upstream/vendor/cloud.google.com/go/CONTRIBUTING.md
deleted file mode 100644
index 36d1b275e33..00000000000
--- a/upstream/vendor/cloud.google.com/go/CONTRIBUTING.md
+++ /dev/null
@@ -1,364 +0,0 @@
-# Contributing
-
-1. [File an issue](https://github.com/googleapis/google-cloud-go/issues/new/choose).
- The issue will be used to discuss the bug or feature and should be created
- before sending a PR.
-
-1. [Install Go](https://golang.org/dl/).
- 1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`)
- is in your `PATH`.
- 1. Check it's working by running `go version`.
- * If it doesn't work, check the install location, usually
- `/usr/local/go`, is on your `PATH`.
-
-1. Sign one of the
-[contributor license agreements](#contributor-license-agreements) below.
-
-1. Clone the repo:
- `git clone https://github.com/googleapis/google-cloud-go`
-
-1. Change into the checked out source:
- `cd google-cloud-go`
-
-1. Fork the repo.
-
-1. Set your fork as a remote:
- `git remote add fork git@github.com:GITHUB_USERNAME/google-cloud-go.git`
-
-1. Make changes, commit to your fork.
-
- Commit messages should follow the
- [Conventional Commits Style](https://www.conventionalcommits.org). The scope
- portion should always be filled with the name of the package affected by the
- changes being made. For example:
- ```
- feat(functions): add gophers codelab
- ```
-
-1. Send a pull request with your changes.
-
- To minimize friction, consider setting `Allow edits from maintainers` on the
- PR, which will enable project committers and automation to update your PR.
-
-1. A maintainer will review the pull request and make comments.
-
- Prefer adding additional commits over amending and force-pushing since it can
- be difficult to follow code reviews when the commit history changes.
-
- Commits will be squashed when they're merged.
-
-## Policy on new dependencies
-
-While the Go ecosystem is rich with useful modules, in this project we try to
-minimize the number of direct dependencies we have on modules that are not
-Google-owned.
-
-Adding new third party dependencies can have the following effects:
-* broadens the vulnerability surface
-* increases so called "vanity" import routing infrastructure failure points
-* increases complexity of our own [`third_party`][] imports
-
-So if you are contributing, please either contribute the full implementation
-directly, or find a Google-owned project that provides the functionality. Of
-course, there may be exceptions to this rule, but those should be well defined
-and agreed upon by the maintainers ahead of time.
-
-## Testing
-
-We test code against two versions of Go, the minimum and maximum versions
-supported by our clients. To see which versions these are checkout our
-[README](README.md#supported-versions).
-
-### Integration Tests
-
-In addition to the unit tests, you may run the integration test suite. These
-directions describe setting up your environment to run integration tests for
-_all_ packages: note that many of these instructions may be redundant if you
-intend only to run integration tests on a single package.
-
-#### GCP Setup
-
-To run the integrations tests, creation and configuration of three projects in
-the Google Developers Console is required: one specifically for Firestore
-integration tests, one specifically for Bigtable integration tests, and another
-for all other integration tests. We'll refer to these projects as
-"Firestore project", "Bigtable project" and "general project".
-
-Note: You can skip setting up Bigtable project if you do not plan working on or running a few Bigtable
-tests that require a secondary project
-
-After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount)
-for each project. Ensure the project-level **Owner**
-[IAM role](https://console.cloud.google.com/iam-admin/iam/project) role is added to
-each service account. During the creation of the service account, you should
-download the JSON credential file for use later.
-
-Next, ensure the following APIs are enabled in the general project:
-
-- BigQuery API
-- BigQuery Data Transfer API
-- Cloud Dataproc API
-- Cloud Dataproc Control API Private
-- Cloud Datastore API
-- Cloud Firestore API
-- Cloud Key Management Service (KMS) API
-- Cloud Natural Language API
-- Cloud OS Login API
-- Cloud Pub/Sub API
-- Cloud Resource Manager API
-- Cloud Spanner API
-- Cloud Speech API
-- Cloud Translation API
-- Cloud Video Intelligence API
-- Cloud Vision API
-- Compute Engine API
-- Compute Engine Instance Group Manager API
-- Container Registry API
-- Firebase Rules API
-- Google Cloud APIs
-- Google Cloud Deployment Manager V2 API
-- Google Cloud SQL
-- Google Cloud Storage
-- Google Cloud Storage JSON API
-- Google Compute Engine Instance Group Updater API
-- Google Compute Engine Instance Groups API
-- Kubernetes Engine API
-- Cloud Error Reporting API
-- Pub/Sub Lite API
-
-Next, create a Datastore database in the general project, and a Firestore
-database in the Firestore project.
-
-Finally, in the general project, create an API key for the translate API:
-
-- Go to GCP Developer Console.
-- Navigate to APIs & Services > Credentials.
-- Click Create Credentials > API Key.
-- Save this key for use in `GCLOUD_TESTS_API_KEY` as described below.
-
-#### Local Setup
-
-Once the three projects are created and configured, set the following environment
-variables:
-
-- `GCLOUD_TESTS_GOLANG_PROJECT_ID`: Developers Console project's ID (e.g.
-bamboo-shift-455) for the general project.
-- `GCLOUD_TESTS_GOLANG_KEY`: The path to the JSON key file of the general
-project's service account.
-- `GCLOUD_TESTS_GOLANG_DATASTORE_DATABASES`: Comma separated list of developer's Datastore databases. If not provided, default database i.e. empty string is used.
-- `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID
-(e.g. doorway-cliff-677) for the Firestore project.
-- `GCLOUD_TESTS_GOLANG_FIRESTORE_DATABASES` : Comma separated list of developer's Firestore databases. If not provided, default database is used.
-- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the
-Firestore project's service account.
-- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API created above.
-- `GCLOUD_TESTS_GOLANG_SECONDARY_BIGTABLE_PROJECT_ID`: Developers Console project's ID (e.g. doorway-cliff-677) for Bigtable optional secondary project. This can be same as Firestore project or any project other than the general project.
-- `GCLOUD_TESTS_BIGTABLE_CLUSTER`: Cluster ID of Bigtable cluster in general project
-- `GCLOUD_TESTS_BIGTABLE_PRI_PROJ_SEC_CLUSTER`: Optional. Cluster ID of Bigtable secondary cluster in general project
-
-As part of the setup that follows, the following variables will be configured:
-
-- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests,
-in the form
-"projects/P/locations/L/keyRings/R". The creation of this is described below.
-- `GCLOUD_TESTS_BIGTABLE_KEYRING`: The full name of the keyring for the bigtable tests,
-in the form
-"projects/P/locations/L/keyRings/R". The creation of this is described below. Expected to be single region.
-- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone.
-
-Install the [gcloud command-line tool][gcloudcli] to your machine and use it to
-create some resources used in integration tests.
-
-From the project's root directory:
-
-``` sh
-# Sets the default project in your env.
-$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
-
-# Authenticates the gcloud tool with your account.
-$ gcloud auth login
-
-# Create the indexes for all the databases you want to use in the datastore integration tests.
-# Use empty string as databaseID or skip database flag for default database.
-$ gcloud alpha datastore indexes create --database=your-databaseID-1 --project=$GCLOUD_TESTS_GOLANG_PROJECT_ID testdata/index.yaml
-
-# Creates a Google Cloud storage bucket with the same name as your test project,
-# and with the Cloud Logging service account as owner, for the sink
-# integration tests in logging.
-$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
-$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
-
-# Creates a PubSub topic for integration tests of storage notifications.
-$ gcloud beta pubsub topics create go-storage-notification-test
-# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user
-# "service-@gs-project-accounts.iam.gserviceaccount.com"
-# as a publisher to that topic.
-
-# Creates a Spanner instance for the spanner integration tests.
-$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 10 --description 'Instance for go client test'
-# NOTE: Spanner instances are priced by the node-hour, so you may want to
-# delete the instance after testing with 'gcloud beta spanner instances delete'.
-
-$ export MY_KEYRING=some-keyring-name
-$ export MY_LOCATION=global
-$ export MY_SINGLE_LOCATION=us-central1
-# Creates a KMS keyring, in the same location as the default location for your
-# project's buckets.
-$ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION
-# Creates two keys in the keyring, named key1 and key2.
-$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
-$ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption
-# Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable.
-$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
-# Authorizes Google Cloud Storage to encrypt and decrypt using key1.
-$ gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1
-
-# Create KMS Key in one region for Bigtable
-$ gcloud kms keyrings create $MY_KEYRING --location $MY_SINGLE_LOCATION
-$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_SINGLE_LOCATION --purpose encryption
-# Sets the GCLOUD_TESTS_BIGTABLE_KEYRING environment variable.
-$ export GCLOUD_TESTS_BIGTABLE_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_SINGLE_LOCATION/keyRings/$MY_KEYRING
-# Create a service agent, https://cloud.google.com/bigtable/docs/use-cmek#gcloud:
-$ gcloud beta services identity create \
- --service=bigtableadmin.googleapis.com \
- --project $GCLOUD_TESTS_GOLANG_PROJECT_ID
-# Note the service agent email for the agent created.
-$ export SERVICE_AGENT_EMAIL=
-
-# Authorizes Google Cloud Bigtable to encrypt and decrypt using key1
-$ gcloud kms keys add-iam-policy-binding key1 \
- --keyring $MY_KEYRING \
- --location $MY_SINGLE_LOCATION \
- --role roles/cloudkms.cryptoKeyEncrypterDecrypter \
- --member "serviceAccount:$SERVICE_AGENT_EMAIL" \
- --project $GCLOUD_TESTS_GOLANG_PROJECT_ID
-```
-
-It may be useful to add exports to your shell initialization for future use.
-For instance, in `.zshrc`:
-
-```sh
-#### START GO SDK Test Variables
-# Developers Console project's ID (e.g. bamboo-shift-455) for the general project.
-export GCLOUD_TESTS_GOLANG_PROJECT_ID=your-project
-
-# Developers Console project's ID (e.g. bamboo-shift-455) for the Bigtable project.
-export GCLOUD_TESTS_GOLANG_SECONDARY_BIGTABLE_PROJECT_ID=your-bigtable-optional-secondary-project
-
-# The path to the JSON key file of the general project's service account.
-export GCLOUD_TESTS_GOLANG_KEY=~/directory/your-project-abcd1234.json
-
-# Comma separated list of developer's Datastore databases. If not provided,
-# default database i.e. empty string is used.
-export GCLOUD_TESTS_GOLANG_DATASTORE_DATABASES=your-database-1,your-database-2
-
-# Developers Console project's ID (e.g. doorway-cliff-677) for the Firestore project.
-export GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID=your-firestore-project
-
-# Comma separated list of developer's Firestore databases. If not provided, default database is used.
-export GCLOUD_TESTS_GOLANG_FIRESTORE_DATABASES=your-database-1,your-database-2
-
-# The path to the JSON key file of the Firestore project's service account.
-export GCLOUD_TESTS_GOLANG_FIRESTORE_KEY=~/directory/your-firestore-project-abcd1234.json
-
-# The full name of the keyring for the tests, in the form "projects/P/locations/L/keyRings/R".
-# The creation of this is described below.
-export MY_KEYRING=my-golang-sdk-test
-export MY_LOCATION=global
-export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING
-
-# API key for using the Translate API.
-export GCLOUD_TESTS_API_KEY=abcdefghijk123456789
-
-# Compute Engine zone. (https://cloud.google.com/compute/docs/regions-zones)
-export GCLOUD_TESTS_GOLANG_ZONE=your-chosen-region
-#### END GO SDK Test Variables
-```
-
-#### Running
-
-Once you've done the necessary setup, you can run the integration tests by
-running:
-
-``` sh
-$ go test -v ./...
-```
-
-Note that the above command will not run the tests in other modules. To run
-tests on other modules, first navigate to the appropriate
-subdirectory. For instance, to run only the tests for datastore:
-``` sh
-$ cd datastore
-$ go test -v ./...
-```
-
-#### Replay
-
-Some packages can record the RPCs during integration tests to a file for
-subsequent replay. To record, pass the `-record` flag to `go test`. The
-recording will be saved to the _package_`.replay` file. To replay integration
-tests from a saved recording, the replay file must be present, the `-short`
-flag must be passed to `go test`, and the `GCLOUD_TESTS_GOLANG_ENABLE_REPLAY`
-environment variable must have a non-empty value.
-
-## Contributor License Agreements
-
-Before we can accept your pull requests you'll need to sign a Contributor
-License Agreement (CLA):
-
-- **If you are an individual writing original source code** and **you own the
-intellectual property**, then you'll need to sign an [individual CLA][indvcla].
-- **If you work for a company that wants to allow you to contribute your
-work**, then you'll need to sign a [corporate CLA][corpcla].
-
-You can sign these electronically (just scroll to the bottom). After that,
-we'll be able to accept your pull requests.
-
-## Contributor Code of Conduct
-
-As contributors and maintainers of this project,
-and in the interest of fostering an open and welcoming community,
-we pledge to respect all people who contribute through reporting issues,
-posting feature requests, updating documentation,
-submitting pull requests or patches, and other activities.
-
-We are committed to making participation in this project
-a harassment-free experience for everyone,
-regardless of level of experience, gender, gender identity and expression,
-sexual orientation, disability, personal appearance,
-body size, race, ethnicity, age, religion, or nationality.
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery
-* Personal attacks
-* Trolling or insulting/derogatory comments
-* Public or private harassment
-* Publishing other's private information,
-such as physical or electronic
-addresses, without explicit permission
-* Other unethical or unprofessional conduct.
-
-Project maintainers have the right and responsibility to remove, edit, or reject
-comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct.
-By adopting this Code of Conduct,
-project maintainers commit themselves to fairly and consistently
-applying these principles to every aspect of managing this project.
-Project maintainers who do not follow or enforce the Code of Conduct
-may be permanently removed from the project team.
-
-This code of conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community.
-
-Instances of abusive, harassing, or otherwise unacceptable behavior
-may be reported by opening an issue
-or contacting one or more of the project maintainers.
-
-This Code of Conduct is adapted from the [Contributor Covenant](https://contributor-covenant.org), version 1.2.0,
-available at [https://contributor-covenant.org/version/1/2/0/](https://contributor-covenant.org/version/1/2/0/)
-
-[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/
-[indvcla]: https://developers.google.com/open-source/cla/individual
-[corpcla]: https://developers.google.com/open-source/cla/corporate
-[`third_party`]: https://opensource.google/documentation/reference/thirdparty
diff --git a/upstream/vendor/cloud.google.com/go/README.md b/upstream/vendor/cloud.google.com/go/README.md
deleted file mode 100644
index fc58181fa9b..00000000000
--- a/upstream/vendor/cloud.google.com/go/README.md
+++ /dev/null
@@ -1,87 +0,0 @@
-# Google Cloud Client Libraries for Go
-
-[](https://pkg.go.dev/cloud.google.com/go)
-
-Go packages for [Google Cloud Platform](https://cloud.google.com) services.
-
-``` go
-import "cloud.google.com/go"
-```
-
-To install the packages on your system, *do not clone the repo*. Instead:
-
-1. Change to your project directory: `cd /my/cloud/project`
-1. Get the package you want to use. Some products have their own module, so it's
- best to `go get` the package(s) you want to use:
-
-```bash
-go get cloud.google.com/go/firestore # Replace with the package you want to use.
-```
-
-**NOTE:** Some of these packages are under development, and may occasionally
-make backwards-incompatible changes.
-
-## Supported APIs
-
-For an updated list of all of our released APIs please see our
-[reference docs](https://cloud.google.com/go/docs/reference).
-
-## [Go Versions Supported](#supported-versions)
-
-Our libraries are compatible with at least the three most recent, major Go
-releases. They are currently compatible with:
-
-- Go 1.22
-- Go 1.21
-- Go 1.20
-- Go 1.19
-
-## Authorization
-
-By default, each API will use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials)
-for authorization credentials used in calling the API endpoints. This will allow your
-application to run in many environments without requiring explicit configuration.
-
-```go
-client, err := storage.NewClient(ctx)
-```
-
-To authorize using a
-[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys),
-pass
-[`option.WithCredentialsFile`](https://pkg.go.dev/google.golang.org/api/option#WithCredentialsFile)
-to the `NewClient` function of the desired package. For example:
-
-```go
-client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json"))
-```
-
-You can exert more control over authorization by using the
-[`golang.org/x/oauth2`](https://pkg.go.dev/golang.org/x/oauth2) package to
-create an `oauth2.TokenSource`. Then pass
-[`option.WithTokenSource`](https://pkg.go.dev/google.golang.org/api/option#WithTokenSource)
-to the `NewClient` function:
-
-```go
-tokenSource := ...
-client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
-```
-
-## Contributing
-
-Contributions are welcome. Please, see the
-[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
-document for details.
-
-Please note that this project is released with a Contributor Code of Conduct.
-By participating in this project you agree to abide by its terms.
-See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
-for more information.
-
-## Links
-
-- [Go on Google Cloud](https://cloud.google.com/go/home)
-- [Getting started with Go on Google Cloud](https://cloud.google.com/go/getting-started)
-- [App Engine Quickstart](https://cloud.google.com/appengine/docs/standard/go/quickstart)
-- [Cloud Functions Quickstart](https://cloud.google.com/functions/docs/quickstart-go)
-- [Cloud Run Quickstart](https://cloud.google.com/run/docs/quickstarts/build-and-deploy#go)
diff --git a/upstream/vendor/cloud.google.com/go/RELEASING.md b/upstream/vendor/cloud.google.com/go/RELEASING.md
deleted file mode 100644
index 6d0fcf4f9f9..00000000000
--- a/upstream/vendor/cloud.google.com/go/RELEASING.md
+++ /dev/null
@@ -1,141 +0,0 @@
-# Releasing
-
-## Determine which module to release
-
-The Go client libraries have several modules. Each module does not strictly
-correspond to a single library - they correspond to trees of directories. If a
-file needs to be released, you must release the closest ancestor module.
-
-To see all modules:
-
-```bash
-$ cat `find . -name go.mod` | grep module
-module cloud.google.com/go/pubsub
-module cloud.google.com/go/spanner
-module cloud.google.com/go
-module cloud.google.com/go/bigtable
-module cloud.google.com/go/bigquery
-module cloud.google.com/go/storage
-module cloud.google.com/go/pubsublite
-module cloud.google.com/go/firestore
-module cloud.google.com/go/logging
-module cloud.google.com/go/internal/gapicgen
-module cloud.google.com/go/internal/godocfx
-module cloud.google.com/go/internal/examples/fake
-module cloud.google.com/go/internal/examples/mock
-module cloud.google.com/go/datastore
-```
-
-The `cloud.google.com/go` is the repository root module. Each other module is
-a submodule.
-
-So, if you need to release a change in `bigtable/bttest/inmem.go`, the closest
-ancestor module is `cloud.google.com/go/bigtable` - so you should release a new
-version of the `cloud.google.com/go/bigtable` submodule.
-
-If you need to release a change in `asset/apiv1/asset_client.go`, the closest
-ancestor module is `cloud.google.com/go` - so you should release a new version
-of the `cloud.google.com/go` repository root module. Note: releasing
-`cloud.google.com/go` has no impact on any of the submodules, and vice-versa.
-They are released entirely independently.
-
-## Test failures
-
-If there are any test failures in the Kokoro build, releases are blocked until
-the failures have been resolved.
-
-## How to release
-
-### Automated Releases (`cloud.google.com/go` and submodules)
-
-We now use [release-please](https://github.com/googleapis/release-please) to
-perform automated releases for `cloud.google.com/go` and all submodules.
-
-1. If there are changes that have not yet been released, a
- [pull request](https://github.com/googleapis/google-cloud-go/pull/2971) will
- be automatically opened by release-please
- with a title like "chore: release X.Y.Z" (for the root module) or
- "chore: release datastore X.Y.Z" (for the datastore submodule), where X.Y.Z
- is the next version to be released. Find the desired pull request
- [here](https://github.com/googleapis/google-cloud-go/pulls)
-1. Check for failures in the
- [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
- any failures in the most recent build, address them before proceeding with
- the release. (This applies even if the failures are in a different submodule
- from the one being released.)
-1. Review the release notes. These are automatically generated from the titles
- of any merged commits since the previous release. If you would like to edit
- them, this can be done by updating the changes in the release PR.
-1. To cut a release, approve and merge the pull request. Doing so will
- update the `CHANGES.md`, tag the merged commit with the appropriate version,
- and draft a GitHub release which will copy the notes from `CHANGES.md`.
-
-### Manual Release (`cloud.google.com/go`)
-
-If for whatever reason the automated release process is not working as expected,
-here is how to manually cut a release of `cloud.google.com/go`.
-
-1. Check for failures in the
- [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
- any failures in the most recent build, address them before proceeding with
- the release.
-1. Navigate to `google-cloud-go/` and switch to main.
-1. `git pull`
-1. Run `git tag -l | grep -v beta | grep -v alpha` to see all existing releases.
- The current latest tag `$CV` is the largest tag. It should look something
- like `vX.Y.Z` (note: ignore all `LIB/vX.Y.Z` tags - these are tags for a
- specific library, not the module root). We'll call the current version `$CV`
- and the new version `$NV`.
-1. On main, run `git log $CV...` to list all the changes since the last
- release. NOTE: You must manually visually parse out changes to submodules [1]
- (the `git log` is going to show you things in submodules, which are not going
- to be part of your release).
-1. Edit `CHANGES.md` to include a summary of the changes.
-1. In `internal/version/version.go`, update `const Repo` to today's date with
- the format `YYYYMMDD`.
-1. In `internal/version` run `go generate`.
-1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork,
- and create a PR titled `chore: release $NV`.
-1. Wait for the PR to be reviewed and merged. Once it's merged, and without
- merging any other PRs in the meantime:
- a. Switch to main.
- b. `git pull`
- c. Tag the repo with the next version: `git tag $NV`.
- d. Push the tag to origin:
- `git push origin $NV`
-1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
- with the new release, copying the contents of `CHANGES.md`.
-
-### Manual Releases (submodules)
-
-If for whatever reason the automated release process is not working as expected,
-here is how to manually cut a release of a submodule.
-
-(these instructions assume we're releasing `cloud.google.com/go/datastore` - adjust accordingly)
-
-1. Check for failures in the
- [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are
- any failures in the most recent build, address them before proceeding with
- the release. (This applies even if the failures are in a different submodule
- from the one being released.)
-1. Navigate to `google-cloud-go/` and switch to main.
-1. `git pull`
-1. Run `git tag -l | grep datastore | grep -v beta | grep -v alpha` to see all
- existing releases. The current latest tag `$CV` is the largest tag. It
- should look something like `datastore/vX.Y.Z`. We'll call the current version
- `$CV` and the new version `$NV`.
-1. On main, run `git log $CV.. -- datastore/` to list all the changes to the
- submodule directory since the last release.
-1. Edit `datastore/CHANGES.md` to include a summary of the changes.
-1. In `internal/version` run `go generate`.
-1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork,
- and create a PR titled `chore(datastore): release $NV`.
-1. Wait for the PR to be reviewed and merged. Once it's merged, and without
- merging any other PRs in the meantime:
- a. Switch to main.
- b. `git pull`
- c. Tag the repo with the next version: `git tag $NV`.
- d. Push the tag to origin:
- `git push origin $NV`
-1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
- with the new release, copying the contents of `datastore/CHANGES.md`.
diff --git a/upstream/vendor/cloud.google.com/go/SECURITY.md b/upstream/vendor/cloud.google.com/go/SECURITY.md
deleted file mode 100644
index 8b58ae9c01a..00000000000
--- a/upstream/vendor/cloud.google.com/go/SECURITY.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Security Policy
-
-To report a security issue, please use [g.co/vulnz](https://g.co/vulnz).
-
-The Google Security Team will respond within 5 working days of your report on g.co/vulnz.
-
-We use g.co/vulnz for our intake, and do coordination and disclosure here using GitHub Security Advisory to privately discuss and fix the issue.
diff --git a/upstream/vendor/cloud.google.com/go/auth/CHANGES.md b/upstream/vendor/cloud.google.com/go/auth/CHANGES.md
deleted file mode 100644
index f1b1a033e88..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/CHANGES.md
+++ /dev/null
@@ -1,125 +0,0 @@
-# Changelog
-
-## [0.4.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.0...auth/v0.4.1) (2024-05-09)
-
-
-### Bug Fixes
-
-* **auth:** Don't try to detect default creds it opt configured ([#10143](https://github.com/googleapis/google-cloud-go/issues/10143)) ([804632e](https://github.com/googleapis/google-cloud-go/commit/804632e7c5b0b85ff522f7951114485e256eb5bc))
-
-## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.3.0...auth/v0.4.0) (2024-05-07)
-
-
-### Features
-
-* **auth:** Enable client certificates by default ([#10102](https://github.com/googleapis/google-cloud-go/issues/10102)) ([9013e52](https://github.com/googleapis/google-cloud-go/commit/9013e5200a6ec0f178ed91acb255481ffb073a2c))
-
-
-### Bug Fixes
-
-* **auth:** Get s2a logic up to date ([#10093](https://github.com/googleapis/google-cloud-go/issues/10093)) ([4fe9ae4](https://github.com/googleapis/google-cloud-go/commit/4fe9ae4b7101af2a5221d6d6b2e77b479305bb06))
-
-## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.2...auth/v0.3.0) (2024-04-23)
-
-
-### Features
-
-* **auth/httptransport:** Add ability to customize transport ([#10023](https://github.com/googleapis/google-cloud-go/issues/10023)) ([72c7f6b](https://github.com/googleapis/google-cloud-go/commit/72c7f6bbec3136cc7a62788fc7186bc33ef6c3b3)), refs [#9812](https://github.com/googleapis/google-cloud-go/issues/9812) [#9814](https://github.com/googleapis/google-cloud-go/issues/9814)
-
-
-### Bug Fixes
-
-* **auth/credentials:** Error on bad file name if explicitly set ([#10018](https://github.com/googleapis/google-cloud-go/issues/10018)) ([55beaa9](https://github.com/googleapis/google-cloud-go/commit/55beaa993aaf052d8be39766afc6777c3c2a0bdd)), refs [#9809](https://github.com/googleapis/google-cloud-go/issues/9809)
-
-## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.1...auth/v0.2.2) (2024-04-19)
-
-
-### Bug Fixes
-
-* **auth:** Add internal opt to skip validation on transports ([#9999](https://github.com/googleapis/google-cloud-go/issues/9999)) ([9e20ef8](https://github.com/googleapis/google-cloud-go/commit/9e20ef89f6287d6bd03b8697d5898dc43b4a77cf)), refs [#9823](https://github.com/googleapis/google-cloud-go/issues/9823)
-* **auth:** Set secure flag for gRPC conn pools ([#10002](https://github.com/googleapis/google-cloud-go/issues/10002)) ([14e3956](https://github.com/googleapis/google-cloud-go/commit/14e3956dfd736399731b5ee8d9b178ae085cf7ba)), refs [#9833](https://github.com/googleapis/google-cloud-go/issues/9833)
-
-## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.0...auth/v0.2.1) (2024-04-18)
-
-
-### Bug Fixes
-
-* **auth:** Default gRPC token type to Bearer if not set ([#9800](https://github.com/googleapis/google-cloud-go/issues/9800)) ([5284066](https://github.com/googleapis/google-cloud-go/commit/5284066670b6fe65d79089cfe0199c9660f87fc7))
-
-## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.1...auth/v0.2.0) (2024-04-15)
-
-### Breaking Changes
-
-In the below mentioned commits there were a few large breaking changes since the
-last release of the module.
-
-1. The `Credentials` type has been moved to the root of the module as it is
- becoming the core abstraction for the whole module.
-2. Because of the above mentioned change many functions that previously
- returned a `TokenProvider` now return `Credentials`. Similarly, these
- functions have been renamed to be more specific.
-3. Most places that used to take an optional `TokenProvider` now accept
- `Credentials`. You can make a `Credentials` from a `TokenProvider` using the
- constructor found in the `auth` package.
-4. The `detect` package has been renamed to `credentials`. With this change some
- function signatures were also updated for better readability.
-5. Derivative auth flows like `impersonate` and `downscope` have been moved to
- be under the new `credentials` package.
-
-Although these changes are disruptive we think that they are for the best of the
-long-term health of the module. We do not expect any more large breaking changes
-like these in future revisions, even before 1.0.0. This version will be the
-first version of the auth library that our client libraries start to use and
-depend on.
-
-### Features
-
-* **auth/credentials/externalaccount:** Add default TokenURL ([#9700](https://github.com/googleapis/google-cloud-go/issues/9700)) ([81830e6](https://github.com/googleapis/google-cloud-go/commit/81830e6848ceefd055aa4d08f933d1154455a0f6))
-* **auth:** Add downscope.Options.UniverseDomain ([#9634](https://github.com/googleapis/google-cloud-go/issues/9634)) ([52cf7d7](https://github.com/googleapis/google-cloud-go/commit/52cf7d780853594291c4e34302d618299d1f5a1d))
-* **auth:** Add universe domain to grpctransport and httptransport ([#9663](https://github.com/googleapis/google-cloud-go/issues/9663)) ([67d353b](https://github.com/googleapis/google-cloud-go/commit/67d353beefe3b607c08c891876fbd95ab89e5fe3)), refs [#9670](https://github.com/googleapis/google-cloud-go/issues/9670)
-* **auth:** Add UniverseDomain to DetectOptions ([#9536](https://github.com/googleapis/google-cloud-go/issues/9536)) ([3618d3f](https://github.com/googleapis/google-cloud-go/commit/3618d3f7061615c0e189f376c75abc201203b501))
-* **auth:** Make package externalaccount public ([#9633](https://github.com/googleapis/google-cloud-go/issues/9633)) ([a0978d8](https://github.com/googleapis/google-cloud-go/commit/a0978d8e96968399940ebd7d092539772bf9caac))
-* **auth:** Move credentials to base auth package ([#9590](https://github.com/googleapis/google-cloud-go/issues/9590)) ([1a04baf](https://github.com/googleapis/google-cloud-go/commit/1a04bafa83c27342b9308d785645e1e5423ea10d))
-* **auth:** Refactor public sigs to use Credentials ([#9603](https://github.com/googleapis/google-cloud-go/issues/9603)) ([69cb240](https://github.com/googleapis/google-cloud-go/commit/69cb240c530b1f7173a9af2555c19e9a1beb56c5))
-
-
-### Bug Fixes
-
-* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
-* **auth:** Fix uint32 conversion ([9221c7f](https://github.com/googleapis/google-cloud-go/commit/9221c7fa12cef9d5fb7ddc92f41f1d6204971c7b))
-* **auth:** Port sts expires fix ([#9618](https://github.com/googleapis/google-cloud-go/issues/9618)) ([7bec97b](https://github.com/googleapis/google-cloud-go/commit/7bec97b2f51ed3ac4f9b88bf100d301da3f5d1bd))
-* **auth:** Read universe_domain from all credentials files ([#9632](https://github.com/googleapis/google-cloud-go/issues/9632)) ([16efbb5](https://github.com/googleapis/google-cloud-go/commit/16efbb52e39ea4a319e5ee1e95c0e0305b6d9824))
-* **auth:** Remove content-type header from idms get requests ([#9508](https://github.com/googleapis/google-cloud-go/issues/9508)) ([8589f41](https://github.com/googleapis/google-cloud-go/commit/8589f41599d265d7c3d46a3d86c9fab2329cbdd9))
-* **auth:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
-
-## [0.1.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.0...auth/v0.1.1) (2024-03-10)
-
-
-### Bug Fixes
-
-* **auth/impersonate:** Properly send default detect params ([#9529](https://github.com/googleapis/google-cloud-go/issues/9529)) ([5b6b8be](https://github.com/googleapis/google-cloud-go/commit/5b6b8bef577f82707e51f5cc5d258d5bdf90218f)), refs [#9136](https://github.com/googleapis/google-cloud-go/issues/9136)
-* **auth:** Update grpc-go to v1.56.3 ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c))
-* **auth:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7))
-
-## 0.1.0 (2023-10-18)
-
-
-### Features
-
-* **auth:** Add base auth package ([#8465](https://github.com/googleapis/google-cloud-go/issues/8465)) ([6a45f26](https://github.com/googleapis/google-cloud-go/commit/6a45f26b809b64edae21f312c18d4205f96b180e))
-* **auth:** Add cert support to httptransport ([#8569](https://github.com/googleapis/google-cloud-go/issues/8569)) ([37e3435](https://github.com/googleapis/google-cloud-go/commit/37e3435f8e98595eafab481bdfcb31a4c56fa993))
-* **auth:** Add Credentials.UniverseDomain() ([#8654](https://github.com/googleapis/google-cloud-go/issues/8654)) ([af0aa1e](https://github.com/googleapis/google-cloud-go/commit/af0aa1ed8015bc8fe0dd87a7549ae029107cbdb8))
-* **auth:** Add detect package ([#8491](https://github.com/googleapis/google-cloud-go/issues/8491)) ([d977419](https://github.com/googleapis/google-cloud-go/commit/d977419a3269f6acc193df77a2136a6eb4b4add7))
-* **auth:** Add downscope package ([#8532](https://github.com/googleapis/google-cloud-go/issues/8532)) ([dda9bff](https://github.com/googleapis/google-cloud-go/commit/dda9bff8ec70e6d104901b4105d13dcaa4e2404c))
-* **auth:** Add grpctransport package ([#8625](https://github.com/googleapis/google-cloud-go/issues/8625)) ([69a8347](https://github.com/googleapis/google-cloud-go/commit/69a83470bdcc7ed10c6c36d1abc3b7cfdb8a0ee5))
-* **auth:** Add httptransport package ([#8567](https://github.com/googleapis/google-cloud-go/issues/8567)) ([6898597](https://github.com/googleapis/google-cloud-go/commit/6898597d2ea95d630fcd00fd15c58c75ea843bff))
-* **auth:** Add idtoken package ([#8580](https://github.com/googleapis/google-cloud-go/issues/8580)) ([a79e693](https://github.com/googleapis/google-cloud-go/commit/a79e693e97e4e3e1c6742099af3dbc58866d88fe))
-* **auth:** Add impersonate package ([#8578](https://github.com/googleapis/google-cloud-go/issues/8578)) ([e29ba0c](https://github.com/googleapis/google-cloud-go/commit/e29ba0cb7bd3888ab9e808087027dc5a32474c04))
-* **auth:** Add support for external accounts in detect ([#8508](https://github.com/googleapis/google-cloud-go/issues/8508)) ([62210d5](https://github.com/googleapis/google-cloud-go/commit/62210d5d3e56e8e9f35db8e6ac0defec19582507))
-* **auth:** Port external account changes ([#8697](https://github.com/googleapis/google-cloud-go/issues/8697)) ([5823db5](https://github.com/googleapis/google-cloud-go/commit/5823db5d633069999b58b9131a7f9cd77e82c899))
-
-
-### Bug Fixes
-
-* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
-* **auth:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
diff --git a/upstream/vendor/cloud.google.com/go/auth/README.md b/upstream/vendor/cloud.google.com/go/auth/README.md
deleted file mode 100644
index 36de276a074..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# auth
-
-This module is currently EXPERIMENTAL and under active development. It is not
-yet intended to be used.
diff --git a/upstream/vendor/cloud.google.com/go/auth/auth.go b/upstream/vendor/cloud.google.com/go/auth/auth.go
deleted file mode 100644
index ea7c1b0ad8d..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/auth.go
+++ /dev/null
@@ -1,476 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package auth
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "net/url"
- "strings"
- "sync"
- "time"
-
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/jwt"
-)
-
-const (
- // Parameter keys for AuthCodeURL method to support PKCE.
- codeChallengeKey = "code_challenge"
- codeChallengeMethodKey = "code_challenge_method"
-
- // Parameter key for Exchange method to support PKCE.
- codeVerifierKey = "code_verifier"
-
- // 3 minutes and 45 seconds before expiration. The shortest MDS cache is 4 minutes,
- // so we give it 15 seconds to refresh it's cache before attempting to refresh a token.
- defaultExpiryDelta = 215 * time.Second
-
- universeDomainDefault = "googleapis.com"
-)
-
-var (
- defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
- defaultHeader = &jwt.Header{Algorithm: jwt.HeaderAlgRSA256, Type: jwt.HeaderType}
-
- // for testing
- timeNow = time.Now
-)
-
-// TokenProvider specifies an interface for anything that can return a token.
-type TokenProvider interface {
- // Token returns a Token or an error.
- // The Token returned must be safe to use
- // concurrently.
- // The returned Token must not be modified.
- // The context provided must be sent along to any requests that are made in
- // the implementing code.
- Token(context.Context) (*Token, error)
-}
-
-// Token holds the credential token used to authorized requests. All fields are
-// considered read-only.
-type Token struct {
- // Value is the token used to authorize requests. It is usually an access
- // token but may be other types of tokens such as ID tokens in some flows.
- Value string
- // Type is the type of token Value is. If uninitialized, it should be
- // assumed to be a "Bearer" token.
- Type string
- // Expiry is the time the token is set to expire.
- Expiry time.Time
- // Metadata may include, but is not limited to, the body of the token
- // response returned by the server.
- Metadata map[string]interface{} // TODO(codyoss): maybe make a method to flatten metadata to avoid []string for url.Values
-}
-
-// IsValid reports that a [Token] is non-nil, has a [Token.Value], and has not
-// expired. A token is considered expired if [Token.Expiry] has passed or will
-// pass in the next 10 seconds.
-func (t *Token) IsValid() bool {
- return t.isValidWithEarlyExpiry(defaultExpiryDelta)
-}
-
-func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool {
- if t == nil || t.Value == "" {
- return false
- }
- if t.Expiry.IsZero() {
- return true
- }
- return !t.Expiry.Round(0).Add(-earlyExpiry).Before(timeNow())
-}
-
-// Credentials holds Google credentials, including
-// [Application Default Credentials](https://developers.google.com/accounts/docs/application-default-credentials).
-type Credentials struct {
- json []byte
- projectID CredentialsPropertyProvider
- quotaProjectID CredentialsPropertyProvider
- // universeDomain is the default service domain for a given Cloud universe.
- universeDomain CredentialsPropertyProvider
-
- TokenProvider
-}
-
-// JSON returns the bytes associated with the the file used to source
-// credentials if one was used.
-func (c *Credentials) JSON() []byte {
- return c.json
-}
-
-// ProjectID returns the associated project ID from the underlying file or
-// environment.
-func (c *Credentials) ProjectID(ctx context.Context) (string, error) {
- if c.projectID == nil {
- return internal.GetProjectID(c.json, ""), nil
- }
- v, err := c.projectID.GetProperty(ctx)
- if err != nil {
- return "", err
- }
- return internal.GetProjectID(c.json, v), nil
-}
-
-// QuotaProjectID returns the associated quota project ID from the underlying
-// file or environment.
-func (c *Credentials) QuotaProjectID(ctx context.Context) (string, error) {
- if c.quotaProjectID == nil {
- return internal.GetQuotaProject(c.json, ""), nil
- }
- v, err := c.quotaProjectID.GetProperty(ctx)
- if err != nil {
- return "", err
- }
- return internal.GetQuotaProject(c.json, v), nil
-}
-
-// UniverseDomain returns the default service domain for a given Cloud universe.
-// The default value is "googleapis.com".
-func (c *Credentials) UniverseDomain(ctx context.Context) (string, error) {
- if c.universeDomain == nil {
- return universeDomainDefault, nil
- }
- v, err := c.universeDomain.GetProperty(ctx)
- if err != nil {
- return "", err
- }
- if v == "" {
- return universeDomainDefault, nil
- }
- return v, err
-}
-
-// CredentialsPropertyProvider provides an implementation to fetch a property
-// value for [Credentials].
-type CredentialsPropertyProvider interface {
- GetProperty(context.Context) (string, error)
-}
-
-// CredentialsPropertyFunc is a type adapter to allow the use of ordinary
-// functions as a [CredentialsPropertyProvider].
-type CredentialsPropertyFunc func(context.Context) (string, error)
-
-// GetProperty loads the properly value provided the given context.
-func (p CredentialsPropertyFunc) GetProperty(ctx context.Context) (string, error) {
- return p(ctx)
-}
-
-// CredentialsOptions are used to configure [Credentials].
-type CredentialsOptions struct {
- // TokenProvider is a means of sourcing a token for the credentials. Required.
- TokenProvider TokenProvider
- // JSON is the raw contents of the credentials file if sourced from a file.
- JSON []byte
- // ProjectIDProvider resolves the project ID associated with the
- // credentials.
- ProjectIDProvider CredentialsPropertyProvider
- // QuotaProjectIDProvider resolves the quota project ID associated with the
- // credentials.
- QuotaProjectIDProvider CredentialsPropertyProvider
- // UniverseDomainProvider resolves the universe domain with the credentials.
- UniverseDomainProvider CredentialsPropertyProvider
-}
-
-// NewCredentials returns new [Credentials] from the provided options. Most users
-// will want to build this object a function from the
-// [cloud.google.com/go/auth/credentials] package.
-func NewCredentials(opts *CredentialsOptions) *Credentials {
- creds := &Credentials{
- TokenProvider: opts.TokenProvider,
- json: opts.JSON,
- projectID: opts.ProjectIDProvider,
- quotaProjectID: opts.QuotaProjectIDProvider,
- universeDomain: opts.UniverseDomainProvider,
- }
-
- return creds
-}
-
-// CachedTokenProviderOptions provided options for configuring a
-// CachedTokenProvider.
-type CachedTokenProviderOptions struct {
- // DisableAutoRefresh makes the TokenProvider always return the same token,
- // even if it is expired.
- DisableAutoRefresh bool
- // ExpireEarly configures the amount of time before a token expires, that it
- // should be refreshed. If unset, the default value is 10 seconds.
- ExpireEarly time.Duration
-}
-
-func (ctpo *CachedTokenProviderOptions) autoRefresh() bool {
- if ctpo == nil {
- return true
- }
- return !ctpo.DisableAutoRefresh
-}
-
-func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration {
- if ctpo == nil {
- return defaultExpiryDelta
- }
- return ctpo.ExpireEarly
-}
-
-// NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned
-// by the underlying provider. By default it will refresh tokens ten seconds
-// before they expire, but this time can be configured with the optional
-// options.
-func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider {
- if ctp, ok := tp.(*cachedTokenProvider); ok {
- return ctp
- }
- return &cachedTokenProvider{
- tp: tp,
- autoRefresh: opts.autoRefresh(),
- expireEarly: opts.expireEarly(),
- }
-}
-
-type cachedTokenProvider struct {
- tp TokenProvider
- autoRefresh bool
- expireEarly time.Duration
-
- mu sync.Mutex
- cachedToken *Token
-}
-
-func (c *cachedTokenProvider) Token(ctx context.Context) (*Token, error) {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.cachedToken.IsValid() || !c.autoRefresh {
- return c.cachedToken, nil
- }
- t, err := c.tp.Token(ctx)
- if err != nil {
- return nil, err
- }
- c.cachedToken = t
- return t, nil
-}
-
-// Error is a error associated with retrieving a [Token]. It can hold useful
-// additional details for debugging.
-type Error struct {
- // Response is the HTTP response associated with error. The body will always
- // be already closed and consumed.
- Response *http.Response
- // Body is the HTTP response body.
- Body []byte
- // Err is the underlying wrapped error.
- Err error
-
- // code returned in the token response
- code string
- // description returned in the token response
- description string
- // uri returned in the token response
- uri string
-}
-
-func (e *Error) Error() string {
- if e.code != "" {
- s := fmt.Sprintf("auth: %q", e.code)
- if e.description != "" {
- s += fmt.Sprintf(" %q", e.description)
- }
- if e.uri != "" {
- s += fmt.Sprintf(" %q", e.uri)
- }
- return s
- }
- return fmt.Sprintf("auth: cannot fetch token: %v\nResponse: %s", e.Response.StatusCode, e.Body)
-}
-
-// Temporary returns true if the error is considered temporary and may be able
-// to be retried.
-func (e *Error) Temporary() bool {
- if e.Response == nil {
- return false
- }
- sc := e.Response.StatusCode
- return sc == http.StatusInternalServerError || sc == http.StatusServiceUnavailable || sc == http.StatusRequestTimeout || sc == http.StatusTooManyRequests
-}
-
-func (e *Error) Unwrap() error {
- return e.Err
-}
-
-// Style describes how the token endpoint wants to receive the ClientID and
-// ClientSecret.
-type Style int
-
-const (
- // StyleUnknown means the value has not been initiated. Sending this in
- // a request will cause the token exchange to fail.
- StyleUnknown Style = iota
- // StyleInParams sends client info in the body of a POST request.
- StyleInParams
- // StyleInHeader sends client info using Basic Authorization header.
- StyleInHeader
-)
-
-// Options2LO is the configuration settings for doing a 2-legged JWT OAuth2 flow.
-type Options2LO struct {
- // Email is the OAuth2 client ID. This value is set as the "iss" in the
- // JWT.
- Email string
- // PrivateKey contains the contents of an RSA private key or the
- // contents of a PEM file that contains a private key. It is used to sign
- // the JWT created.
- PrivateKey []byte
- // TokenURL is th URL the JWT is sent to. Required.
- TokenURL string
- // PrivateKeyID is the ID of the key used to sign the JWT. It is used as the
- // "kid" in the JWT header. Optional.
- PrivateKeyID string
- // Subject is the used for to impersonate a user. It is used as the "sub" in
- // the JWT.m Optional.
- Subject string
- // Scopes specifies requested permissions for the token. Optional.
- Scopes []string
- // Expires specifies the lifetime of the token. Optional.
- Expires time.Duration
- // Audience specifies the "aud" in the JWT. Optional.
- Audience string
- // PrivateClaims allows specifying any custom claims for the JWT. Optional.
- PrivateClaims map[string]interface{}
-
- // Client is the client to be used to make the underlying token requests.
- // Optional.
- Client *http.Client
- // UseIDToken requests that the token returned be an ID token if one is
- // returned from the server. Optional.
- UseIDToken bool
-}
-
-func (o *Options2LO) client() *http.Client {
- if o.Client != nil {
- return o.Client
- }
- return internal.CloneDefaultClient()
-}
-
-func (o *Options2LO) validate() error {
- if o == nil {
- return errors.New("auth: options must be provided")
- }
- if o.Email == "" {
- return errors.New("auth: email must be provided")
- }
- if len(o.PrivateKey) == 0 {
- return errors.New("auth: private key must be provided")
- }
- if o.TokenURL == "" {
- return errors.New("auth: token URL must be provided")
- }
- return nil
-}
-
-// New2LOTokenProvider returns a [TokenProvider] from the provided options.
-func New2LOTokenProvider(opts *Options2LO) (TokenProvider, error) {
- if err := opts.validate(); err != nil {
- return nil, err
- }
- return tokenProvider2LO{opts: opts, Client: opts.client()}, nil
-}
-
-type tokenProvider2LO struct {
- opts *Options2LO
- Client *http.Client
-}
-
-func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) {
- pk, err := internal.ParseKey(tp.opts.PrivateKey)
- if err != nil {
- return nil, err
- }
- claimSet := &jwt.Claims{
- Iss: tp.opts.Email,
- Scope: strings.Join(tp.opts.Scopes, " "),
- Aud: tp.opts.TokenURL,
- AdditionalClaims: tp.opts.PrivateClaims,
- Sub: tp.opts.Subject,
- }
- if t := tp.opts.Expires; t > 0 {
- claimSet.Exp = time.Now().Add(t).Unix()
- }
- if aud := tp.opts.Audience; aud != "" {
- claimSet.Aud = aud
- }
- h := *defaultHeader
- h.KeyID = tp.opts.PrivateKeyID
- payload, err := jwt.EncodeJWS(&h, claimSet, pk)
- if err != nil {
- return nil, err
- }
- v := url.Values{}
- v.Set("grant_type", defaultGrantType)
- v.Set("assertion", payload)
- resp, err := tp.Client.PostForm(tp.opts.TokenURL, v)
- if err != nil {
- return nil, fmt.Errorf("auth: cannot fetch token: %w", err)
- }
- defer resp.Body.Close()
- body, err := internal.ReadAll(resp.Body)
- if err != nil {
- return nil, fmt.Errorf("auth: cannot fetch token: %w", err)
- }
- if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
- return nil, &Error{
- Response: resp,
- Body: body,
- }
- }
- // tokenRes is the JSON response body.
- var tokenRes struct {
- AccessToken string `json:"access_token"`
- TokenType string `json:"token_type"`
- IDToken string `json:"id_token"`
- ExpiresIn int64 `json:"expires_in"`
- }
- if err := json.Unmarshal(body, &tokenRes); err != nil {
- return nil, fmt.Errorf("auth: cannot fetch token: %w", err)
- }
- token := &Token{
- Value: tokenRes.AccessToken,
- Type: tokenRes.TokenType,
- }
- token.Metadata = make(map[string]interface{})
- json.Unmarshal(body, &token.Metadata) // no error checks for optional fields
-
- if secs := tokenRes.ExpiresIn; secs > 0 {
- token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
- }
- if v := tokenRes.IDToken; v != "" {
- // decode returned id token to get expiry
- claimSet, err := jwt.DecodeJWS(v)
- if err != nil {
- return nil, fmt.Errorf("auth: error decoding JWT token: %w", err)
- }
- token.Expiry = time.Unix(claimSet.Exp, 0)
- }
- if tp.opts.UseIDToken {
- if tokenRes.IDToken == "" {
- return nil, fmt.Errorf("auth: response doesn't have JWT token")
- }
- token.Value = tokenRes.IDToken
- }
- return token, nil
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/credentials/compute.go b/upstream/vendor/cloud.google.com/go/auth/credentials/compute.go
deleted file mode 100644
index 6db643837e2..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/credentials/compute.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package credentials
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/url"
- "strings"
- "time"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/compute/metadata"
-)
-
-var (
- computeTokenMetadata = map[string]interface{}{
- "auth.google.tokenSource": "compute-metadata",
- "auth.google.serviceAccount": "default",
- }
- computeTokenURI = "instance/service-accounts/default/token"
-)
-
-// computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that
-// uses the metadata service to retrieve tokens.
-func computeTokenProvider(earlyExpiry time.Duration, scope ...string) auth.TokenProvider {
- return auth.NewCachedTokenProvider(computeProvider{scopes: scope}, &auth.CachedTokenProviderOptions{
- ExpireEarly: earlyExpiry,
- })
-}
-
-// computeProvider fetches tokens from the google cloud metadata service.
-type computeProvider struct {
- scopes []string
-}
-
-type metadataTokenResp struct {
- AccessToken string `json:"access_token"`
- ExpiresInSec int `json:"expires_in"`
- TokenType string `json:"token_type"`
-}
-
-func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) {
- tokenURI, err := url.Parse(computeTokenURI)
- if err != nil {
- return nil, err
- }
- if len(cs.scopes) > 0 {
- v := url.Values{}
- v.Set("scopes", strings.Join(cs.scopes, ","))
- tokenURI.RawQuery = v.Encode()
- }
- tokenJSON, err := metadata.Get(tokenURI.String())
- if err != nil {
- return nil, err
- }
- var res metadataTokenResp
- if err := json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res); err != nil {
- return nil, fmt.Errorf("credentials: invalid token JSON from metadata: %w", err)
- }
- if res.ExpiresInSec == 0 || res.AccessToken == "" {
- return nil, errors.New("credentials: incomplete token received from metadata")
- }
- return &auth.Token{
- Value: res.AccessToken,
- Type: res.TokenType,
- Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
- Metadata: computeTokenMetadata,
- }, nil
-
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/credentials/detect.go b/upstream/vendor/cloud.google.com/go/auth/credentials/detect.go
deleted file mode 100644
index cb3f44f5873..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/credentials/detect.go
+++ /dev/null
@@ -1,252 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package credentials
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "os"
- "time"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/credsfile"
- "cloud.google.com/go/compute/metadata"
-)
-
-const (
- // jwtTokenURL is Google's OAuth 2.0 token URL to use with the JWT(2LO) flow.
- jwtTokenURL = "https://oauth2.googleapis.com/token"
-
- // Google's OAuth 2.0 default endpoints.
- googleAuthURL = "https://accounts.google.com/o/oauth2/auth"
- googleTokenURL = "https://oauth2.googleapis.com/token"
-
- // Help on default credentials
- adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc"
-)
-
-var (
- // for testing
- allowOnGCECheck = true
-)
-
-// OnGCE reports whether this process is running in Google Cloud.
-func OnGCE() bool {
- // TODO(codyoss): once all libs use this auth lib move metadata check here
- return allowOnGCECheck && metadata.OnGCE()
-}
-
-// DetectDefault searches for "Application Default Credentials" and returns
-// a credential based on the [DetectOptions] provided.
-//
-// It looks for credentials in the following places, preferring the first
-// location found:
-//
-// - A JSON file whose path is specified by the GOOGLE_APPLICATION_CREDENTIALS
-// environment variable. For workload identity federation, refer to
-// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation
-// on how to generate the JSON configuration file for on-prem/non-Google
-// cloud platforms.
-// - A JSON file in a location known to the gcloud command-line tool. On
-// Windows, this is %APPDATA%/gcloud/application_default_credentials.json. On
-// other systems, $HOME/.config/gcloud/application_default_credentials.json.
-// - On Google Compute Engine, Google App Engine standard second generation
-// runtimes, and Google App Engine flexible environment, it fetches
-// credentials from the metadata server.
-func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) {
- if err := opts.validate(); err != nil {
- return nil, err
- }
- if opts.CredentialsJSON != nil {
- return readCredentialsFileJSON(opts.CredentialsJSON, opts)
- }
- if opts.CredentialsFile != "" {
- return readCredentialsFile(opts.CredentialsFile, opts)
- }
- if filename := os.Getenv(credsfile.GoogleAppCredsEnvVar); filename != "" {
- if creds, err := readCredentialsFile(filename, opts); err == nil {
- return creds, err
- }
- }
-
- fileName := credsfile.GetWellKnownFileName()
- if b, err := os.ReadFile(fileName); err == nil {
- return readCredentialsFileJSON(b, opts)
- }
-
- if OnGCE() {
- return auth.NewCredentials(&auth.CredentialsOptions{
- TokenProvider: computeTokenProvider(opts.EarlyTokenRefresh, opts.Scopes...),
- ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) {
- return metadata.ProjectID()
- }),
- UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{},
- }), nil
- }
-
- return nil, fmt.Errorf("credentials: could not find default credentials. See %v for more information", adcSetupURL)
-}
-
-// DetectOptions provides configuration for [DetectDefault].
-type DetectOptions struct {
- // Scopes that credentials tokens should have. Example:
- // https://www.googleapis.com/auth/cloud-platform. Required if Audience is
- // not provided.
- Scopes []string
- // Audience that credentials tokens should have. Only applicable for 2LO
- // flows with service accounts. If specified, scopes should not be provided.
- Audience string
- // Subject is the user email used for [domain wide delegation](https://developers.google.com/identity/protocols/oauth2/service-account#delegatingauthority).
- // Optional.
- Subject string
- // EarlyTokenRefresh configures how early before a token expires that it
- // should be refreshed.
- EarlyTokenRefresh time.Duration
- // AuthHandlerOptions configures an authorization handler and other options
- // for 3LO flows. It is required, and only used, for client credential
- // flows.
- AuthHandlerOptions *auth.AuthorizationHandlerOptions
- // TokenURL allows to set the token endpoint for user credential flows. If
- // unset the default value is: https://oauth2.googleapis.com/token.
- // Optional.
- TokenURL string
- // STSAudience is the audience sent to when retrieving an STS token.
- // Currently this only used for GDCH auth flow, for which it is required.
- STSAudience string
- // CredentialsFile overrides detection logic and sources a credential file
- // from the provided filepath. If provided, CredentialsJSON must not be.
- // Optional.
- CredentialsFile string
- // CredentialsJSON overrides detection logic and uses the JSON bytes as the
- // source for the credential. If provided, CredentialsFile must not be.
- // Optional.
- CredentialsJSON []byte
- // UseSelfSignedJWT directs service account based credentials to create a
- // self-signed JWT with the private key found in the file, skipping any
- // network requests that would normally be made. Optional.
- UseSelfSignedJWT bool
- // Client configures the underlying client used to make network requests
- // when fetching tokens. Optional.
- Client *http.Client
- // UniverseDomain is the default service domain for a given Cloud universe.
- // The default value is "googleapis.com". This option is ignored for
- // authentication flows that do not support universe domain. Optional.
- UniverseDomain string
-}
-
-func (o *DetectOptions) validate() error {
- if o == nil {
- return errors.New("credentials: options must be provided")
- }
- if len(o.Scopes) > 0 && o.Audience != "" {
- return errors.New("credentials: both scopes and audience were provided")
- }
- if len(o.CredentialsJSON) > 0 && o.CredentialsFile != "" {
- return errors.New("credentials: both credentials file and JSON were provided")
- }
- return nil
-}
-
-func (o *DetectOptions) tokenURL() string {
- if o.TokenURL != "" {
- return o.TokenURL
- }
- return googleTokenURL
-}
-
-func (o *DetectOptions) scopes() []string {
- scopes := make([]string, len(o.Scopes))
- copy(scopes, o.Scopes)
- return scopes
-}
-
-func (o *DetectOptions) client() *http.Client {
- if o.Client != nil {
- return o.Client
- }
- return internal.CloneDefaultClient()
-}
-
-func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) {
- b, err := os.ReadFile(filename)
- if err != nil {
- return nil, err
- }
- return readCredentialsFileJSON(b, opts)
-}
-
-func readCredentialsFileJSON(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
- // attempt to parse jsonData as a Google Developers Console client_credentials.json.
- config := clientCredConfigFromJSON(b, opts)
- if config != nil {
- if config.AuthHandlerOpts == nil {
- return nil, errors.New("credentials: auth handler must be specified for this credential filetype")
- }
- tp, err := auth.New3LOTokenProvider(config)
- if err != nil {
- return nil, err
- }
- return auth.NewCredentials(&auth.CredentialsOptions{
- TokenProvider: tp,
- JSON: b,
- }), nil
- }
- return fileCredentials(b, opts)
-}
-
-func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO {
- var creds credsfile.ClientCredentialsFile
- var c *credsfile.Config3LO
- if err := json.Unmarshal(b, &creds); err != nil {
- return nil
- }
- switch {
- case creds.Web != nil:
- c = creds.Web
- case creds.Installed != nil:
- c = creds.Installed
- default:
- return nil
- }
- if len(c.RedirectURIs) < 1 {
- return nil
- }
- var handleOpts *auth.AuthorizationHandlerOptions
- if opts.AuthHandlerOptions != nil {
- handleOpts = &auth.AuthorizationHandlerOptions{
- Handler: opts.AuthHandlerOptions.Handler,
- State: opts.AuthHandlerOptions.State,
- PKCEOpts: opts.AuthHandlerOptions.PKCEOpts,
- }
- }
- return &auth.Options3LO{
- ClientID: c.ClientID,
- ClientSecret: c.ClientSecret,
- RedirectURL: c.RedirectURIs[0],
- Scopes: opts.scopes(),
- AuthURL: c.AuthURI,
- TokenURL: c.TokenURI,
- Client: opts.client(),
- EarlyTokenExpiry: opts.EarlyTokenRefresh,
- AuthHandlerOpts: handleOpts,
- // TODO(codyoss): refactor this out. We need to add in auto-detection
- // for this use case.
- AuthStyle: auth.StyleInParams,
- }
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/credentials/doc.go b/upstream/vendor/cloud.google.com/go/auth/credentials/doc.go
deleted file mode 100644
index 1dbb2866b91..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/credentials/doc.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package credentials provides support for making OAuth2 authorized and
-// authenticated HTTP requests to Google APIs. It supports the Web server flow,
-// client-side credentials, service accounts, Google Compute Engine service
-// accounts, Google App Engine service accounts and workload identity federation
-// from non-Google cloud platforms.
-//
-// A brief overview of the package follows. For more information, please read
-// https://developers.google.com/accounts/docs/OAuth2
-// and
-// https://developers.google.com/accounts/docs/application-default-credentials.
-// For more information on using workload identity federation, refer to
-// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation.
-//
-// # Credentials
-//
-// The [cloud.google.com/go/auth.Credentials] type represents Google
-// credentials, including Application Default Credentials.
-//
-// Use [DetectDefault] to obtain Application Default Credentials.
-//
-// Application Default Credentials support workload identity federation to
-// access Google Cloud resources from non-Google Cloud platforms including Amazon
-// Web Services (AWS), Microsoft Azure or any identity provider that supports
-// OpenID Connect (OIDC). Workload identity federation is recommended for
-// non-Google Cloud environments as it avoids the need to download, manage, and
-// store service account private keys locally.
-//
-// # Workforce Identity Federation
-//
-// For more information on this feature see [cloud.google.com/go/auth/credentials/externalaccount].
-package credentials
diff --git a/upstream/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/upstream/vendor/cloud.google.com/go/auth/credentials/filetypes.go
deleted file mode 100644
index a66e56d70f8..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/credentials/filetypes.go
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package credentials
-
-import (
- "errors"
- "fmt"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/credentials/internal/externalaccount"
- "cloud.google.com/go/auth/credentials/internal/externalaccountuser"
- "cloud.google.com/go/auth/credentials/internal/gdch"
- "cloud.google.com/go/auth/credentials/internal/impersonate"
- internalauth "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/credsfile"
-)
-
-func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
- fileType, err := credsfile.ParseFileType(b)
- if err != nil {
- return nil, err
- }
-
- var projectID, quotaProjectID, universeDomain string
- var tp auth.TokenProvider
- switch fileType {
- case credsfile.ServiceAccountKey:
- f, err := credsfile.ParseServiceAccount(b)
- if err != nil {
- return nil, err
- }
- tp, err = handleServiceAccount(f, opts)
- if err != nil {
- return nil, err
- }
- projectID = f.ProjectID
- universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
- case credsfile.UserCredentialsKey:
- f, err := credsfile.ParseUserCredentials(b)
- if err != nil {
- return nil, err
- }
- tp, err = handleUserCredential(f, opts)
- if err != nil {
- return nil, err
- }
- quotaProjectID = f.QuotaProjectID
- universeDomain = f.UniverseDomain
- case credsfile.ExternalAccountKey:
- f, err := credsfile.ParseExternalAccount(b)
- if err != nil {
- return nil, err
- }
- tp, err = handleExternalAccount(f, opts)
- if err != nil {
- return nil, err
- }
- quotaProjectID = f.QuotaProjectID
- universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
- case credsfile.ExternalAccountAuthorizedUserKey:
- f, err := credsfile.ParseExternalAccountAuthorizedUser(b)
- if err != nil {
- return nil, err
- }
- tp, err = handleExternalAccountAuthorizedUser(f, opts)
- if err != nil {
- return nil, err
- }
- quotaProjectID = f.QuotaProjectID
- universeDomain = f.UniverseDomain
- case credsfile.ImpersonatedServiceAccountKey:
- f, err := credsfile.ParseImpersonatedServiceAccount(b)
- if err != nil {
- return nil, err
- }
- tp, err = handleImpersonatedServiceAccount(f, opts)
- if err != nil {
- return nil, err
- }
- universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
- case credsfile.GDCHServiceAccountKey:
- f, err := credsfile.ParseGDCHServiceAccount(b)
- if err != nil {
- return nil, err
- }
- tp, err = handleGDCHServiceAccount(f, opts)
- if err != nil {
- return nil, err
- }
- projectID = f.Project
- universeDomain = f.UniverseDomain
- default:
- return nil, fmt.Errorf("credentials: unsupported filetype %q", fileType)
- }
- return auth.NewCredentials(&auth.CredentialsOptions{
- TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{
- ExpireEarly: opts.EarlyTokenRefresh,
- }),
- JSON: b,
- ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID),
- QuotaProjectIDProvider: internalauth.StaticCredentialsProperty(quotaProjectID),
- UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain),
- }), nil
-}
-
-// resolveUniverseDomain returns optsUniverseDomain if non-empty, in order to
-// support configuring universe-specific credentials in code. Auth flows
-// unsupported for universe domain should not use this func, but should instead
-// simply set the file universe domain on the credentials.
-func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string {
- if optsUniverseDomain != "" {
- return optsUniverseDomain
- }
- return fileUniverseDomain
-}
-
-func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
- if opts.UseSelfSignedJWT {
- return configureSelfSignedJWT(f, opts)
- }
- opts2LO := &auth.Options2LO{
- Email: f.ClientEmail,
- PrivateKey: []byte(f.PrivateKey),
- PrivateKeyID: f.PrivateKeyID,
- Scopes: opts.scopes(),
- TokenURL: f.TokenURL,
- Subject: opts.Subject,
- }
- if opts2LO.TokenURL == "" {
- opts2LO.TokenURL = jwtTokenURL
- }
- return auth.New2LOTokenProvider(opts2LO)
-}
-
-func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions) (auth.TokenProvider, error) {
- opts3LO := &auth.Options3LO{
- ClientID: f.ClientID,
- ClientSecret: f.ClientSecret,
- Scopes: opts.scopes(),
- AuthURL: googleAuthURL,
- TokenURL: opts.tokenURL(),
- AuthStyle: auth.StyleInParams,
- EarlyTokenExpiry: opts.EarlyTokenRefresh,
- RefreshToken: f.RefreshToken,
- }
- return auth.New3LOTokenProvider(opts3LO)
-}
-
-func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
- externalOpts := &externalaccount.Options{
- Audience: f.Audience,
- SubjectTokenType: f.SubjectTokenType,
- TokenURL: f.TokenURL,
- TokenInfoURL: f.TokenInfoURL,
- ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL,
- ClientSecret: f.ClientSecret,
- ClientID: f.ClientID,
- CredentialSource: f.CredentialSource,
- QuotaProjectID: f.QuotaProjectID,
- Scopes: opts.scopes(),
- WorkforcePoolUserProject: f.WorkforcePoolUserProject,
- Client: opts.client(),
- }
- if f.ServiceAccountImpersonation != nil {
- externalOpts.ServiceAccountImpersonationLifetimeSeconds = f.ServiceAccountImpersonation.TokenLifetimeSeconds
- }
- return externalaccount.NewTokenProvider(externalOpts)
-}
-
-func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedUserFile, opts *DetectOptions) (auth.TokenProvider, error) {
- externalOpts := &externalaccountuser.Options{
- Audience: f.Audience,
- RefreshToken: f.RefreshToken,
- TokenURL: f.TokenURL,
- TokenInfoURL: f.TokenInfoURL,
- ClientID: f.ClientID,
- ClientSecret: f.ClientSecret,
- Scopes: opts.scopes(),
- Client: opts.client(),
- }
- return externalaccountuser.NewTokenProvider(externalOpts)
-}
-
-func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
- if f.ServiceAccountImpersonationURL == "" || f.CredSource == nil {
- return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials")
- }
-
- tp, err := fileCredentials(f.CredSource, opts)
- if err != nil {
- return nil, err
- }
- return impersonate.NewTokenProvider(&impersonate.Options{
- URL: f.ServiceAccountImpersonationURL,
- Scopes: opts.scopes(),
- Tp: tp,
- Delegates: f.Delegates,
- Client: opts.client(),
- })
-}
-
-func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
- return gdch.NewTokenProvider(f, &gdch.Options{
- STSAudience: opts.STSAudience,
- Client: opts.client(),
- })
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
deleted file mode 100644
index d9e1dcddf64..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
+++ /dev/null
@@ -1,547 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package externalaccount
-
-import (
- "bytes"
- "context"
- "crypto/hmac"
- "crypto/sha256"
- "encoding/hex"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "net/url"
- "os"
- "path"
- "sort"
- "strings"
- "time"
-
- "cloud.google.com/go/auth/internal"
-)
-
-var (
- // getenv aliases os.Getenv for testing
- getenv = os.Getenv
-)
-
-const (
- // AWS Signature Version 4 signing algorithm identifier.
- awsAlgorithm = "AWS4-HMAC-SHA256"
-
- // The termination string for the AWS credential scope value as defined in
- // https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html
- awsRequestType = "aws4_request"
-
- // The AWS authorization header name for the security session token if available.
- awsSecurityTokenHeader = "x-amz-security-token"
-
- // The name of the header containing the session token for metadata endpoint calls
- awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token"
-
- awsIMDSv2SessionTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds"
-
- awsIMDSv2SessionTTL = "300"
-
- // The AWS authorization header name for the auto-generated date.
- awsDateHeader = "x-amz-date"
-
- defaultRegionalCredentialVerificationURL = "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15"
-
- // Supported AWS configuration environment variables.
- awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID"
- awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION"
- awsRegionEnvVar = "AWS_REGION"
- awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY"
- awsSessionTokenEnvVar = "AWS_SESSION_TOKEN"
-
- awsTimeFormatLong = "20060102T150405Z"
- awsTimeFormatShort = "20060102"
- awsProviderType = "aws"
-)
-
-type awsSubjectProvider struct {
- EnvironmentID string
- RegionURL string
- RegionalCredVerificationURL string
- CredVerificationURL string
- IMDSv2SessionTokenURL string
- TargetResource string
- requestSigner *awsRequestSigner
- region string
- securityCredentialsProvider AwsSecurityCredentialsProvider
- reqOpts *RequestOptions
-
- Client *http.Client
-}
-
-func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) {
- // Set Defaults
- if sp.RegionalCredVerificationURL == "" {
- sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL
- }
- if sp.requestSigner == nil {
- headers := make(map[string]string)
- if sp.shouldUseMetadataServer() {
- awsSessionToken, err := sp.getAWSSessionToken(ctx)
- if err != nil {
- return "", err
- }
-
- if awsSessionToken != "" {
- headers[awsIMDSv2SessionTokenHeader] = awsSessionToken
- }
- }
-
- awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers)
- if err != nil {
- return "", err
- }
- if sp.region, err = sp.getRegion(ctx, headers); err != nil {
- return "", err
- }
- sp.requestSigner = &awsRequestSigner{
- RegionName: sp.region,
- AwsSecurityCredentials: awsSecurityCredentials,
- }
- }
-
- // Generate the signed request to AWS STS GetCallerIdentity API.
- // Use the required regional endpoint. Otherwise, the request will fail.
- req, err := http.NewRequest("POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil)
- if err != nil {
- return "", err
- }
- // The full, canonical resource name of the workload identity pool
- // provider, with or without the HTTPS prefix.
- // Including this header as part of the signature is recommended to
- // ensure data integrity.
- if sp.TargetResource != "" {
- req.Header.Set("x-goog-cloud-target-resource", sp.TargetResource)
- }
- sp.requestSigner.signRequest(req)
-
- /*
- The GCP STS endpoint expects the headers to be formatted as:
- # [
- # {key: 'x-amz-date', value: '...'},
- # {key: 'Authorization', value: '...'},
- # ...
- # ]
- # And then serialized as:
- # quote(json.dumps({
- # url: '...',
- # method: 'POST',
- # headers: [{key: 'x-amz-date', value: '...'}, ...]
- # }))
- */
-
- awsSignedReq := awsRequest{
- URL: req.URL.String(),
- Method: "POST",
- }
- for headerKey, headerList := range req.Header {
- for _, headerValue := range headerList {
- awsSignedReq.Headers = append(awsSignedReq.Headers, awsRequestHeader{
- Key: headerKey,
- Value: headerValue,
- })
- }
- }
- sort.Slice(awsSignedReq.Headers, func(i, j int) bool {
- headerCompare := strings.Compare(awsSignedReq.Headers[i].Key, awsSignedReq.Headers[j].Key)
- if headerCompare == 0 {
- return strings.Compare(awsSignedReq.Headers[i].Value, awsSignedReq.Headers[j].Value) < 0
- }
- return headerCompare < 0
- })
-
- result, err := json.Marshal(awsSignedReq)
- if err != nil {
- return "", err
- }
- return url.QueryEscape(string(result)), nil
-}
-
-func (sp *awsSubjectProvider) providerType() string {
- if sp.securityCredentialsProvider != nil {
- return programmaticProviderType
- }
- return awsProviderType
-}
-
-func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, error) {
- if sp.IMDSv2SessionTokenURL == "" {
- return "", nil
- }
- req, err := http.NewRequestWithContext(ctx, "PUT", sp.IMDSv2SessionTokenURL, nil)
- if err != nil {
- return "", err
- }
- req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL)
-
- resp, err := sp.Client.Do(req)
- if err != nil {
- return "", err
- }
- defer resp.Body.Close()
-
- respBody, err := internal.ReadAll(resp.Body)
- if err != nil {
- return "", err
- }
- if resp.StatusCode != http.StatusOK {
- return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", respBody)
- }
- return string(respBody), nil
-}
-
-func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string]string) (string, error) {
- if sp.securityCredentialsProvider != nil {
- return sp.securityCredentialsProvider.AwsRegion(ctx, sp.reqOpts)
- }
- if canRetrieveRegionFromEnvironment() {
- if envAwsRegion := getenv(awsRegionEnvVar); envAwsRegion != "" {
- return envAwsRegion, nil
- }
- return getenv(awsDefaultRegionEnvVar), nil
- }
-
- if sp.RegionURL == "" {
- return "", errors.New("credentials: unable to determine AWS region")
- }
-
- req, err := http.NewRequestWithContext(ctx, "GET", sp.RegionURL, nil)
- if err != nil {
- return "", err
- }
-
- for name, value := range headers {
- req.Header.Add(name, value)
- }
-
- resp, err := sp.Client.Do(req)
- if err != nil {
- return "", err
- }
- defer resp.Body.Close()
-
- respBody, err := internal.ReadAll(resp.Body)
- if err != nil {
- return "", err
- }
-
- if resp.StatusCode != http.StatusOK {
- return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", respBody)
- }
-
- // This endpoint will return the region in format: us-east-2b.
- // Only the us-east-2 part should be used.
- bodyLen := len(respBody)
- if bodyLen == 0 {
- return "", nil
- }
- return string(respBody[:bodyLen-1]), nil
-}
-
-func (sp *awsSubjectProvider) getSecurityCredentials(ctx context.Context, headers map[string]string) (result *AwsSecurityCredentials, err error) {
- if sp.securityCredentialsProvider != nil {
- return sp.securityCredentialsProvider.AwsSecurityCredentials(ctx, sp.reqOpts)
- }
- if canRetrieveSecurityCredentialFromEnvironment() {
- return &AwsSecurityCredentials{
- AccessKeyID: getenv(awsAccessKeyIDEnvVar),
- SecretAccessKey: getenv(awsSecretAccessKeyEnvVar),
- SessionToken: getenv(awsSessionTokenEnvVar),
- }, nil
- }
-
- roleName, err := sp.getMetadataRoleName(ctx, headers)
- if err != nil {
- return
- }
- credentials, err := sp.getMetadataSecurityCredentials(ctx, roleName, headers)
- if err != nil {
- return
- }
-
- if credentials.AccessKeyID == "" {
- return result, errors.New("credentials: missing AccessKeyId credential")
- }
- if credentials.SecretAccessKey == "" {
- return result, errors.New("credentials: missing SecretAccessKey credential")
- }
-
- return credentials, nil
-}
-
-func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context, roleName string, headers map[string]string) (*AwsSecurityCredentials, error) {
- var result *AwsSecurityCredentials
-
- req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/%s", sp.CredVerificationURL, roleName), nil)
- if err != nil {
- return result, err
- }
- for name, value := range headers {
- req.Header.Add(name, value)
- }
-
- resp, err := sp.Client.Do(req)
- if err != nil {
- return result, err
- }
- defer resp.Body.Close()
-
- respBody, err := internal.ReadAll(resp.Body)
- if err != nil {
- return result, err
- }
- if resp.StatusCode != http.StatusOK {
- return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", respBody)
- }
- err = json.Unmarshal(respBody, &result)
- return result, err
-}
-
-func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers map[string]string) (string, error) {
- if sp.CredVerificationURL == "" {
- return "", errors.New("credentials: unable to determine the AWS metadata server security credentials endpoint")
- }
- req, err := http.NewRequestWithContext(ctx, "GET", sp.CredVerificationURL, nil)
- if err != nil {
- return "", err
- }
- for name, value := range headers {
- req.Header.Add(name, value)
- }
-
- resp, err := sp.Client.Do(req)
- if err != nil {
- return "", err
- }
- defer resp.Body.Close()
-
- respBody, err := internal.ReadAll(resp.Body)
- if err != nil {
- return "", err
- }
- if resp.StatusCode != http.StatusOK {
- return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", respBody)
- }
- return string(respBody), nil
-}
-
-// awsRequestSigner is a utility class to sign http requests using a AWS V4 signature.
-type awsRequestSigner struct {
- RegionName string
- AwsSecurityCredentials *AwsSecurityCredentials
-}
-
-// signRequest adds the appropriate headers to an http.Request
-// or returns an error if something prevented this.
-func (rs *awsRequestSigner) signRequest(req *http.Request) error {
- // req is assumed non-nil
- signedRequest := cloneRequest(req)
- timestamp := Now()
- signedRequest.Header.Set("host", requestHost(req))
- if rs.AwsSecurityCredentials.SessionToken != "" {
- signedRequest.Header.Set(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SessionToken)
- }
- if signedRequest.Header.Get("date") == "" {
- signedRequest.Header.Set(awsDateHeader, timestamp.Format(awsTimeFormatLong))
- }
- authorizationCode, err := rs.generateAuthentication(signedRequest, timestamp)
- if err != nil {
- return err
- }
- signedRequest.Header.Set("Authorization", authorizationCode)
- req.Header = signedRequest.Header
- return nil
-}
-
-func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp time.Time) (string, error) {
- canonicalHeaderColumns, canonicalHeaderData := canonicalHeaders(req)
- dateStamp := timestamp.Format(awsTimeFormatShort)
- serviceName := ""
-
- if splitHost := strings.Split(requestHost(req), "."); len(splitHost) > 0 {
- serviceName = splitHost[0]
- }
- credentialScope := strings.Join([]string{dateStamp, rs.RegionName, serviceName, awsRequestType}, "/")
- requestString, err := canonicalRequest(req, canonicalHeaderColumns, canonicalHeaderData)
- if err != nil {
- return "", err
- }
- requestHash, err := getSha256([]byte(requestString))
- if err != nil {
- return "", err
- }
-
- stringToSign := strings.Join([]string{awsAlgorithm, timestamp.Format(awsTimeFormatLong), credentialScope, requestHash}, "\n")
- signingKey := []byte("AWS4" + rs.AwsSecurityCredentials.SecretAccessKey)
- for _, signingInput := range []string{
- dateStamp, rs.RegionName, serviceName, awsRequestType, stringToSign,
- } {
- signingKey, err = getHmacSha256(signingKey, []byte(signingInput))
- if err != nil {
- return "", err
- }
- }
-
- return fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", awsAlgorithm, rs.AwsSecurityCredentials.AccessKeyID, credentialScope, canonicalHeaderColumns, hex.EncodeToString(signingKey)), nil
-}
-
-func getSha256(input []byte) (string, error) {
- hash := sha256.New()
- if _, err := hash.Write(input); err != nil {
- return "", err
- }
- return hex.EncodeToString(hash.Sum(nil)), nil
-}
-
-func getHmacSha256(key, input []byte) ([]byte, error) {
- hash := hmac.New(sha256.New, key)
- if _, err := hash.Write(input); err != nil {
- return nil, err
- }
- return hash.Sum(nil), nil
-}
-
-func cloneRequest(r *http.Request) *http.Request {
- r2 := new(http.Request)
- *r2 = *r
- if r.Header != nil {
- r2.Header = make(http.Header, len(r.Header))
-
- // Find total number of values.
- headerCount := 0
- for _, headerValues := range r.Header {
- headerCount += len(headerValues)
- }
- copiedHeaders := make([]string, headerCount) // shared backing array for headers' values
-
- for headerKey, headerValues := range r.Header {
- headerCount = copy(copiedHeaders, headerValues)
- r2.Header[headerKey] = copiedHeaders[:headerCount:headerCount]
- copiedHeaders = copiedHeaders[headerCount:]
- }
- }
- return r2
-}
-
-func canonicalPath(req *http.Request) string {
- result := req.URL.EscapedPath()
- if result == "" {
- return "/"
- }
- return path.Clean(result)
-}
-
-func canonicalQuery(req *http.Request) string {
- queryValues := req.URL.Query()
- for queryKey := range queryValues {
- sort.Strings(queryValues[queryKey])
- }
- return queryValues.Encode()
-}
-
-func canonicalHeaders(req *http.Request) (string, string) {
- // Header keys need to be sorted alphabetically.
- var headers []string
- lowerCaseHeaders := make(http.Header)
- for k, v := range req.Header {
- k := strings.ToLower(k)
- if _, ok := lowerCaseHeaders[k]; ok {
- // include additional values
- lowerCaseHeaders[k] = append(lowerCaseHeaders[k], v...)
- } else {
- headers = append(headers, k)
- lowerCaseHeaders[k] = v
- }
- }
- sort.Strings(headers)
-
- var fullHeaders bytes.Buffer
- for _, header := range headers {
- headerValue := strings.Join(lowerCaseHeaders[header], ",")
- fullHeaders.WriteString(header)
- fullHeaders.WriteRune(':')
- fullHeaders.WriteString(headerValue)
- fullHeaders.WriteRune('\n')
- }
-
- return strings.Join(headers, ";"), fullHeaders.String()
-}
-
-func requestDataHash(req *http.Request) (string, error) {
- var requestData []byte
- if req.Body != nil {
- requestBody, err := req.GetBody()
- if err != nil {
- return "", err
- }
- defer requestBody.Close()
-
- requestData, err = internal.ReadAll(requestBody)
- if err != nil {
- return "", err
- }
- }
-
- return getSha256(requestData)
-}
-
-func requestHost(req *http.Request) string {
- if req.Host != "" {
- return req.Host
- }
- return req.URL.Host
-}
-
-func canonicalRequest(req *http.Request, canonicalHeaderColumns, canonicalHeaderData string) (string, error) {
- dataHash, err := requestDataHash(req)
- if err != nil {
- return "", err
- }
- return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", req.Method, canonicalPath(req), canonicalQuery(req), canonicalHeaderData, canonicalHeaderColumns, dataHash), nil
-}
-
-type awsRequestHeader struct {
- Key string `json:"key"`
- Value string `json:"value"`
-}
-
-type awsRequest struct {
- URL string `json:"url"`
- Method string `json:"method"`
- Headers []awsRequestHeader `json:"headers"`
-}
-
-// The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is
-// required.
-func canRetrieveRegionFromEnvironment() bool {
- return getenv(awsRegionEnvVar) != "" || getenv(awsDefaultRegionEnvVar) != ""
-}
-
-// Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available.
-func canRetrieveSecurityCredentialFromEnvironment() bool {
- return getenv(awsAccessKeyIDEnvVar) != "" && getenv(awsSecretAccessKeyEnvVar) != ""
-}
-
-func (sp *awsSubjectProvider) shouldUseMetadataServer() bool {
- return sp.securityCredentialsProvider == nil && (!canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment())
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go b/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go
deleted file mode 100644
index d5765c47497..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package externalaccount
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "os"
- "os/exec"
- "regexp"
- "strings"
- "time"
-
- "cloud.google.com/go/auth/internal"
-)
-
-const (
- executableSupportedMaxVersion = 1
- executableDefaultTimeout = 30 * time.Second
- executableSource = "response"
- executableProviderType = "executable"
- outputFileSource = "output file"
-
- allowExecutablesEnvVar = "GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES"
-
- jwtTokenType = "urn:ietf:params:oauth:token-type:jwt"
- idTokenType = "urn:ietf:params:oauth:token-type:id_token"
- saml2TokenType = "urn:ietf:params:oauth:token-type:saml2"
-)
-
-var (
- serviceAccountImpersonationRE = regexp.MustCompile(`https://iamcredentials..+/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken`)
-)
-
-type nonCacheableError struct {
- message string
-}
-
-func (nce nonCacheableError) Error() string {
- return nce.message
-}
-
-// environment is a contract for testing
-type environment interface {
- existingEnv() []string
- getenv(string) string
- run(ctx context.Context, command string, env []string) ([]byte, error)
- now() time.Time
-}
-
-type runtimeEnvironment struct{}
-
-func (r runtimeEnvironment) existingEnv() []string {
- return os.Environ()
-}
-func (r runtimeEnvironment) getenv(key string) string {
- return os.Getenv(key)
-}
-func (r runtimeEnvironment) now() time.Time {
- return time.Now().UTC()
-}
-
-func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) {
- splitCommand := strings.Fields(command)
- cmd := exec.CommandContext(ctx, splitCommand[0], splitCommand[1:]...)
- cmd.Env = env
-
- var stdout, stderr bytes.Buffer
- cmd.Stdout = &stdout
- cmd.Stderr = &stderr
-
- if err := cmd.Run(); err != nil {
- if ctx.Err() == context.DeadlineExceeded {
- return nil, context.DeadlineExceeded
- }
- if exitError, ok := err.(*exec.ExitError); ok {
- return nil, exitCodeError(exitError)
- }
- return nil, executableError(err)
- }
-
- bytesStdout := bytes.TrimSpace(stdout.Bytes())
- if len(bytesStdout) > 0 {
- return bytesStdout, nil
- }
- return bytes.TrimSpace(stderr.Bytes()), nil
-}
-
-type executableSubjectProvider struct {
- Command string
- Timeout time.Duration
- OutputFile string
- client *http.Client
- opts *Options
- env environment
-}
-
-type executableResponse struct {
- Version int `json:"version,omitempty"`
- Success *bool `json:"success,omitempty"`
- TokenType string `json:"token_type,omitempty"`
- ExpirationTime int64 `json:"expiration_time,omitempty"`
- IDToken string `json:"id_token,omitempty"`
- SamlResponse string `json:"saml_response,omitempty"`
- Code string `json:"code,omitempty"`
- Message string `json:"message,omitempty"`
-}
-
-func (sp *executableSubjectProvider) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) {
- var result executableResponse
- if err := json.Unmarshal(response, &result); err != nil {
- return "", jsonParsingError(source, string(response))
- }
- // Validate
- if result.Version == 0 {
- return "", missingFieldError(source, "version")
- }
- if result.Success == nil {
- return "", missingFieldError(source, "success")
- }
- if !*result.Success {
- if result.Code == "" || result.Message == "" {
- return "", malformedFailureError()
- }
- return "", userDefinedError(result.Code, result.Message)
- }
- if result.Version > executableSupportedMaxVersion || result.Version < 0 {
- return "", unsupportedVersionError(source, result.Version)
- }
- if result.ExpirationTime == 0 && sp.OutputFile != "" {
- return "", missingFieldError(source, "expiration_time")
- }
- if result.TokenType == "" {
- return "", missingFieldError(source, "token_type")
- }
- if result.ExpirationTime != 0 && result.ExpirationTime < now {
- return "", tokenExpiredError()
- }
-
- switch result.TokenType {
- case jwtTokenType, idTokenType:
- if result.IDToken == "" {
- return "", missingFieldError(source, "id_token")
- }
- return result.IDToken, nil
- case saml2TokenType:
- if result.SamlResponse == "" {
- return "", missingFieldError(source, "saml_response")
- }
- return result.SamlResponse, nil
- default:
- return "", tokenTypeError(source)
- }
-}
-
-func (sp *executableSubjectProvider) subjectToken(ctx context.Context) (string, error) {
- if token, err := sp.getTokenFromOutputFile(); token != "" || err != nil {
- return token, err
- }
- return sp.getTokenFromExecutableCommand(ctx)
-}
-
-func (sp *executableSubjectProvider) providerType() string {
- return executableProviderType
-}
-
-func (sp *executableSubjectProvider) getTokenFromOutputFile() (token string, err error) {
- if sp.OutputFile == "" {
- // This ExecutableCredentialSource doesn't use an OutputFile.
- return "", nil
- }
-
- file, err := os.Open(sp.OutputFile)
- if err != nil {
- // No OutputFile found. Hasn't been created yet, so skip it.
- return "", nil
- }
- defer file.Close()
-
- data, err := internal.ReadAll(file)
- if err != nil || len(data) == 0 {
- // Cachefile exists, but no data found. Get new credential.
- return "", nil
- }
-
- token, err = sp.parseSubjectTokenFromSource(data, outputFileSource, sp.env.now().Unix())
- if err != nil {
- if _, ok := err.(nonCacheableError); ok {
- // If the cached token is expired we need a new token,
- // and if the cache contains a failure, we need to try again.
- return "", nil
- }
-
- // There was an error in the cached token, and the developer should be aware of it.
- return "", err
- }
- // Token parsing succeeded. Use found token.
- return token, nil
-}
-
-func (sp *executableSubjectProvider) executableEnvironment() []string {
- result := sp.env.existingEnv()
- result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", sp.opts.Audience))
- result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", sp.opts.SubjectTokenType))
- result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0")
- if sp.opts.ServiceAccountImpersonationURL != "" {
- matches := serviceAccountImpersonationRE.FindStringSubmatch(sp.opts.ServiceAccountImpersonationURL)
- if matches != nil {
- result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1]))
- }
- }
- if sp.OutputFile != "" {
- result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", sp.OutputFile))
- }
- return result
-}
-
-func (sp *executableSubjectProvider) getTokenFromExecutableCommand(ctx context.Context) (string, error) {
- // For security reasons, we need our consumers to set this environment variable to allow executables to be run.
- if sp.env.getenv(allowExecutablesEnvVar) != "1" {
- return "", errors.New("credentials: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run")
- }
-
- ctx, cancel := context.WithDeadline(ctx, sp.env.now().Add(sp.Timeout))
- defer cancel()
-
- output, err := sp.env.run(ctx, sp.Command, sp.executableEnvironment())
- if err != nil {
- return "", err
- }
- return sp.parseSubjectTokenFromSource(output, executableSource, sp.env.now().Unix())
-}
-
-func missingFieldError(source, field string) error {
- return fmt.Errorf("credentials: %q missing %q field", source, field)
-}
-
-func jsonParsingError(source, data string) error {
- return fmt.Errorf("credentials: unable to parse %q: %v", source, data)
-}
-
-func malformedFailureError() error {
- return nonCacheableError{"credentials: response must include `error` and `message` fields when unsuccessful"}
-}
-
-func userDefinedError(code, message string) error {
- return nonCacheableError{fmt.Sprintf("credentials: response contains unsuccessful response: (%v) %v", code, message)}
-}
-
-func unsupportedVersionError(source string, version int) error {
- return fmt.Errorf("credentials: %v contains unsupported version: %v", source, version)
-}
-
-func tokenExpiredError() error {
- return nonCacheableError{"credentials: the token returned by the executable is expired"}
-}
-
-func tokenTypeError(source string) error {
- return fmt.Errorf("credentials: %v contains unsupported token type", source)
-}
-
-func exitCodeError(err *exec.ExitError) error {
- return fmt.Errorf("credentials: executable command failed with exit code %v: %w", err.ExitCode(), err)
-}
-
-func executableError(err error) error {
- return fmt.Errorf("credentials: executable command failed: %w", err)
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
deleted file mode 100644
index b19c6edeae5..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
+++ /dev/null
@@ -1,367 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package externalaccount
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
- "regexp"
- "strconv"
- "strings"
- "time"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/credentials/internal/impersonate"
- "cloud.google.com/go/auth/credentials/internal/stsexchange"
- "cloud.google.com/go/auth/internal/credsfile"
-)
-
-const (
- timeoutMinimum = 5 * time.Second
- timeoutMaximum = 120 * time.Second
-
- universeDomainPlaceholder = "UNIVERSE_DOMAIN"
- defaultTokenURL = "https://sts.UNIVERSE_DOMAIN/v1/token"
- defaultUniverseDomain = "googleapis.com"
-)
-
-var (
- // Now aliases time.Now for testing
- Now = func() time.Time {
- return time.Now().UTC()
- }
- validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`)
-)
-
-// Options stores the configuration for fetching tokens with external credentials.
-type Options struct {
- // Audience is the Secure Token Service (STS) audience which contains the resource name for the workload
- // identity pool or the workforce pool and the provider identifier in that pool.
- Audience string
- // SubjectTokenType is the STS token type based on the Oauth2.0 token exchange spec
- // e.g. `urn:ietf:params:oauth:token-type:jwt`.
- SubjectTokenType string
- // TokenURL is the STS token exchange endpoint.
- TokenURL string
- // TokenInfoURL is the token_info endpoint used to retrieve the account related information (
- // user attributes like account identifier, eg. email, username, uid, etc). This is
- // needed for gCloud session account identification.
- TokenInfoURL string
- // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only
- // required for workload identity pools when APIs to be accessed have not integrated with UberMint.
- ServiceAccountImpersonationURL string
- // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation
- // token will be valid for.
- ServiceAccountImpersonationLifetimeSeconds int
- // ClientSecret is currently only required if token_info endpoint also
- // needs to be called with the generated GCP access token. When provided, STS will be
- // called with additional basic authentication using client_id as username and client_secret as password.
- ClientSecret string
- // ClientID is only required in conjunction with ClientSecret, as described above.
- ClientID string
- // CredentialSource contains the necessary information to retrieve the token itself, as well
- // as some environmental information.
- CredentialSource *credsfile.CredentialSource
- // QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries
- // will set the x-goog-user-project which overrides the project associated with the credentials.
- QuotaProjectID string
- // Scopes contains the desired scopes for the returned access token.
- Scopes []string
- // WorkforcePoolUserProject should be set when it is a workforce pool and
- // not a workload identity pool. The underlying principal must still have
- // serviceusage.services.use IAM permission to use the project for
- // billing/quota. Optional.
- WorkforcePoolUserProject string
- // UniverseDomain is the default service domain for a given Cloud universe.
- // This value will be used in the default STS token URL. The default value
- // is "googleapis.com". It will not be used if TokenURL is set. Optional.
- UniverseDomain string
- // SubjectTokenProvider is an optional token provider for OIDC/SAML
- // credentials. One of SubjectTokenProvider, AWSSecurityCredentialProvider
- // or CredentialSource must be provided. Optional.
- SubjectTokenProvider SubjectTokenProvider
- // AwsSecurityCredentialsProvider is an AWS Security Credential provider
- // for AWS credentials. One of SubjectTokenProvider,
- // AWSSecurityCredentialProvider or CredentialSource must be provided. Optional.
- AwsSecurityCredentialsProvider AwsSecurityCredentialsProvider
- // Client for token request.
- Client *http.Client
-}
-
-// SubjectTokenProvider can be used to supply a subject token to exchange for a
-// GCP access token.
-type SubjectTokenProvider interface {
- // SubjectToken should return a valid subject token or an error.
- // The external account token provider does not cache the returned subject
- // token, so caching logic should be implemented in the provider to prevent
- // multiple requests for the same subject token.
- SubjectToken(ctx context.Context, opts *RequestOptions) (string, error)
-}
-
-// RequestOptions contains information about the requested subject token or AWS
-// security credentials from the Google external account credential.
-type RequestOptions struct {
- // Audience is the requested audience for the external account credential.
- Audience string
- // Subject token type is the requested subject token type for the external
- // account credential. Expected values include:
- // “urn:ietf:params:oauth:token-type:jwt”
- // “urn:ietf:params:oauth:token-type:id-token”
- // “urn:ietf:params:oauth:token-type:saml2”
- // “urn:ietf:params:aws:token-type:aws4_request”
- SubjectTokenType string
-}
-
-// AwsSecurityCredentialsProvider can be used to supply AwsSecurityCredentials
-// and an AWS Region to exchange for a GCP access token.
-type AwsSecurityCredentialsProvider interface {
- // AwsRegion should return the AWS region or an error.
- AwsRegion(ctx context.Context, opts *RequestOptions) (string, error)
- // GetAwsSecurityCredentials should return a valid set of
- // AwsSecurityCredentials or an error. The external account token provider
- // does not cache the returned security credentials, so caching logic should
- // be implemented in the provider to prevent multiple requests for the
- // same security credentials.
- AwsSecurityCredentials(ctx context.Context, opts *RequestOptions) (*AwsSecurityCredentials, error)
-}
-
-// AwsSecurityCredentials models AWS security credentials.
-type AwsSecurityCredentials struct {
- // AccessKeyId is the AWS Access Key ID - Required.
- AccessKeyID string `json:"AccessKeyID"`
- // SecretAccessKey is the AWS Secret Access Key - Required.
- SecretAccessKey string `json:"SecretAccessKey"`
- // SessionToken is the AWS Session token. This should be provided for
- // temporary AWS security credentials - Optional.
- SessionToken string `json:"Token"`
-}
-
-func (o *Options) validate() error {
- if o.Audience == "" {
- return fmt.Errorf("externalaccount: Audience must be set")
- }
- if o.SubjectTokenType == "" {
- return fmt.Errorf("externalaccount: Subject token type must be set")
- }
- if o.WorkforcePoolUserProject != "" {
- if valid := validWorkforceAudiencePattern.MatchString(o.Audience); !valid {
- return fmt.Errorf("externalaccount: workforce_pool_user_project should not be set for non-workforce pool credentials")
- }
- }
- count := 0
- if o.CredentialSource != nil {
- count++
- }
- if o.SubjectTokenProvider != nil {
- count++
- }
- if o.AwsSecurityCredentialsProvider != nil {
- count++
- }
- if count == 0 {
- return fmt.Errorf("externalaccount: one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set")
- }
- if count > 1 {
- return fmt.Errorf("externalaccount: only one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set")
- }
- return nil
-}
-
-// resolveTokenURL sets the default STS token endpoint with the configured
-// universe domain.
-func (o *Options) resolveTokenURL() {
- if o.TokenURL != "" {
- return
- } else if o.UniverseDomain != "" {
- o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, o.UniverseDomain, 1)
- } else {
- o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, defaultUniverseDomain, 1)
- }
-}
-
-// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider]
-// configured with the provided options.
-func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
- if err := opts.validate(); err != nil {
- return nil, err
- }
- opts.resolveTokenURL()
- stp, err := newSubjectTokenProvider(opts)
- if err != nil {
- return nil, err
- }
- tp := &tokenProvider{
- client: opts.Client,
- opts: opts,
- stp: stp,
- }
- if opts.ServiceAccountImpersonationURL == "" {
- return auth.NewCachedTokenProvider(tp, nil), nil
- }
-
- scopes := make([]string, len(opts.Scopes))
- copy(scopes, opts.Scopes)
- // needed for impersonation
- tp.opts.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"}
- imp, err := impersonate.NewTokenProvider(&impersonate.Options{
- Client: opts.Client,
- URL: opts.ServiceAccountImpersonationURL,
- Scopes: scopes,
- Tp: auth.NewCachedTokenProvider(tp, nil),
- TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds,
- })
- if err != nil {
- return nil, err
- }
- return auth.NewCachedTokenProvider(imp, nil), nil
-}
-
-type subjectTokenProvider interface {
- subjectToken(ctx context.Context) (string, error)
- providerType() string
-}
-
-// tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens.
-type tokenProvider struct {
- client *http.Client
- opts *Options
- stp subjectTokenProvider
-}
-
-func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
- subjectToken, err := tp.stp.subjectToken(ctx)
- if err != nil {
- return nil, err
- }
-
- stsRequest := &stsexchange.TokenRequest{
- GrantType: stsexchange.GrantType,
- Audience: tp.opts.Audience,
- Scope: tp.opts.Scopes,
- RequestedTokenType: stsexchange.TokenType,
- SubjectToken: subjectToken,
- SubjectTokenType: tp.opts.SubjectTokenType,
- }
- header := make(http.Header)
- header.Set("Content-Type", "application/x-www-form-urlencoded")
- header.Add("x-goog-api-client", getGoogHeaderValue(tp.opts, tp.stp))
- clientAuth := stsexchange.ClientAuthentication{
- AuthStyle: auth.StyleInHeader,
- ClientID: tp.opts.ClientID,
- ClientSecret: tp.opts.ClientSecret,
- }
- var options map[string]interface{}
- // Do not pass workforce_pool_user_project when client authentication is used.
- // The client ID is sufficient for determining the user project.
- if tp.opts.WorkforcePoolUserProject != "" && tp.opts.ClientID == "" {
- options = map[string]interface{}{
- "userProject": tp.opts.WorkforcePoolUserProject,
- }
- }
- stsResp, err := stsexchange.ExchangeToken(ctx, &stsexchange.Options{
- Client: tp.client,
- Endpoint: tp.opts.TokenURL,
- Request: stsRequest,
- Authentication: clientAuth,
- Headers: header,
- ExtraOpts: options,
- })
- if err != nil {
- return nil, err
- }
-
- tok := &auth.Token{
- Value: stsResp.AccessToken,
- Type: stsResp.TokenType,
- }
- // The RFC8693 doesn't define the explicit 0 of "expires_in" field behavior.
- if stsResp.ExpiresIn <= 0 {
- return nil, fmt.Errorf("credentials: got invalid expiry from security token service")
- }
- tok.Expiry = Now().Add(time.Duration(stsResp.ExpiresIn) * time.Second)
- return tok, nil
-}
-
-// newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a
-// subjectTokenProvider
-func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
- reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType}
- if o.AwsSecurityCredentialsProvider != nil {
- return &awsSubjectProvider{
- securityCredentialsProvider: o.AwsSecurityCredentialsProvider,
- TargetResource: o.Audience,
- reqOpts: reqOpts,
- }, nil
- } else if o.SubjectTokenProvider != nil {
- return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil
- } else if len(o.CredentialSource.EnvironmentID) > 3 && o.CredentialSource.EnvironmentID[:3] == "aws" {
- if awsVersion, err := strconv.Atoi(o.CredentialSource.EnvironmentID[3:]); err == nil {
- if awsVersion != 1 {
- return nil, fmt.Errorf("credentials: aws version '%d' is not supported in the current build", awsVersion)
- }
-
- awsProvider := &awsSubjectProvider{
- EnvironmentID: o.CredentialSource.EnvironmentID,
- RegionURL: o.CredentialSource.RegionURL,
- RegionalCredVerificationURL: o.CredentialSource.RegionalCredVerificationURL,
- CredVerificationURL: o.CredentialSource.URL,
- TargetResource: o.Audience,
- Client: o.Client,
- }
- if o.CredentialSource.IMDSv2SessionTokenURL != "" {
- awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL
- }
-
- return awsProvider, nil
- }
- } else if o.CredentialSource.File != "" {
- return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil
- } else if o.CredentialSource.URL != "" {
- return &urlSubjectProvider{URL: o.CredentialSource.URL, Headers: o.CredentialSource.Headers, Format: o.CredentialSource.Format, Client: o.Client}, nil
- } else if o.CredentialSource.Executable != nil {
- ec := o.CredentialSource.Executable
- if ec.Command == "" {
- return nil, errors.New("credentials: missing `command` field — executable command must be provided")
- }
-
- execProvider := &executableSubjectProvider{}
- execProvider.Command = ec.Command
- if ec.TimeoutMillis == 0 {
- execProvider.Timeout = executableDefaultTimeout
- } else {
- execProvider.Timeout = time.Duration(ec.TimeoutMillis) * time.Millisecond
- if execProvider.Timeout < timeoutMinimum || execProvider.Timeout > timeoutMaximum {
- return nil, fmt.Errorf("credentials: invalid `timeout_millis` field — executable timeout must be between %v and %v seconds", timeoutMinimum.Seconds(), timeoutMaximum.Seconds())
- }
- }
- execProvider.OutputFile = ec.OutputFile
- execProvider.client = o.Client
- execProvider.opts = o
- execProvider.env = runtimeEnvironment{}
- return execProvider, nil
- }
- return nil, errors.New("credentials: unable to parse credential source")
-}
-
-func getGoogHeaderValue(conf *Options, p subjectTokenProvider) string {
- return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t",
- goVersion(),
- "unknown",
- p.providerType(),
- conf.ServiceAccountImpersonationURL != "",
- conf.ServiceAccountImpersonationLifetimeSeconds != 0)
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go b/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go
deleted file mode 100644
index 8186939fe1d..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package externalaccount
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "os"
-
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/credsfile"
-)
-
-const (
- fileProviderType = "file"
-)
-
-type fileSubjectProvider struct {
- File string
- Format *credsfile.Format
-}
-
-func (sp *fileSubjectProvider) subjectToken(context.Context) (string, error) {
- tokenFile, err := os.Open(sp.File)
- if err != nil {
- return "", fmt.Errorf("credentials: failed to open credential file %q: %w", sp.File, err)
- }
- defer tokenFile.Close()
- tokenBytes, err := internal.ReadAll(tokenFile)
- if err != nil {
- return "", fmt.Errorf("credentials: failed to read credential file: %w", err)
- }
- tokenBytes = bytes.TrimSpace(tokenBytes)
-
- if sp.Format == nil {
- return string(tokenBytes), nil
- }
- switch sp.Format.Type {
- case fileTypeJSON:
- jsonData := make(map[string]interface{})
- err = json.Unmarshal(tokenBytes, &jsonData)
- if err != nil {
- return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err)
- }
- val, ok := jsonData[sp.Format.SubjectTokenFieldName]
- if !ok {
- return "", errors.New("credentials: provided subject_token_field_name not found in credentials")
- }
- token, ok := val.(string)
- if !ok {
- return "", errors.New("credentials: improperly formatted subject token")
- }
- return token, nil
- case fileTypeText:
- return string(tokenBytes), nil
- default:
- return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type)
- }
-}
-
-func (sp *fileSubjectProvider) providerType() string {
- return fileProviderType
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go b/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go
deleted file mode 100644
index 8e4b4379b41..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package externalaccount
-
-import (
- "runtime"
- "strings"
- "unicode"
-)
-
-var (
- // version is a package internal global variable for testing purposes.
- version = runtime.Version
-)
-
-// versionUnknown is only used when the runtime version cannot be determined.
-const versionUnknown = "UNKNOWN"
-
-// goVersion returns a Go runtime version derived from the runtime environment
-// that is modified to be suitable for reporting in a header, meaning it has no
-// whitespace. If it is unable to determine the Go runtime version, it returns
-// versionUnknown.
-func goVersion() string {
- const develPrefix = "devel +"
-
- s := version()
- if strings.HasPrefix(s, develPrefix) {
- s = s[len(develPrefix):]
- if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
- s = s[:p]
- }
- return s
- } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
- s = s[:p]
- }
-
- notSemverRune := func(r rune) bool {
- return !strings.ContainsRune("0123456789.", r)
- }
-
- if strings.HasPrefix(s, "go1") {
- s = s[2:]
- var prerelease string
- if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
- s, prerelease = s[:p], s[p:]
- }
- if strings.HasSuffix(s, ".") {
- s += "0"
- } else if strings.Count(s, ".") < 2 {
- s += ".0"
- }
- if prerelease != "" {
- // Some release candidates already have a dash in them.
- if !strings.HasPrefix(prerelease, "-") {
- prerelease = "-" + prerelease
- }
- s += prerelease
- }
- return s
- }
- return versionUnknown
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go b/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go
deleted file mode 100644
index be3c87351f7..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package externalaccount
-
-import "context"
-
-type programmaticProvider struct {
- opts *RequestOptions
- stp SubjectTokenProvider
-}
-
-func (pp *programmaticProvider) providerType() string {
- return programmaticProviderType
-}
-
-func (pp *programmaticProvider) subjectToken(ctx context.Context) (string, error) {
- return pp.stp.SubjectToken(ctx, pp.opts)
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go
deleted file mode 100644
index 22b8af1c11b..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package externalaccount
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
-
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/credsfile"
-)
-
-const (
- fileTypeText = "text"
- fileTypeJSON = "json"
- urlProviderType = "url"
- programmaticProviderType = "programmatic"
-)
-
-type urlSubjectProvider struct {
- URL string
- Headers map[string]string
- Format *credsfile.Format
- Client *http.Client
-}
-
-func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) {
- req, err := http.NewRequestWithContext(ctx, "GET", sp.URL, nil)
- if err != nil {
- return "", fmt.Errorf("credentials: HTTP request for URL-sourced credential failed: %w", err)
- }
-
- for key, val := range sp.Headers {
- req.Header.Add(key, val)
- }
- resp, err := sp.Client.Do(req)
- if err != nil {
- return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err)
- }
- defer resp.Body.Close()
-
- respBody, err := internal.ReadAll(resp.Body)
- if err != nil {
- return "", fmt.Errorf("credentials: invalid body in subject token URL query: %w", err)
- }
- if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
- return "", fmt.Errorf("credentials: status code %d: %s", c, respBody)
- }
-
- if sp.Format == nil {
- return string(respBody), nil
- }
- switch sp.Format.Type {
- case "json":
- jsonData := make(map[string]interface{})
- err = json.Unmarshal(respBody, &jsonData)
- if err != nil {
- return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err)
- }
- val, ok := jsonData[sp.Format.SubjectTokenFieldName]
- if !ok {
- return "", errors.New("credentials: provided subject_token_field_name not found in credentials")
- }
- token, ok := val.(string)
- if !ok {
- return "", errors.New("credentials: improperly formatted subject token")
- }
- return token, nil
- case fileTypeText:
- return string(respBody), nil
- default:
- return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type)
- }
-}
-
-func (sp *urlSubjectProvider) providerType() string {
- return urlProviderType
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go b/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go
deleted file mode 100644
index 0d788547987..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package externalaccountuser
-
-import (
- "context"
- "errors"
- "net/http"
- "time"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/credentials/internal/stsexchange"
- "cloud.google.com/go/auth/internal"
-)
-
-// Options stores the configuration for fetching tokens with external authorized
-// user credentials.
-type Options struct {
- // Audience is the Secure Token Service (STS) audience which contains the
- // resource name for the workforce pool and the provider identifier in that
- // pool.
- Audience string
- // RefreshToken is the OAuth 2.0 refresh token.
- RefreshToken string
- // TokenURL is the STS token exchange endpoint for refresh.
- TokenURL string
- // TokenInfoURL is the STS endpoint URL for token introspection. Optional.
- TokenInfoURL string
- // ClientID is only required in conjunction with ClientSecret, as described
- // below.
- ClientID string
- // ClientSecret is currently only required if token_info endpoint also needs
- // to be called with the generated a cloud access token. When provided, STS
- // will be called with additional basic authentication using client_id as
- // username and client_secret as password.
- ClientSecret string
- // Scopes contains the desired scopes for the returned access token.
- Scopes []string
-
- // Client for token request.
- Client *http.Client
-}
-
-func (c *Options) validate() bool {
- return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != ""
-}
-
-// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider]
-// configured with the provided options.
-func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
- if !opts.validate() {
- return nil, errors.New("credentials: invalid external_account_authorized_user configuration")
- }
-
- tp := &tokenProvider{
- o: opts,
- }
- return auth.NewCachedTokenProvider(tp, nil), nil
-}
-
-type tokenProvider struct {
- o *Options
-}
-
-func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
- opts := tp.o
-
- clientAuth := stsexchange.ClientAuthentication{
- AuthStyle: auth.StyleInHeader,
- ClientID: opts.ClientID,
- ClientSecret: opts.ClientSecret,
- }
- headers := make(http.Header)
- headers.Set("Content-Type", "application/x-www-form-urlencoded")
- stsResponse, err := stsexchange.RefreshAccessToken(ctx, &stsexchange.Options{
- Client: opts.Client,
- Endpoint: opts.TokenURL,
- RefreshToken: opts.RefreshToken,
- Authentication: clientAuth,
- Headers: headers,
- })
- if err != nil {
- return nil, err
- }
- if stsResponse.ExpiresIn < 0 {
- return nil, errors.New("credentials: invalid expiry from security token service")
- }
-
- // guarded by the wrapping with CachedTokenProvider
- if stsResponse.RefreshToken != "" {
- opts.RefreshToken = stsResponse.RefreshToken
- }
- return &auth.Token{
- Value: stsResponse.AccessToken,
- Expiry: time.Now().UTC().Add(time.Duration(stsResponse.ExpiresIn) * time.Second),
- Type: internal.TokenTypeBearer,
- }, nil
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/upstream/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
deleted file mode 100644
index 467edb9088e..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package gdch
-
-import (
- "context"
- "crypto/rsa"
- "crypto/tls"
- "crypto/x509"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "net/url"
- "os"
- "time"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/credsfile"
- "cloud.google.com/go/auth/internal/jwt"
-)
-
-const (
- // GrantType is the grant type for the token request.
- GrantType = "urn:ietf:params:oauth:token-type:token-exchange"
- requestTokenType = "urn:ietf:params:oauth:token-type:access_token"
- subjectTokenType = "urn:k8s:params:oauth:token-type:serviceaccount"
-)
-
-var (
- gdchSupportFormatVersions map[string]bool = map[string]bool{
- "1": true,
- }
-)
-
-// Options for [NewTokenProvider].
-type Options struct {
- STSAudience string
- Client *http.Client
-}
-
-// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a
-// GDCH cred file.
-func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.TokenProvider, error) {
- if !gdchSupportFormatVersions[f.FormatVersion] {
- return nil, fmt.Errorf("credentials: unsupported gdch_service_account format %q", f.FormatVersion)
- }
- if o.STSAudience == "" {
- return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows")
- }
- pk, err := internal.ParseKey([]byte(f.PrivateKey))
- if err != nil {
- return nil, err
- }
- certPool, err := loadCertPool(f.CertPath)
- if err != nil {
- return nil, err
- }
-
- tp := gdchProvider{
- serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name),
- tokenURL: f.TokenURL,
- aud: o.STSAudience,
- pk: pk,
- pkID: f.PrivateKeyID,
- certPool: certPool,
- client: o.Client,
- }
- return tp, nil
-}
-
-func loadCertPool(path string) (*x509.CertPool, error) {
- pool := x509.NewCertPool()
- pem, err := os.ReadFile(path)
- if err != nil {
- return nil, fmt.Errorf("credentials: failed to read certificate: %w", err)
- }
- pool.AppendCertsFromPEM(pem)
- return pool, nil
-}
-
-type gdchProvider struct {
- serviceIdentity string
- tokenURL string
- aud string
- pk *rsa.PrivateKey
- pkID string
- certPool *x509.CertPool
-
- client *http.Client
-}
-
-func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) {
- addCertToTransport(g.client, g.certPool)
- iat := time.Now()
- exp := iat.Add(time.Hour)
- claims := jwt.Claims{
- Iss: g.serviceIdentity,
- Sub: g.serviceIdentity,
- Aud: g.tokenURL,
- Iat: iat.Unix(),
- Exp: exp.Unix(),
- }
- h := jwt.Header{
- Algorithm: jwt.HeaderAlgRSA256,
- Type: jwt.HeaderType,
- KeyID: string(g.pkID),
- }
- payload, err := jwt.EncodeJWS(&h, &claims, g.pk)
- if err != nil {
- return nil, err
- }
- v := url.Values{}
- v.Set("grant_type", GrantType)
- v.Set("audience", g.aud)
- v.Set("requested_token_type", requestTokenType)
- v.Set("subject_token", payload)
- v.Set("subject_token_type", subjectTokenType)
- resp, err := g.client.PostForm(g.tokenURL, v)
- if err != nil {
- return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
- }
- defer resp.Body.Close()
- body, err := internal.ReadAll(resp.Body)
- if err != nil {
- return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
- }
- if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
- return nil, &auth.Error{
- Response: resp,
- Body: body,
- }
- }
-
- var tokenRes struct {
- AccessToken string `json:"access_token"`
- TokenType string `json:"token_type"`
- ExpiresIn int64 `json:"expires_in"` // relative seconds from now
- }
- if err := json.Unmarshal(body, &tokenRes); err != nil {
- return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
- }
- token := &auth.Token{
- Value: tokenRes.AccessToken,
- Type: tokenRes.TokenType,
- }
- raw := make(map[string]interface{})
- json.Unmarshal(body, &raw) // no error checks for optional fields
- token.Metadata = raw
-
- if secs := tokenRes.ExpiresIn; secs > 0 {
- token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
- }
- return token, nil
-}
-
-// addCertToTransport makes a best effort attempt at adding in the cert info to
-// the client. It tries to keep all configured transport settings if the
-// underlying transport is an http.Transport. Or else it overwrites the
-// transport with defaults adding in the certs.
-func addCertToTransport(hc *http.Client, certPool *x509.CertPool) {
- trans, ok := hc.Transport.(*http.Transport)
- if !ok {
- trans = http.DefaultTransport.(*http.Transport).Clone()
- }
- trans.TLSClientConfig = &tls.Config{
- RootCAs: certPool,
- }
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/upstream/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
deleted file mode 100644
index 3ceab873b8e..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package impersonate
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "time"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/internal"
-)
-
-const (
- defaultTokenLifetime = "3600s"
- authHeaderKey = "Authorization"
-)
-
-// generateAccesstokenReq is used for service account impersonation
-type generateAccessTokenReq struct {
- Delegates []string `json:"delegates,omitempty"`
- Lifetime string `json:"lifetime,omitempty"`
- Scope []string `json:"scope,omitempty"`
-}
-
-type impersonateTokenResponse struct {
- AccessToken string `json:"accessToken"`
- ExpireTime string `json:"expireTime"`
-}
-
-// NewTokenProvider uses a source credential, stored in Ts, to request an access token to the provided URL.
-// Scopes can be defined when the access token is requested.
-func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
- if err := opts.validate(); err != nil {
- return nil, err
- }
- return opts, nil
-}
-
-// Options for [NewTokenProvider].
-type Options struct {
- // Tp is the source credential used to generate a token on the
- // impersonated service account. Required.
- Tp auth.TokenProvider
-
- // URL is the endpoint to call to generate a token
- // on behalf of the service account. Required.
- URL string
- // Scopes that the impersonated credential should have. Required.
- Scopes []string
- // Delegates are the service account email addresses in a delegation chain.
- // Each service account must be granted roles/iam.serviceAccountTokenCreator
- // on the next service account in the chain. Optional.
- Delegates []string
- // TokenLifetimeSeconds is the number of seconds the impersonation token will
- // be valid for. Defaults to 1 hour if unset. Optional.
- TokenLifetimeSeconds int
- // Client configures the underlying client used to make network requests
- // when fetching tokens. Required.
- Client *http.Client
-}
-
-func (o *Options) validate() error {
- if o.Tp == nil {
- return errors.New("credentials: missing required 'source_credentials' field in impersonated credentials")
- }
- if o.URL == "" {
- return errors.New("credentials: missing required 'service_account_impersonation_url' field in impersonated credentials")
- }
- return nil
-}
-
-// Token performs the exchange to get a temporary service account token to allow access to GCP.
-func (o *Options) Token(ctx context.Context) (*auth.Token, error) {
- lifetime := defaultTokenLifetime
- if o.TokenLifetimeSeconds != 0 {
- lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds)
- }
- reqBody := generateAccessTokenReq{
- Lifetime: lifetime,
- Scope: o.Scopes,
- Delegates: o.Delegates,
- }
- b, err := json.Marshal(reqBody)
- if err != nil {
- return nil, fmt.Errorf("credentials: unable to marshal request: %w", err)
- }
- req, err := http.NewRequestWithContext(ctx, "POST", o.URL, bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("credentials: unable to create impersonation request: %w", err)
- }
- req.Header.Set("Content-Type", "application/json")
- if err := setAuthHeader(ctx, o.Tp, req); err != nil {
- return nil, err
- }
- resp, err := o.Client.Do(req)
- if err != nil {
- return nil, fmt.Errorf("credentials: unable to generate access token: %w", err)
- }
- defer resp.Body.Close()
- body, err := internal.ReadAll(resp.Body)
- if err != nil {
- return nil, fmt.Errorf("credentials: unable to read body: %w", err)
- }
- if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
- return nil, fmt.Errorf("credentials: status code %d: %s", c, body)
- }
-
- var accessTokenResp impersonateTokenResponse
- if err := json.Unmarshal(body, &accessTokenResp); err != nil {
- return nil, fmt.Errorf("credentials: unable to parse response: %w", err)
- }
- expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime)
- if err != nil {
- return nil, fmt.Errorf("credentials: unable to parse expiry: %w", err)
- }
- return &auth.Token{
- Value: accessTokenResp.AccessToken,
- Expiry: expiry,
- Type: internal.TokenTypeBearer,
- }, nil
-}
-
-func setAuthHeader(ctx context.Context, tp auth.TokenProvider, r *http.Request) error {
- t, err := tp.Token(ctx)
- if err != nil {
- return err
- }
- typ := t.Type
- if typ == "" {
- typ = internal.TokenTypeBearer
- }
- r.Header.Set(authHeaderKey, typ+" "+t.Value)
- return nil
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/upstream/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go
deleted file mode 100644
index f70e0aef48f..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stsexchange
-
-import (
- "context"
- "encoding/base64"
- "encoding/json"
- "fmt"
- "net/http"
- "net/url"
- "strconv"
- "strings"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/internal"
-)
-
-const (
- // GrantType for a sts exchange.
- GrantType = "urn:ietf:params:oauth:grant-type:token-exchange"
- // TokenType for a sts exchange.
- TokenType = "urn:ietf:params:oauth:token-type:access_token"
-
- jwtTokenType = "urn:ietf:params:oauth:token-type:jwt"
-)
-
-// Options stores the configuration for making an sts exchange request.
-type Options struct {
- Client *http.Client
- Endpoint string
- Request *TokenRequest
- Authentication ClientAuthentication
- Headers http.Header
- // ExtraOpts are optional fields marshalled into the `options` field of the
- // request body.
- ExtraOpts map[string]interface{}
- RefreshToken string
-}
-
-// RefreshAccessToken performs the token exchange using a refresh token flow.
-func RefreshAccessToken(ctx context.Context, opts *Options) (*TokenResponse, error) {
- data := url.Values{}
- data.Set("grant_type", "refresh_token")
- data.Set("refresh_token", opts.RefreshToken)
- return doRequest(ctx, opts, data)
-}
-
-// ExchangeToken performs an oauth2 token exchange with the provided endpoint.
-func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) {
- data := url.Values{}
- data.Set("audience", opts.Request.Audience)
- data.Set("grant_type", GrantType)
- data.Set("requested_token_type", TokenType)
- data.Set("subject_token_type", opts.Request.SubjectTokenType)
- data.Set("subject_token", opts.Request.SubjectToken)
- data.Set("scope", strings.Join(opts.Request.Scope, " "))
- if opts.ExtraOpts != nil {
- opts, err := json.Marshal(opts.ExtraOpts)
- if err != nil {
- return nil, fmt.Errorf("credentials: failed to marshal additional options: %w", err)
- }
- data.Set("options", string(opts))
- }
- return doRequest(ctx, opts, data)
-}
-
-func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) {
- opts.Authentication.InjectAuthentication(data, opts.Headers)
- encodedData := data.Encode()
-
- req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData))
- if err != nil {
- return nil, fmt.Errorf("credentials: failed to properly build http request: %w", err)
-
- }
- for key, list := range opts.Headers {
- for _, val := range list {
- req.Header.Add(key, val)
- }
- }
- req.Header.Set("Content-Length", strconv.Itoa(len(encodedData)))
-
- resp, err := opts.Client.Do(req)
- if err != nil {
- return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err)
- }
- defer resp.Body.Close()
-
- body, err := internal.ReadAll(resp.Body)
- if err != nil {
- return nil, err
- }
- if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
- return nil, fmt.Errorf("credentials: status code %d: %s", c, body)
- }
- var stsResp TokenResponse
- if err := json.Unmarshal(body, &stsResp); err != nil {
- return nil, fmt.Errorf("credentials: failed to unmarshal response body from Secure Token Server: %w", err)
- }
-
- return &stsResp, nil
-}
-
-// TokenRequest contains fields necessary to make an oauth2 token
-// exchange.
-type TokenRequest struct {
- ActingParty struct {
- ActorToken string
- ActorTokenType string
- }
- GrantType string
- Resource string
- Audience string
- Scope []string
- RequestedTokenType string
- SubjectToken string
- SubjectTokenType string
-}
-
-// TokenResponse is used to decode the remote server response during
-// an oauth2 token exchange.
-type TokenResponse struct {
- AccessToken string `json:"access_token"`
- IssuedTokenType string `json:"issued_token_type"`
- TokenType string `json:"token_type"`
- ExpiresIn int `json:"expires_in"`
- Scope string `json:"scope"`
- RefreshToken string `json:"refresh_token"`
-}
-
-// ClientAuthentication represents an OAuth client ID and secret and the
-// mechanism for passing these credentials as stated in rfc6749#2.3.1.
-type ClientAuthentication struct {
- AuthStyle auth.Style
- ClientID string
- ClientSecret string
-}
-
-// InjectAuthentication is used to add authentication to a Secure Token Service
-// exchange request. It modifies either the passed url.Values or http.Header
-// depending on the desired authentication format.
-func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) {
- if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil {
- return
- }
- switch c.AuthStyle {
- case auth.StyleInHeader:
- plainHeader := c.ClientID + ":" + c.ClientSecret
- headers.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(plainHeader)))
- default:
- values.Set("client_id", c.ClientID)
- values.Set("client_secret", c.ClientSecret)
- }
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/upstream/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
deleted file mode 100644
index b62a8ae4d5d..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package credentials
-
-import (
- "context"
- "crypto/rsa"
- "fmt"
- "strings"
- "time"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/credsfile"
- "cloud.google.com/go/auth/internal/jwt"
-)
-
-var (
- // for testing
- now func() time.Time = time.Now
-)
-
-// configureSelfSignedJWT uses the private key in the service account to create
-// a JWT without making a network call.
-func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
- pk, err := internal.ParseKey([]byte(f.PrivateKey))
- if err != nil {
- return nil, fmt.Errorf("credentials: could not parse key: %w", err)
- }
- return &selfSignedTokenProvider{
- email: f.ClientEmail,
- audience: opts.Audience,
- scopes: opts.scopes(),
- pk: pk,
- pkID: f.PrivateKeyID,
- }, nil
-}
-
-type selfSignedTokenProvider struct {
- email string
- audience string
- scopes []string
- pk *rsa.PrivateKey
- pkID string
-}
-
-func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) {
- iat := now()
- exp := iat.Add(time.Hour)
- scope := strings.Join(tp.scopes, " ")
- c := &jwt.Claims{
- Iss: tp.email,
- Sub: tp.email,
- Aud: tp.audience,
- Scope: scope,
- Iat: iat.Unix(),
- Exp: exp.Unix(),
- }
- h := &jwt.Header{
- Algorithm: jwt.HeaderAlgRSA256,
- Type: jwt.HeaderType,
- KeyID: string(tp.pkID),
- }
- msg, err := jwt.EncodeJWS(h, c, tp.pk)
- if err != nil {
- return nil, fmt.Errorf("credentials: could not encode JWT: %w", err)
- }
- return &auth.Token{Value: msg, Type: internal.TokenTypeBearer, Expiry: exp}, nil
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/grpctransport/dial_socketopt.go b/upstream/vendor/cloud.google.com/go/auth/grpctransport/dial_socketopt.go
deleted file mode 100644
index e6136080572..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/grpctransport/dial_socketopt.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux
-// +build linux
-
-package grpctransport
-
-import (
- "context"
- "net"
- "syscall"
-
- "google.golang.org/grpc"
-)
-
-const (
- // defaultTCPUserTimeout is the default TCP_USER_TIMEOUT socket option. By
- // default is 20 seconds.
- tcpUserTimeoutMilliseconds = 20000
-
- // Copied from golang.org/x/sys/unix.TCP_USER_TIMEOUT.
- tcpUserTimeoutOp = 0x12
-)
-
-func init() {
- // timeoutDialerOption is a grpc.DialOption that contains dialer with
- // socket option TCP_USER_TIMEOUT. This dialer requires go versions 1.11+.
- timeoutDialerOption = grpc.WithContextDialer(dialTCPUserTimeout)
-}
-
-func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) {
- control := func(network, address string, c syscall.RawConn) error {
- var syscallErr error
- controlErr := c.Control(func(fd uintptr) {
- syscallErr = syscall.SetsockoptInt(
- int(fd), syscall.IPPROTO_TCP, tcpUserTimeoutOp, tcpUserTimeoutMilliseconds)
- })
- if syscallErr != nil {
- return syscallErr
- }
- if controlErr != nil {
- return controlErr
- }
- return nil
- }
- d := &net.Dialer{
- Control: control,
- }
- return d.DialContext(ctx, "tcp", addr)
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/upstream/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
deleted file mode 100644
index 8dbfa7ef7e9..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package grpctransport
-
-import (
- "context"
- "net"
- "os"
- "strconv"
- "strings"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/compute/metadata"
- "google.golang.org/grpc"
- grpcgoogle "google.golang.org/grpc/credentials/google"
-)
-
-func isDirectPathEnabled(endpoint string, opts *Options) bool {
- if opts.InternalOptions != nil && !opts.InternalOptions.EnableDirectPath {
- return false
- }
- if !checkDirectPathEndPoint(endpoint) {
- return false
- }
- if b, _ := strconv.ParseBool(os.Getenv(disableDirectPathEnvVar)); b {
- return false
- }
- return true
-}
-
-func checkDirectPathEndPoint(endpoint string) bool {
- // Only [dns:///]host[:port] is supported, not other schemes (e.g., "tcp://" or "unix://").
- // Also don't try direct path if the user has chosen an alternate name resolver
- // (i.e., via ":///" prefix).
- if strings.Contains(endpoint, "://") && !strings.HasPrefix(endpoint, "dns:///") {
- return false
- }
-
- if endpoint == "" {
- return false
- }
-
- return true
-}
-
-func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool {
- if tp == nil {
- return false
- }
- tok, err := tp.Token(context.Background())
- if err != nil {
- return false
- }
- if tok == nil {
- return false
- }
- if source, _ := tok.Metadata["auth.google.tokenSource"].(string); source != "compute-metadata" {
- return false
- }
- if acct, _ := tok.Metadata["auth.google.serviceAccount"].(string); acct != "default" {
- return false
- }
- return true
-}
-
-func isDirectPathXdsUsed(o *Options) bool {
- // Method 1: Enable DirectPath xDS by env;
- if b, _ := strconv.ParseBool(os.Getenv(enableDirectPathXdsEnvVar)); b {
- return true
- }
- // Method 2: Enable DirectPath xDS by option;
- if o.InternalOptions != nil && o.InternalOptions.EnableDirectPathXds {
- return true
- }
- return false
-}
-
-// configureDirectPath returns some dial options and an endpoint to use if the
-// configuration allows the use of direct path. If it does not the provided
-// grpcOpts and endpoint are returned.
-func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) {
- if isDirectPathEnabled(endpoint, opts) && metadata.OnGCE() && isTokenProviderDirectPathCompatible(creds, opts) {
- // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates.
- grpcOpts = []grpc.DialOption{
- grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))}
- if timeoutDialerOption != nil {
- grpcOpts = append(grpcOpts, timeoutDialerOption)
- }
- // Check if google-c2p resolver is enabled for DirectPath
- if isDirectPathXdsUsed(opts) {
- // google-c2p resolver target must not have a port number
- if addr, _, err := net.SplitHostPort(endpoint); err == nil {
- endpoint = "google-c2p:///" + addr
- } else {
- endpoint = "google-c2p:///" + endpoint
- }
- } else {
- if !strings.HasPrefix(endpoint, "dns:///") {
- endpoint = "dns:///" + endpoint
- }
- grpcOpts = append(grpcOpts,
- // For now all DirectPath go clients will be using the following lb config, but in future
- // when different services need different configs, then we should change this to a
- // per-service config.
- grpc.WithDisableServiceConfig(),
- grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`))
- }
- // TODO: add support for system parameters (quota project, request reason) via chained interceptor.
- }
- return grpcOpts, endpoint
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/upstream/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
deleted file mode 100644
index 81c956b030b..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
+++ /dev/null
@@ -1,329 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package grpctransport
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/credentials"
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/transport"
- "go.opencensus.io/plugin/ocgrpc"
- "google.golang.org/grpc"
- grpccreds "google.golang.org/grpc/credentials"
- grpcinsecure "google.golang.org/grpc/credentials/insecure"
-)
-
-const (
- // Check env to disable DirectPath traffic.
- disableDirectPathEnvVar = "GOOGLE_CLOUD_DISABLE_DIRECT_PATH"
-
- // Check env to decide if using google-c2p resolver for DirectPath traffic.
- enableDirectPathXdsEnvVar = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS"
-
- quotaProjectHeaderKey = "X-Goog-User-Project"
-)
-
-var (
- // Set at init time by dial_socketopt.go. If nil, socketopt is not supported.
- timeoutDialerOption grpc.DialOption
-)
-
-// Options used to configure a [GRPCClientConnPool] from [Dial].
-type Options struct {
- // DisableTelemetry disables default telemetry (OpenCensus). An example
- // reason to do so would be to bind custom telemetry that overrides the
- // defaults.
- DisableTelemetry bool
- // DisableAuthentication specifies that no authentication should be used. It
- // is suitable only for testing and for accessing public resources, like
- // public Google Cloud Storage buckets.
- DisableAuthentication bool
- // Endpoint overrides the default endpoint to be used for a service.
- Endpoint string
- // Metadata is extra gRPC metadata that will be appended to every outgoing
- // request.
- Metadata map[string]string
- // GRPCDialOpts are dial options that will be passed to `grpc.Dial` when
- // establishing a`grpc.Conn``
- GRPCDialOpts []grpc.DialOption
- // PoolSize is specifies how many connections to balance between when making
- // requests. If unset or less than 1, the value defaults to 1.
- PoolSize int
- // Credentials used to add Authorization metadata to all requests. If set
- // DetectOpts are ignored.
- Credentials *auth.Credentials
- // DetectOpts configures settings for detect Application Default
- // Credentials.
- DetectOpts *credentials.DetectOptions
- // UniverseDomain is the default service domain for a given Cloud universe.
- // The default value is "googleapis.com". This is the universe domain
- // configured for the client, which will be compared to the universe domain
- // that is separately configured for the credentials.
- UniverseDomain string
-
- // InternalOptions are NOT meant to be set directly by consumers of this
- // package, they should only be set by generated client code.
- InternalOptions *InternalOptions
-}
-
-// client returns the client a user set for the detect options or nil if one was
-// not set.
-func (o *Options) client() *http.Client {
- if o.DetectOpts != nil && o.DetectOpts.Client != nil {
- return o.DetectOpts.Client
- }
- return nil
-}
-
-func (o *Options) validate() error {
- if o == nil {
- return errors.New("grpctransport: opts required to be non-nil")
- }
- if o.InternalOptions != nil && o.InternalOptions.SkipValidation {
- return nil
- }
- hasCreds := o.Credentials != nil ||
- (o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) ||
- (o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "")
- if o.DisableAuthentication && hasCreds {
- return errors.New("grpctransport: DisableAuthentication is incompatible with options that set or detect credentials")
- }
- return nil
-}
-
-func (o *Options) resolveDetectOptions() *credentials.DetectOptions {
- io := o.InternalOptions
- // soft-clone these so we are not updating a ref the user holds and may reuse
- do := transport.CloneDetectOptions(o.DetectOpts)
-
- // If scoped JWTs are enabled user provided an aud, allow self-signed JWT.
- if (io != nil && io.EnableJWTWithScope) || do.Audience != "" {
- do.UseSelfSignedJWT = true
- }
- // Only default scopes if user did not also set an audience.
- if len(do.Scopes) == 0 && do.Audience == "" && io != nil && len(io.DefaultScopes) > 0 {
- do.Scopes = make([]string, len(io.DefaultScopes))
- copy(do.Scopes, io.DefaultScopes)
- }
- if len(do.Scopes) == 0 && do.Audience == "" && io != nil {
- do.Audience = o.InternalOptions.DefaultAudience
- }
- return do
-}
-
-// InternalOptions are only meant to be set by generated client code. These are
-// not meant to be set directly by consumers of this package. Configuration in
-// this type is considered EXPERIMENTAL and may be removed at any time in the
-// future without warning.
-type InternalOptions struct {
- // EnableNonDefaultSAForDirectPath overrides the default requirement for
- // using the default service account for DirectPath.
- EnableNonDefaultSAForDirectPath bool
- // EnableDirectPath overrides the default attempt to use DirectPath.
- EnableDirectPath bool
- // EnableDirectPathXds overrides the default DirectPath type. It is only
- // valid when DirectPath is enabled.
- EnableDirectPathXds bool
- // EnableJWTWithScope specifies if scope can be used with self-signed JWT.
- EnableJWTWithScope bool
- // DefaultAudience specifies a default audience to be used as the audience
- // field ("aud") for the JWT token authentication.
- DefaultAudience string
- // DefaultEndpointTemplate combined with UniverseDomain specifies
- // the default endpoint.
- DefaultEndpointTemplate string
- // DefaultMTLSEndpoint specifies the default mTLS endpoint.
- DefaultMTLSEndpoint string
- // DefaultScopes specifies the default OAuth2 scopes to be used for a
- // service.
- DefaultScopes []string
- // SkipValidation bypasses validation on Options. It should only be used
- // internally for clients that needs more control over their transport.
- SkipValidation bool
-}
-
-// Dial returns a GRPCClientConnPool that can be used to communicate with a
-// Google cloud service, configured with the provided [Options]. It
-// automatically appends Authorization metadata to all outgoing requests.
-func Dial(ctx context.Context, secure bool, opts *Options) (GRPCClientConnPool, error) {
- if err := opts.validate(); err != nil {
- return nil, err
- }
- if opts.PoolSize <= 1 {
- conn, err := dial(ctx, secure, opts)
- if err != nil {
- return nil, err
- }
- return &singleConnPool{conn}, nil
- }
- pool := &roundRobinConnPool{}
- for i := 0; i < opts.PoolSize; i++ {
- conn, err := dial(ctx, secure, opts)
- if err != nil {
- // ignore close error, if any
- defer pool.Close()
- return nil, err
- }
- pool.conns = append(pool.conns, conn)
- }
- return pool, nil
-}
-
-// return a GRPCClientConnPool if pool == 1 or else a pool of of them if >1
-func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, error) {
- tOpts := &transport.Options{
- Endpoint: opts.Endpoint,
- Client: opts.client(),
- UniverseDomain: opts.UniverseDomain,
- }
- if io := opts.InternalOptions; io != nil {
- tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate
- tOpts.DefaultMTLSEndpoint = io.DefaultMTLSEndpoint
- tOpts.EnableDirectPath = io.EnableDirectPath
- tOpts.EnableDirectPathXds = io.EnableDirectPathXds
- }
- transportCreds, endpoint, err := transport.GetGRPCTransportCredsAndEndpoint(tOpts)
- if err != nil {
- return nil, err
- }
-
- if !secure {
- transportCreds = grpcinsecure.NewCredentials()
- }
-
- // Initialize gRPC dial options with transport-level security options.
- grpcOpts := []grpc.DialOption{
- grpc.WithTransportCredentials(transportCreds),
- }
-
- // Authentication can only be sent when communicating over a secure connection.
- if !opts.DisableAuthentication {
- metadata := opts.Metadata
-
- var creds *auth.Credentials
- if opts.Credentials != nil {
- creds = opts.Credentials
- } else {
- var err error
- creds, err = credentials.DetectDefault(opts.resolveDetectOptions())
- if err != nil {
- return nil, err
- }
- }
-
- qp, err := creds.QuotaProjectID(ctx)
- if err != nil {
- return nil, err
- }
- if qp != "" {
- if metadata == nil {
- metadata = make(map[string]string, 1)
- }
- metadata[quotaProjectHeaderKey] = qp
- }
- grpcOpts = append(grpcOpts,
- grpc.WithPerRPCCredentials(&grpcCredentialsProvider{
- creds: creds,
- metadata: metadata,
- clientUniverseDomain: opts.UniverseDomain,
- }),
- )
-
- // Attempt Direct Path
- grpcOpts, endpoint = configureDirectPath(grpcOpts, opts, endpoint, creds)
- }
-
- // Add tracing, but before the other options, so that clients can override the
- // gRPC stats handler.
- // This assumes that gRPC options are processed in order, left to right.
- grpcOpts = addOCStatsHandler(grpcOpts, opts)
- grpcOpts = append(grpcOpts, opts.GRPCDialOpts...)
-
- return grpc.DialContext(ctx, endpoint, grpcOpts...)
-}
-
-// grpcCredentialsProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials.
-type grpcCredentialsProvider struct {
- creds *auth.Credentials
-
- secure bool
-
- // Additional metadata attached as headers.
- metadata map[string]string
- clientUniverseDomain string
-}
-
-// getClientUniverseDomain returns the default service domain for a given Cloud universe.
-// The default value is "googleapis.com". This is the universe domain
-// configured for the client, which will be compared to the universe domain
-// that is separately configured for the credentials.
-func (c *grpcCredentialsProvider) getClientUniverseDomain() string {
- if c.clientUniverseDomain == "" {
- return internal.DefaultUniverseDomain
- }
- return c.clientUniverseDomain
-}
-
-func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
- credentialsUniverseDomain, err := c.creds.UniverseDomain(ctx)
- if err != nil {
- return nil, err
- }
- if err := transport.ValidateUniverseDomain(c.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
- return nil, err
- }
- token, err := c.creds.Token(ctx)
- if err != nil {
- return nil, err
- }
- if c.secure {
- ri, _ := grpccreds.RequestInfoFromContext(ctx)
- if err = grpccreds.CheckSecurityLevel(ri.AuthInfo, grpccreds.PrivacyAndIntegrity); err != nil {
- return nil, fmt.Errorf("unable to transfer credentials PerRPCCredentials: %v", err)
- }
- }
- metadata := make(map[string]string, len(c.metadata)+1)
- setAuthMetadata(token, metadata)
- for k, v := range c.metadata {
- metadata[k] = v
- }
- return metadata, nil
-}
-
-// setAuthMetadata uses the provided token to set the Authorization metadata.
-// If the token.Type is empty, the type is assumed to be Bearer.
-func setAuthMetadata(token *auth.Token, m map[string]string) {
- typ := token.Type
- if typ == "" {
- typ = internal.TokenTypeBearer
- }
- m["authorization"] = typ + " " + token.Value
-}
-
-func (c *grpcCredentialsProvider) RequireTransportSecurity() bool {
- return c.secure
-}
-
-func addOCStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption {
- if opts.DisableTelemetry {
- return dialOpts
- }
- return append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/grpctransport/pool.go b/upstream/vendor/cloud.google.com/go/auth/grpctransport/pool.go
deleted file mode 100644
index 642679f9b76..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/grpctransport/pool.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package grpctransport
-
-import (
- "context"
- "fmt"
- "sync/atomic"
-
- "google.golang.org/grpc"
-)
-
-// GRPCClientConnPool is an interface that satisfies
-// [google.golang.org/grpc.ClientConnInterface] and has some utility functions
-// that are needed for connection lifecycle when using in a client library. It
-// may be a pool or a single connection. This interface is not intended to, and
-// can't be, implemented by others.
-type GRPCClientConnPool interface {
- // Connection returns a [google.golang.org/grpc.ClientConn] from the pool.
- //
- // ClientConn aren't returned to the pool and should not be closed directly.
- Connection() *grpc.ClientConn
-
- // Len returns the number of connections in the pool. It will always return
- // the same value.
- Len() int
-
- // Close closes every ClientConn in the pool. The error returned by Close
- // may be a single error or multiple errors.
- Close() error
-
- grpc.ClientConnInterface
-
- // private ensure others outside this package can't implement this type
- private()
-}
-
-// singleConnPool is a special case for a single connection.
-type singleConnPool struct {
- *grpc.ClientConn
-}
-
-func (p *singleConnPool) Connection() *grpc.ClientConn { return p.ClientConn }
-func (p *singleConnPool) Len() int { return 1 }
-func (p *singleConnPool) private() {}
-
-type roundRobinConnPool struct {
- conns []*grpc.ClientConn
-
- idx uint32 // access via sync/atomic
-}
-
-func (p *roundRobinConnPool) Len() int {
- return len(p.conns)
-}
-
-func (p *roundRobinConnPool) Connection() *grpc.ClientConn {
- i := atomic.AddUint32(&p.idx, 1)
- return p.conns[i%uint32(len(p.conns))]
-}
-
-func (p *roundRobinConnPool) Close() error {
- var errs multiError
- for _, conn := range p.conns {
- if err := conn.Close(); err != nil {
- errs = append(errs, err)
- }
- }
- if len(errs) == 0 {
- return nil
- }
- return errs
-}
-
-func (p *roundRobinConnPool) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...grpc.CallOption) error {
- return p.Connection().Invoke(ctx, method, args, reply, opts...)
-}
-
-func (p *roundRobinConnPool) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
- return p.Connection().NewStream(ctx, desc, method, opts...)
-}
-
-func (p *roundRobinConnPool) private() {}
-
-// multiError represents errors from multiple conns in the group.
-type multiError []error
-
-func (m multiError) Error() string {
- s, n := "", 0
- for _, e := range m {
- if e != nil {
- if n == 0 {
- s = e.Error()
- }
- n++
- }
- }
- switch n {
- case 0:
- return "(0 errors)"
- case 1:
- return s
- case 2:
- return s + " (and 1 other error)"
- }
- return fmt.Sprintf("%s (and %d other errors)", s, n-1)
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/upstream/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
deleted file mode 100644
index 06acc04151a..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
+++ /dev/null
@@ -1,208 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httptransport
-
-import (
- "crypto/tls"
- "errors"
- "fmt"
- "net/http"
-
- "cloud.google.com/go/auth"
- detect "cloud.google.com/go/auth/credentials"
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/transport"
-)
-
-// ClientCertProvider is a function that returns a TLS client certificate to be
-// used when opening TLS connections. It follows the same semantics as
-// [crypto/tls.Config.GetClientCertificate].
-type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
-
-// Options used to configure a [net/http.Client] from [NewClient].
-type Options struct {
- // DisableTelemetry disables default telemetry (OpenCensus). An example
- // reason to do so would be to bind custom telemetry that overrides the
- // defaults.
- DisableTelemetry bool
- // DisableAuthentication specifies that no authentication should be used. It
- // is suitable only for testing and for accessing public resources, like
- // public Google Cloud Storage buckets.
- DisableAuthentication bool
- // Headers are extra HTTP headers that will be appended to every outgoing
- // request.
- Headers http.Header
- // BaseRoundTripper overrides the base transport used for serving requests.
- // If specified ClientCertProvider is ignored.
- BaseRoundTripper http.RoundTripper
- // Endpoint overrides the default endpoint to be used for a service.
- Endpoint string
- // APIKey specifies an API key to be used as the basis for authentication.
- // If set DetectOpts are ignored.
- APIKey string
- // Credentials used to add Authorization header to all requests. If set
- // DetectOpts are ignored.
- Credentials *auth.Credentials
- // ClientCertProvider is a function that returns a TLS client certificate to
- // be used when opening TLS connections. It follows the same semantics as
- // crypto/tls.Config.GetClientCertificate.
- ClientCertProvider ClientCertProvider
- // DetectOpts configures settings for detect Application Default
- // Credentials.
- DetectOpts *detect.DetectOptions
- // UniverseDomain is the default service domain for a given Cloud universe.
- // The default value is "googleapis.com". This is the universe domain
- // configured for the client, which will be compared to the universe domain
- // that is separately configured for the credentials.
- UniverseDomain string
-
- // InternalOptions are NOT meant to be set directly by consumers of this
- // package, they should only be set by generated client code.
- InternalOptions *InternalOptions
-}
-
-func (o *Options) validate() error {
- if o == nil {
- return errors.New("httptransport: opts required to be non-nil")
- }
- if o.InternalOptions != nil && o.InternalOptions.SkipValidation {
- return nil
- }
- hasCreds := o.APIKey != "" ||
- o.Credentials != nil ||
- (o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) ||
- (o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "")
- if o.DisableAuthentication && hasCreds {
- return errors.New("httptransport: DisableAuthentication is incompatible with options that set or detect credentials")
- }
- return nil
-}
-
-// client returns the client a user set for the detect options or nil if one was
-// not set.
-func (o *Options) client() *http.Client {
- if o.DetectOpts != nil && o.DetectOpts.Client != nil {
- return o.DetectOpts.Client
- }
- return nil
-}
-
-func (o *Options) resolveDetectOptions() *detect.DetectOptions {
- io := o.InternalOptions
- // soft-clone these so we are not updating a ref the user holds and may reuse
- do := transport.CloneDetectOptions(o.DetectOpts)
-
- // If scoped JWTs are enabled user provided an aud, allow self-signed JWT.
- if (io != nil && io.EnableJWTWithScope) || do.Audience != "" {
- do.UseSelfSignedJWT = true
- }
- // Only default scopes if user did not also set an audience.
- if len(do.Scopes) == 0 && do.Audience == "" && io != nil && len(io.DefaultScopes) > 0 {
- do.Scopes = make([]string, len(io.DefaultScopes))
- copy(do.Scopes, io.DefaultScopes)
- }
- if len(do.Scopes) == 0 && do.Audience == "" && io != nil {
- do.Audience = o.InternalOptions.DefaultAudience
- }
- return do
-}
-
-// InternalOptions are only meant to be set by generated client code. These are
-// not meant to be set directly by consumers of this package. Configuration in
-// this type is considered EXPERIMENTAL and may be removed at any time in the
-// future without warning.
-type InternalOptions struct {
- // EnableJWTWithScope specifies if scope can be used with self-signed JWT.
- EnableJWTWithScope bool
- // DefaultAudience specifies a default audience to be used as the audience
- // field ("aud") for the JWT token authentication.
- DefaultAudience string
- // DefaultEndpointTemplate combined with UniverseDomain specifies the
- // default endpoint.
- DefaultEndpointTemplate string
- // DefaultMTLSEndpoint specifies the default mTLS endpoint.
- DefaultMTLSEndpoint string
- // DefaultScopes specifies the default OAuth2 scopes to be used for a
- // service.
- DefaultScopes []string
- // SkipValidation bypasses validation on Options. It should only be used
- // internally for clients that needs more control over their transport.
- SkipValidation bool
-}
-
-// AddAuthorizationMiddleware adds a middleware to the provided client's
-// transport that sets the Authorization header with the value produced by the
-// provided [cloud.google.com/go/auth.Credentials]. An error is returned only
-// if client or creds is nil.
-func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) error {
- if client == nil || creds == nil {
- return fmt.Errorf("httptransport: client and tp must not be nil")
- }
- base := client.Transport
- if base == nil {
- base = http.DefaultTransport.(*http.Transport).Clone()
- }
- client.Transport = &authTransport{
- creds: creds,
- base: base,
- // TODO(quartzmo): Somehow set clientUniverseDomain from impersonate calls.
- }
- return nil
-}
-
-// NewClient returns a [net/http.Client] that can be used to communicate with a
-// Google cloud service, configured with the provided [Options]. It
-// automatically appends Authorization headers to all outgoing requests.
-func NewClient(opts *Options) (*http.Client, error) {
- if err := opts.validate(); err != nil {
- return nil, err
- }
-
- tOpts := &transport.Options{
- Endpoint: opts.Endpoint,
- ClientCertProvider: opts.ClientCertProvider,
- Client: opts.client(),
- UniverseDomain: opts.UniverseDomain,
- }
- if io := opts.InternalOptions; io != nil {
- tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate
- tOpts.DefaultMTLSEndpoint = io.DefaultMTLSEndpoint
- }
- clientCertProvider, dialTLSContext, err := transport.GetHTTPTransportConfig(tOpts)
- if err != nil {
- return nil, err
- }
- baseRoundTripper := opts.BaseRoundTripper
- if baseRoundTripper == nil {
- baseRoundTripper = defaultBaseTransport(clientCertProvider, dialTLSContext)
- }
- trans, err := newTransport(baseRoundTripper, opts)
- if err != nil {
- return nil, err
- }
- return &http.Client{
- Transport: trans,
- }, nil
-}
-
-// SetAuthHeader uses the provided token to set the Authorization header on a
-// request. If the token.Type is empty, the type is assumed to be Bearer.
-func SetAuthHeader(token *auth.Token, req *http.Request) {
- typ := token.Type
- if typ == "" {
- typ = internal.TokenTypeBearer
- }
- req.Header.Set("Authorization", typ+" "+token.Value)
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/httptransport/trace.go b/upstream/vendor/cloud.google.com/go/auth/httptransport/trace.go
deleted file mode 100644
index 467c477c04d..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/httptransport/trace.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httptransport
-
-import (
- "encoding/binary"
- "encoding/hex"
- "fmt"
- "net/http"
- "strconv"
- "strings"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-const (
- httpHeaderMaxSize = 200
- cloudTraceHeader = `X-Cloud-Trace-Context`
-)
-
-// asserts the httpFormat fulfills this foreign interface
-var _ propagation.HTTPFormat = (*httpFormat)(nil)
-
-// httpFormat implements propagation.httpFormat to propagate
-// traces in HTTP headers for Google Cloud Platform and Cloud Trace.
-type httpFormat struct{}
-
-// SpanContextFromRequest extracts a Cloud Trace span context from incoming requests.
-func (f *httpFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
- h := req.Header.Get(cloudTraceHeader)
- // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat.
- // Return if the header is empty or missing, or if the header is unreasonably
- // large, to avoid making unnecessary copies of a large string.
- if h == "" || len(h) > httpHeaderMaxSize {
- return trace.SpanContext{}, false
- }
-
- // Parse the trace id field.
- slash := strings.Index(h, `/`)
- if slash == -1 {
- return trace.SpanContext{}, false
- }
- tid, h := h[:slash], h[slash+1:]
-
- buf, err := hex.DecodeString(tid)
- if err != nil {
- return trace.SpanContext{}, false
- }
- copy(sc.TraceID[:], buf)
-
- // Parse the span id field.
- spanstr := h
- semicolon := strings.Index(h, `;`)
- if semicolon != -1 {
- spanstr, h = h[:semicolon], h[semicolon+1:]
- }
- sid, err := strconv.ParseUint(spanstr, 10, 64)
- if err != nil {
- return trace.SpanContext{}, false
- }
- binary.BigEndian.PutUint64(sc.SpanID[:], sid)
-
- // Parse the options field, options field is optional.
- if !strings.HasPrefix(h, "o=") {
- return sc, true
- }
- o, err := strconv.ParseUint(h[2:], 10, 32)
- if err != nil {
- return trace.SpanContext{}, false
- }
- sc.TraceOptions = trace.TraceOptions(o)
- return sc, true
-}
-
-// SpanContextToRequest modifies the given request to include a Cloud Trace header.
-func (f *httpFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
- sid := binary.BigEndian.Uint64(sc.SpanID[:])
- header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions))
- req.Header.Set(cloudTraceHeader, header)
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/httptransport/transport.go b/upstream/vendor/cloud.google.com/go/auth/httptransport/transport.go
deleted file mode 100644
index 94caeb00f0a..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/httptransport/transport.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httptransport
-
-import (
- "context"
- "crypto/tls"
- "net"
- "net/http"
- "time"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/credentials"
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/transport"
- "cloud.google.com/go/auth/internal/transport/cert"
- "go.opencensus.io/plugin/ochttp"
- "golang.org/x/net/http2"
-)
-
-const (
- quotaProjectHeaderKey = "X-Goog-User-Project"
-)
-
-func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) {
- var headers = opts.Headers
- ht := &headerTransport{
- base: base,
- headers: headers,
- }
- var trans http.RoundTripper = ht
- trans = addOCTransport(trans, opts)
- switch {
- case opts.DisableAuthentication:
- // Do nothing.
- case opts.APIKey != "":
- qp := internal.GetQuotaProject(nil, opts.Headers.Get(quotaProjectHeaderKey))
- if qp != "" {
- if headers == nil {
- headers = make(map[string][]string, 1)
- }
- headers.Set(quotaProjectHeaderKey, qp)
- }
- trans = &apiKeyTransport{
- Transport: trans,
- Key: opts.APIKey,
- }
- default:
- var creds *auth.Credentials
- if opts.Credentials != nil {
- creds = opts.Credentials
- } else {
- var err error
- creds, err = credentials.DetectDefault(opts.resolveDetectOptions())
- if err != nil {
- return nil, err
- }
- }
- qp, err := creds.QuotaProjectID(context.Background())
- if err != nil {
- return nil, err
- }
- if qp != "" {
- if headers == nil {
- headers = make(map[string][]string, 1)
- }
- headers.Set(quotaProjectHeaderKey, qp)
- }
- creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil)
- trans = &authTransport{
- base: trans,
- creds: creds,
- clientUniverseDomain: opts.UniverseDomain,
- }
- }
- return trans, nil
-}
-
-// defaultBaseTransport returns the base HTTP transport.
-// On App Engine, this is urlfetch.Transport.
-// Otherwise, use a default transport, taking most defaults from
-// http.DefaultTransport.
-// If TLSCertificate is available, set TLSClientConfig as well.
-func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper {
- trans := http.DefaultTransport.(*http.Transport).Clone()
- trans.MaxIdleConnsPerHost = 100
-
- if clientCertSource != nil {
- trans.TLSClientConfig = &tls.Config{
- GetClientCertificate: clientCertSource,
- }
- }
- if dialTLSContext != nil {
- // If DialTLSContext is set, TLSClientConfig wil be ignored
- trans.DialTLSContext = dialTLSContext
- }
-
- // Configures the ReadIdleTimeout HTTP/2 option for the
- // transport. This allows broken idle connections to be pruned more quickly,
- // preventing the client from attempting to re-use connections that will no
- // longer work.
- http2Trans, err := http2.ConfigureTransports(trans)
- if err == nil {
- http2Trans.ReadIdleTimeout = time.Second * 31
- }
-
- return trans
-}
-
-type apiKeyTransport struct {
- // Key is the API Key to set on requests.
- Key string
- // Transport is the underlying HTTP transport.
- // If nil, http.DefaultTransport is used.
- Transport http.RoundTripper
-}
-
-func (t *apiKeyTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- newReq := *req
- args := newReq.URL.Query()
- args.Set("key", t.Key)
- newReq.URL.RawQuery = args.Encode()
- return t.Transport.RoundTrip(&newReq)
-}
-
-type headerTransport struct {
- headers http.Header
- base http.RoundTripper
-}
-
-func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- rt := t.base
- newReq := *req
- newReq.Header = make(http.Header)
- for k, vv := range req.Header {
- newReq.Header[k] = vv
- }
-
- for k, v := range t.headers {
- newReq.Header[k] = v
- }
-
- return rt.RoundTrip(&newReq)
-}
-
-func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper {
- if opts.DisableTelemetry {
- return trans
- }
- return &ochttp.Transport{
- Base: trans,
- Propagation: &httpFormat{},
- }
-}
-
-type authTransport struct {
- creds *auth.Credentials
- base http.RoundTripper
- clientUniverseDomain string
-}
-
-// getClientUniverseDomain returns the universe domain configured for the client.
-// The default value is "googleapis.com".
-func (t *authTransport) getClientUniverseDomain() string {
- if t.clientUniverseDomain == "" {
- return internal.DefaultUniverseDomain
- }
- return t.clientUniverseDomain
-}
-
-// RoundTrip authorizes and authenticates the request with an
-// access token from Transport's Source. Per the RoundTripper contract we must
-// not modify the initial request, so we clone it, and we must close the body
-// on any errors that happens during our token logic.
-func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- reqBodyClosed := false
- if req.Body != nil {
- defer func() {
- if !reqBodyClosed {
- req.Body.Close()
- }
- }()
- }
- credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context())
- if err != nil {
- return nil, err
- }
- if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
- return nil, err
- }
- token, err := t.creds.Token(req.Context())
- if err != nil {
- return nil, err
- }
- req2 := req.Clone(req.Context())
- SetAuthHeader(token, req2)
- reqBodyClosed = true
- return t.base.RoundTrip(req2)
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go b/upstream/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go
deleted file mode 100644
index 9cd4bed61b5..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package credsfile is meant to hide implementation details from the pubic
-// surface of the detect package. It should not import any other packages in
-// this module. It is located under the main internal package so other
-// sub-packages can use these parsed types as well.
-package credsfile
-
-import (
- "os"
- "os/user"
- "path/filepath"
- "runtime"
-)
-
-const (
- // GoogleAppCredsEnvVar is the environment variable for setting the
- // application default credentials.
- GoogleAppCredsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS"
- userCredsFilename = "application_default_credentials.json"
-)
-
-// CredentialType represents different credential filetypes Google credentials
-// can be.
-type CredentialType int
-
-const (
- // UnknownCredType is an unidentified file type.
- UnknownCredType CredentialType = iota
- // UserCredentialsKey represents a user creds file type.
- UserCredentialsKey
- // ServiceAccountKey represents a service account file type.
- ServiceAccountKey
- // ImpersonatedServiceAccountKey represents a impersonated service account
- // file type.
- ImpersonatedServiceAccountKey
- // ExternalAccountKey represents a external account file type.
- ExternalAccountKey
- // GDCHServiceAccountKey represents a GDCH file type.
- GDCHServiceAccountKey
- // ExternalAccountAuthorizedUserKey represents a external account authorized
- // user file type.
- ExternalAccountAuthorizedUserKey
-)
-
-// parseCredentialType returns the associated filetype based on the parsed
-// typeString provided.
-func parseCredentialType(typeString string) CredentialType {
- switch typeString {
- case "service_account":
- return ServiceAccountKey
- case "authorized_user":
- return UserCredentialsKey
- case "impersonated_service_account":
- return ImpersonatedServiceAccountKey
- case "external_account":
- return ExternalAccountKey
- case "external_account_authorized_user":
- return ExternalAccountAuthorizedUserKey
- case "gdch_service_account":
- return GDCHServiceAccountKey
- default:
- return UnknownCredType
- }
-}
-
-// GetFileNameFromEnv returns the override if provided or detects a filename
-// from the environment.
-func GetFileNameFromEnv(override string) string {
- if override != "" {
- return override
- }
- return os.Getenv(GoogleAppCredsEnvVar)
-}
-
-// GetWellKnownFileName tries to locate the filepath for the user credential
-// file based on the environment.
-func GetWellKnownFileName() string {
- if runtime.GOOS == "windows" {
- return filepath.Join(os.Getenv("APPDATA"), "gcloud", userCredsFilename)
- }
- return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", userCredsFilename)
-}
-
-// guessUnixHomeDir default to checking for HOME, but not all unix systems have
-// this set, do have a fallback.
-func guessUnixHomeDir() string {
- if v := os.Getenv("HOME"); v != "" {
- return v
- }
- if u, err := user.Current(); err == nil {
- return u.HomeDir
- }
- return ""
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go b/upstream/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go
deleted file mode 100644
index 69e30779f98..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package credsfile
-
-import (
- "encoding/json"
-)
-
-// Config3LO is the internals of a client creds file.
-type Config3LO struct {
- ClientID string `json:"client_id"`
- ClientSecret string `json:"client_secret"`
- RedirectURIs []string `json:"redirect_uris"`
- AuthURI string `json:"auth_uri"`
- TokenURI string `json:"token_uri"`
-}
-
-// ClientCredentialsFile representation.
-type ClientCredentialsFile struct {
- Web *Config3LO `json:"web"`
- Installed *Config3LO `json:"installed"`
- UniverseDomain string `json:"universe_domain"`
-}
-
-// ServiceAccountFile representation.
-type ServiceAccountFile struct {
- Type string `json:"type"`
- ProjectID string `json:"project_id"`
- PrivateKeyID string `json:"private_key_id"`
- PrivateKey string `json:"private_key"`
- ClientEmail string `json:"client_email"`
- ClientID string `json:"client_id"`
- AuthURL string `json:"auth_uri"`
- TokenURL string `json:"token_uri"`
- UniverseDomain string `json:"universe_domain"`
-}
-
-// UserCredentialsFile representation.
-type UserCredentialsFile struct {
- Type string `json:"type"`
- ClientID string `json:"client_id"`
- ClientSecret string `json:"client_secret"`
- QuotaProjectID string `json:"quota_project_id"`
- RefreshToken string `json:"refresh_token"`
- UniverseDomain string `json:"universe_domain"`
-}
-
-// ExternalAccountFile representation.
-type ExternalAccountFile struct {
- Type string `json:"type"`
- ClientID string `json:"client_id"`
- ClientSecret string `json:"client_secret"`
- Audience string `json:"audience"`
- SubjectTokenType string `json:"subject_token_type"`
- ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"`
- TokenURL string `json:"token_url"`
- CredentialSource *CredentialSource `json:"credential_source,omitempty"`
- TokenInfoURL string `json:"token_info_url"`
- ServiceAccountImpersonation *ServiceAccountImpersonationInfo `json:"service_account_impersonation,omitempty"`
- QuotaProjectID string `json:"quota_project_id"`
- WorkforcePoolUserProject string `json:"workforce_pool_user_project"`
- UniverseDomain string `json:"universe_domain"`
-}
-
-// ExternalAccountAuthorizedUserFile representation.
-type ExternalAccountAuthorizedUserFile struct {
- Type string `json:"type"`
- Audience string `json:"audience"`
- ClientID string `json:"client_id"`
- ClientSecret string `json:"client_secret"`
- RefreshToken string `json:"refresh_token"`
- TokenURL string `json:"token_url"`
- TokenInfoURL string `json:"token_info_url"`
- RevokeURL string `json:"revoke_url"`
- QuotaProjectID string `json:"quota_project_id"`
- UniverseDomain string `json:"universe_domain"`
-}
-
-// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange.
-//
-// One field amongst File, URL, and Executable should be filled, depending on the kind of credential in question.
-// The EnvironmentID should start with AWS if being used for an AWS credential.
-type CredentialSource struct {
- File string `json:"file"`
- URL string `json:"url"`
- Headers map[string]string `json:"headers"`
- Executable *ExecutableConfig `json:"executable,omitempty"`
- EnvironmentID string `json:"environment_id"`
- RegionURL string `json:"region_url"`
- RegionalCredVerificationURL string `json:"regional_cred_verification_url"`
- CredVerificationURL string `json:"cred_verification_url"`
- IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"`
- Format *Format `json:"format,omitempty"`
-}
-
-// Format describes the format of a [CredentialSource].
-type Format struct {
- // Type is either "text" or "json". When not provided "text" type is assumed.
- Type string `json:"type"`
- // SubjectTokenFieldName is only required for JSON format. This would be "access_token" for azure.
- SubjectTokenFieldName string `json:"subject_token_field_name"`
-}
-
-// ExecutableConfig represents the command to run for an executable
-// [CredentialSource].
-type ExecutableConfig struct {
- Command string `json:"command"`
- TimeoutMillis int `json:"timeout_millis"`
- OutputFile string `json:"output_file"`
-}
-
-// ServiceAccountImpersonationInfo has impersonation configuration.
-type ServiceAccountImpersonationInfo struct {
- TokenLifetimeSeconds int `json:"token_lifetime_seconds"`
-}
-
-// ImpersonatedServiceAccountFile representation.
-type ImpersonatedServiceAccountFile struct {
- Type string `json:"type"`
- ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"`
- Delegates []string `json:"delegates"`
- CredSource json.RawMessage `json:"source_credentials"`
- UniverseDomain string `json:"universe_domain"`
-}
-
-// GDCHServiceAccountFile represents the Google Distributed Cloud Hosted (GDCH) service identity file.
-type GDCHServiceAccountFile struct {
- Type string `json:"type"`
- FormatVersion string `json:"format_version"`
- Project string `json:"project"`
- Name string `json:"name"`
- CertPath string `json:"ca_cert_path"`
- PrivateKeyID string `json:"private_key_id"`
- PrivateKey string `json:"private_key"`
- TokenURL string `json:"token_uri"`
- UniverseDomain string `json:"universe_domain"`
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go b/upstream/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go
deleted file mode 100644
index a02b9f5df7e..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package credsfile
-
-import (
- "encoding/json"
-)
-
-// ParseServiceAccount parses bytes into a [ServiceAccountFile].
-func ParseServiceAccount(b []byte) (*ServiceAccountFile, error) {
- var f *ServiceAccountFile
- if err := json.Unmarshal(b, &f); err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// ParseClientCredentials parses bytes into a
-// [credsfile.ClientCredentialsFile].
-func ParseClientCredentials(b []byte) (*ClientCredentialsFile, error) {
- var f *ClientCredentialsFile
- if err := json.Unmarshal(b, &f); err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// ParseUserCredentials parses bytes into a [UserCredentialsFile].
-func ParseUserCredentials(b []byte) (*UserCredentialsFile, error) {
- var f *UserCredentialsFile
- if err := json.Unmarshal(b, &f); err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// ParseExternalAccount parses bytes into a [ExternalAccountFile].
-func ParseExternalAccount(b []byte) (*ExternalAccountFile, error) {
- var f *ExternalAccountFile
- if err := json.Unmarshal(b, &f); err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// ParseExternalAccountAuthorizedUser parses bytes into a
-// [ExternalAccountAuthorizedUserFile].
-func ParseExternalAccountAuthorizedUser(b []byte) (*ExternalAccountAuthorizedUserFile, error) {
- var f *ExternalAccountAuthorizedUserFile
- if err := json.Unmarshal(b, &f); err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// ParseImpersonatedServiceAccount parses bytes into a
-// [ImpersonatedServiceAccountFile].
-func ParseImpersonatedServiceAccount(b []byte) (*ImpersonatedServiceAccountFile, error) {
- var f *ImpersonatedServiceAccountFile
- if err := json.Unmarshal(b, &f); err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// ParseGDCHServiceAccount parses bytes into a [GDCHServiceAccountFile].
-func ParseGDCHServiceAccount(b []byte) (*GDCHServiceAccountFile, error) {
- var f *GDCHServiceAccountFile
- if err := json.Unmarshal(b, &f); err != nil {
- return nil, err
- }
- return f, nil
-}
-
-type fileTypeChecker struct {
- Type string `json:"type"`
-}
-
-// ParseFileType determines the [CredentialType] based on bytes provided.
-func ParseFileType(b []byte) (CredentialType, error) {
- var f fileTypeChecker
- if err := json.Unmarshal(b, &f); err != nil {
- return 0, err
- }
- return parseCredentialType(f.Type), nil
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/internal/internal.go b/upstream/vendor/cloud.google.com/go/auth/internal/internal.go
deleted file mode 100644
index 70534e809a4..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/internal/internal.go
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "context"
- "crypto/rsa"
- "crypto/x509"
- "encoding/json"
- "encoding/pem"
- "errors"
- "fmt"
- "io"
- "net/http"
- "os"
- "sync"
- "time"
-
- "cloud.google.com/go/compute/metadata"
-)
-
-const (
- // TokenTypeBearer is the auth header prefix for bearer tokens.
- TokenTypeBearer = "Bearer"
-
- // QuotaProjectEnvVar is the environment variable for setting the quota
- // project.
- QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT"
- projectEnvVar = "GOOGLE_CLOUD_PROJECT"
- maxBodySize = 1 << 20
-
- // DefaultUniverseDomain is the default value for universe domain.
- // Universe domain is the default service domain for a given Cloud universe.
- DefaultUniverseDomain = "googleapis.com"
-)
-
-// CloneDefaultClient returns a [http.Client] with some good defaults.
-func CloneDefaultClient() *http.Client {
- return &http.Client{
- Transport: http.DefaultTransport.(*http.Transport).Clone(),
- Timeout: 30 * time.Second,
- }
-}
-
-// ParseKey converts the binary contents of a private key file
-// to an *rsa.PrivateKey. It detects whether the private key is in a
-// PEM container or not. If so, it extracts the the private key
-// from PEM container before conversion. It only supports PEM
-// containers with no passphrase.
-func ParseKey(key []byte) (*rsa.PrivateKey, error) {
- block, _ := pem.Decode(key)
- if block != nil {
- key = block.Bytes
- }
- parsedKey, err := x509.ParsePKCS8PrivateKey(key)
- if err != nil {
- parsedKey, err = x509.ParsePKCS1PrivateKey(key)
- if err != nil {
- return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err)
- }
- }
- parsed, ok := parsedKey.(*rsa.PrivateKey)
- if !ok {
- return nil, errors.New("private key is invalid")
- }
- return parsed, nil
-}
-
-// GetQuotaProject retrieves quota project with precedence being: override,
-// environment variable, creds json file.
-func GetQuotaProject(b []byte, override string) string {
- if override != "" {
- return override
- }
- if env := os.Getenv(QuotaProjectEnvVar); env != "" {
- return env
- }
- if b == nil {
- return ""
- }
- var v struct {
- QuotaProject string `json:"quota_project_id"`
- }
- if err := json.Unmarshal(b, &v); err != nil {
- return ""
- }
- return v.QuotaProject
-}
-
-// GetProjectID retrieves project with precedence being: override,
-// environment variable, creds json file.
-func GetProjectID(b []byte, override string) string {
- if override != "" {
- return override
- }
- if env := os.Getenv(projectEnvVar); env != "" {
- return env
- }
- if b == nil {
- return ""
- }
- var v struct {
- ProjectID string `json:"project_id"` // standard service account key
- Project string `json:"project"` // gdch key
- }
- if err := json.Unmarshal(b, &v); err != nil {
- return ""
- }
- if v.ProjectID != "" {
- return v.ProjectID
- }
- return v.Project
-}
-
-// ReadAll consumes the whole reader and safely reads the content of its body
-// with some overflow protection.
-func ReadAll(r io.Reader) ([]byte, error) {
- return io.ReadAll(io.LimitReader(r, maxBodySize))
-}
-
-// StaticCredentialsProperty is a helper for creating static credentials
-// properties.
-func StaticCredentialsProperty(s string) StaticProperty {
- return StaticProperty(s)
-}
-
-// StaticProperty always returns that value of the underlying string.
-type StaticProperty string
-
-// GetProperty loads the properly value provided the given context.
-func (p StaticProperty) GetProperty(context.Context) (string, error) {
- return string(p), nil
-}
-
-// ComputeUniverseDomainProvider fetches the credentials universe domain from
-// the google cloud metadata service.
-type ComputeUniverseDomainProvider struct {
- universeDomainOnce sync.Once
- universeDomain string
- universeDomainErr error
-}
-
-// GetProperty fetches the credentials universe domain from the google cloud
-// metadata service.
-func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) {
- c.universeDomainOnce.Do(func() {
- c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx)
- })
- if c.universeDomainErr != nil {
- return "", c.universeDomainErr
- }
- return c.universeDomain, nil
-}
-
-// httpGetMetadataUniverseDomain is a package var for unit test substitution.
-var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) {
- client := metadata.NewClient(&http.Client{Timeout: time.Second})
- // TODO(quartzmo): set ctx on request
- return client.Get("universe/universe_domain")
-}
-
-func getMetadataUniverseDomain(ctx context.Context) (string, error) {
- universeDomain, err := httpGetMetadataUniverseDomain(ctx)
- if err == nil {
- return universeDomain, nil
- }
- if _, ok := err.(metadata.NotDefinedError); ok {
- // http.StatusNotFound (404)
- return DefaultUniverseDomain, nil
- }
- return "", err
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go b/upstream/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
deleted file mode 100644
index dc28b3c3bb5..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jwt
-
-import (
- "bytes"
- "crypto"
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha256"
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "strings"
- "time"
-)
-
-const (
- // HeaderAlgRSA256 is the RS256 [Header.Algorithm].
- HeaderAlgRSA256 = "RS256"
- // HeaderAlgES256 is the ES256 [Header.Algorithm].
- HeaderAlgES256 = "ES256"
- // HeaderType is the standard [Header.Type].
- HeaderType = "JWT"
-)
-
-// Header represents a JWT header.
-type Header struct {
- Algorithm string `json:"alg"`
- Type string `json:"typ"`
- KeyID string `json:"kid"`
-}
-
-func (h *Header) encode() (string, error) {
- b, err := json.Marshal(h)
- if err != nil {
- return "", err
- }
- return base64.RawURLEncoding.EncodeToString(b), nil
-}
-
-// Claims represents the claims set of a JWT.
-type Claims struct {
- // Iss is the issuer JWT claim.
- Iss string `json:"iss"`
- // Scope is the scope JWT claim.
- Scope string `json:"scope,omitempty"`
- // Exp is the expiry JWT claim. If unset, default is in one hour from now.
- Exp int64 `json:"exp"`
- // Iat is the subject issued at claim. If unset, default is now.
- Iat int64 `json:"iat"`
- // Aud is the audience JWT claim. Optional.
- Aud string `json:"aud"`
- // Sub is the subject JWT claim. Optional.
- Sub string `json:"sub,omitempty"`
- // AdditionalClaims contains any additional non-standard JWT claims. Optional.
- AdditionalClaims map[string]interface{} `json:"-"`
-}
-
-func (c *Claims) encode() (string, error) {
- // Compensate for skew
- now := time.Now().Add(-10 * time.Second)
- if c.Iat == 0 {
- c.Iat = now.Unix()
- }
- if c.Exp == 0 {
- c.Exp = now.Add(time.Hour).Unix()
- }
- if c.Exp < c.Iat {
- return "", fmt.Errorf("jwt: invalid Exp = %d; must be later than Iat = %d", c.Exp, c.Iat)
- }
-
- b, err := json.Marshal(c)
- if err != nil {
- return "", err
- }
-
- if len(c.AdditionalClaims) == 0 {
- return base64.RawURLEncoding.EncodeToString(b), nil
- }
-
- // Marshal private claim set and then append it to b.
- prv, err := json.Marshal(c.AdditionalClaims)
- if err != nil {
- return "", fmt.Errorf("invalid map of additional claims %v: %w", c.AdditionalClaims, err)
- }
-
- // Concatenate public and private claim JSON objects.
- if !bytes.HasSuffix(b, []byte{'}'}) {
- return "", fmt.Errorf("invalid JSON %s", b)
- }
- if !bytes.HasPrefix(prv, []byte{'{'}) {
- return "", fmt.Errorf("invalid JSON %s", prv)
- }
- b[len(b)-1] = ',' // Replace closing curly brace with a comma.
- b = append(b, prv[1:]...) // Append private claims.
- return base64.RawURLEncoding.EncodeToString(b), nil
-}
-
-// EncodeJWS encodes the data using the provided key as a JSON web signature.
-func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) {
- head, err := header.encode()
- if err != nil {
- return "", err
- }
- claims, err := c.encode()
- if err != nil {
- return "", err
- }
- ss := fmt.Sprintf("%s.%s", head, claims)
- h := sha256.New()
- h.Write([]byte(ss))
- sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil))
- if err != nil {
- return "", err
- }
- return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil
-}
-
-// DecodeJWS decodes a claim set from a JWS payload.
-func DecodeJWS(payload string) (*Claims, error) {
- // decode returned id token to get expiry
- s := strings.Split(payload, ".")
- if len(s) < 2 {
- return nil, errors.New("invalid token received")
- }
- decoded, err := base64.RawURLEncoding.DecodeString(s[1])
- if err != nil {
- return nil, err
- }
- c := &Claims{}
- if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c); err != nil {
- return nil, err
- }
- if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(&c.AdditionalClaims); err != nil {
- return nil, err
- }
- return c, err
-}
-
-// VerifyJWS tests whether the provided JWT token's signature was produced by
-// the private key associated with the provided public key.
-func VerifyJWS(token string, key *rsa.PublicKey) error {
- parts := strings.Split(token, ".")
- if len(parts) != 3 {
- return errors.New("jwt: invalid token received, token must have 3 parts")
- }
-
- signedContent := parts[0] + "." + parts[1]
- signatureString, err := base64.RawURLEncoding.DecodeString(parts[2])
- if err != nil {
- return err
- }
-
- h := sha256.New()
- h.Write([]byte(signedContent))
- return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString)
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/upstream/vendor/cloud.google.com/go/auth/internal/transport/cba.go
deleted file mode 100644
index 75734906259..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/internal/transport/cba.go
+++ /dev/null
@@ -1,298 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "context"
- "crypto/tls"
- "errors"
- "net"
- "net/http"
- "net/url"
- "os"
- "strconv"
- "strings"
-
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/transport/cert"
- "github.com/google/s2a-go"
- "github.com/google/s2a-go/fallback"
- "google.golang.org/grpc/credentials"
-)
-
-const (
- mTLSModeAlways = "always"
- mTLSModeNever = "never"
- mTLSModeAuto = "auto"
-
- // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false.
- googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A"
- googleAPIUseCertSource = "GOOGLE_API_USE_CLIENT_CERTIFICATE"
- googleAPIUseMTLS = "GOOGLE_API_USE_MTLS_ENDPOINT"
- googleAPIUseMTLSOld = "GOOGLE_API_USE_MTLS"
-
- universeDomainPlaceholder = "UNIVERSE_DOMAIN"
-)
-
-var (
- mdsMTLSAutoConfigSource mtlsConfigSource
- errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com")
-)
-
-// Options is a struct that is duplicated information from the individual
-// transport packages in order to avoid cyclic deps. It correlates 1:1 with
-// fields on httptransport.Options and grpctransport.Options.
-type Options struct {
- Endpoint string
- DefaultMTLSEndpoint string
- DefaultEndpointTemplate string
- ClientCertProvider cert.Provider
- Client *http.Client
- UniverseDomain string
- EnableDirectPath bool
- EnableDirectPathXds bool
-}
-
-// getUniverseDomain returns the default service domain for a given Cloud
-// universe.
-func (o *Options) getUniverseDomain() string {
- if o.UniverseDomain == "" {
- return internal.DefaultUniverseDomain
- }
- return o.UniverseDomain
-}
-
-// isUniverseDomainGDU returns true if the universe domain is the default Google
-// universe.
-func (o *Options) isUniverseDomainGDU() bool {
- return o.getUniverseDomain() == internal.DefaultUniverseDomain
-}
-
-// defaultEndpoint returns the DefaultEndpointTemplate merged with the
-// universe domain if the DefaultEndpointTemplate is set, otherwise returns an
-// empty string.
-func (o *Options) defaultEndpoint() string {
- if o.DefaultEndpointTemplate == "" {
- return ""
- }
- return strings.Replace(o.DefaultEndpointTemplate, universeDomainPlaceholder, o.getUniverseDomain(), 1)
-}
-
-// mergedEndpoint merges a user-provided Endpoint of format host[:port] with the
-// default endpoint.
-func (o *Options) mergedEndpoint() (string, error) {
- defaultEndpoint := o.defaultEndpoint()
- u, err := url.Parse(fixScheme(defaultEndpoint))
- if err != nil {
- return "", err
- }
- return strings.Replace(defaultEndpoint, u.Host, o.Endpoint, 1), nil
-}
-
-func fixScheme(baseURL string) string {
- if !strings.Contains(baseURL, "://") {
- baseURL = "https://" + baseURL
- }
- return baseURL
-}
-
-// GetGRPCTransportCredsAndEndpoint returns an instance of
-// [google.golang.org/grpc/credentials.TransportCredentials], and the
-// corresponding endpoint to use for GRPC client.
-func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCredentials, string, error) {
- config, err := getTransportConfig(opts)
- if err != nil {
- return nil, "", err
- }
-
- defaultTransportCreds := credentials.NewTLS(&tls.Config{
- GetClientCertificate: config.clientCertSource,
- })
- if config.s2aAddress == "" {
- return defaultTransportCreds, config.endpoint, nil
- }
-
- var fallbackOpts *s2a.FallbackOptions
- // In case of S2A failure, fall back to the endpoint that would've been used without S2A.
- if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil {
- fallbackOpts = &s2a.FallbackOptions{
- FallbackClientHandshakeFunc: fallbackHandshake,
- }
- }
-
- s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{
- S2AAddress: config.s2aAddress,
- FallbackOpts: fallbackOpts,
- })
- if err != nil {
- // Use default if we cannot initialize S2A client transport credentials.
- return defaultTransportCreds, config.endpoint, nil
- }
- return s2aTransportCreds, config.s2aMTLSEndpoint, nil
-}
-
-// GetHTTPTransportConfig returns a client certificate source and a function for
-// dialing MTLS with S2A.
-func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, string, string) (net.Conn, error), error) {
- config, err := getTransportConfig(opts)
- if err != nil {
- return nil, nil, err
- }
-
- if config.s2aAddress == "" {
- return config.clientCertSource, nil, nil
- }
-
- var fallbackOpts *s2a.FallbackOptions
- // In case of S2A failure, fall back to the endpoint that would've been used without S2A.
- if fallbackURL, err := url.Parse(config.endpoint); err == nil {
- if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil {
- fallbackOpts = &s2a.FallbackOptions{
- FallbackDialer: &s2a.FallbackDialer{
- Dialer: fallbackDialer,
- ServerAddr: fallbackServerAddr,
- },
- }
- }
- }
-
- dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{
- S2AAddress: config.s2aAddress,
- FallbackOpts: fallbackOpts,
- })
- return nil, dialTLSContextFunc, nil
-}
-
-func getTransportConfig(opts *Options) (*transportConfig, error) {
- clientCertSource, err := getClientCertificateSource(opts)
- if err != nil {
- return nil, err
- }
- endpoint, err := getEndpoint(opts, clientCertSource)
- if err != nil {
- return nil, err
- }
- defaultTransportConfig := transportConfig{
- clientCertSource: clientCertSource,
- endpoint: endpoint,
- }
-
- if !shouldUseS2A(clientCertSource, opts) {
- return &defaultTransportConfig, nil
- }
- if !opts.isUniverseDomainGDU() {
- return nil, errUniverseNotSupportedMTLS
- }
-
- s2aMTLSEndpoint := opts.DefaultMTLSEndpoint
-
- s2aAddress := GetS2AAddress()
- if s2aAddress == "" {
- return &defaultTransportConfig, nil
- }
- return &transportConfig{
- clientCertSource: clientCertSource,
- endpoint: endpoint,
- s2aAddress: s2aAddress,
- s2aMTLSEndpoint: s2aMTLSEndpoint,
- }, nil
-}
-
-// getClientCertificateSource returns a default client certificate source, if
-// not provided by the user.
-//
-// A nil default source can be returned if the source does not exist. Any exceptions
-// encountered while initializing the default source will be reported as client
-// error (ex. corrupt metadata file).
-func getClientCertificateSource(opts *Options) (cert.Provider, error) {
- if !isClientCertificateEnabled() {
- return nil, nil
- } else if opts.ClientCertProvider != nil {
- return opts.ClientCertProvider, nil
- }
- return cert.DefaultProvider()
-
-}
-
-// isClientCertificateEnabled returns true by default, unless explicitly set to false via env var.
-func isClientCertificateEnabled() bool {
- if value, ok := os.LookupEnv(googleAPIUseCertSource); ok {
- // error as false is OK
- b, _ := strconv.ParseBool(value)
- return b
- }
- return true
-}
-
-type transportConfig struct {
- // The client certificate source.
- clientCertSource cert.Provider
- // The corresponding endpoint to use based on client certificate source.
- endpoint string
- // The S2A address if it can be used, otherwise an empty string.
- s2aAddress string
- // The MTLS endpoint to use with S2A.
- s2aMTLSEndpoint string
-}
-
-// getEndpoint returns the endpoint for the service, taking into account the
-// user-provided endpoint override "settings.Endpoint".
-//
-// If no endpoint override is specified, we will either return the default endpoint or
-// the default mTLS endpoint if a client certificate is available.
-//
-// You can override the default endpoint choice (mtls vs. regular) by setting the
-// GOOGLE_API_USE_MTLS_ENDPOINT environment variable.
-//
-// If the endpoint override is an address (host:port) rather than full base
-// URL (ex. https://...), then the user-provided address will be merged into
-// the default endpoint. For example, WithEndpoint("myhost:8000") and
-// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return "https://myhost:8080/bar/baz"
-func getEndpoint(opts *Options, clientCertSource cert.Provider) (string, error) {
- if opts.Endpoint == "" {
- mtlsMode := getMTLSMode()
- if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) {
- if !opts.isUniverseDomainGDU() {
- return "", errUniverseNotSupportedMTLS
- }
- return opts.DefaultMTLSEndpoint, nil
- }
- return opts.defaultEndpoint(), nil
- }
- if strings.Contains(opts.Endpoint, "://") {
- // User passed in a full URL path, use it verbatim.
- return opts.Endpoint, nil
- }
- if opts.defaultEndpoint() == "" {
- // If DefaultEndpointTemplate is not configured,
- // use the user provided endpoint verbatim. This allows a naked
- // "host[:port]" URL to be used with GRPC Direct Path.
- return opts.Endpoint, nil
- }
-
- // Assume user-provided endpoint is host[:port], merge it with the default endpoint.
- return opts.mergedEndpoint()
-}
-
-func getMTLSMode() string {
- mode := os.Getenv(googleAPIUseMTLS)
- if mode == "" {
- mode = os.Getenv(googleAPIUseMTLSOld) // Deprecated.
- }
- if mode == "" {
- return mTLSModeAuto
- }
- return strings.ToLower(mode)
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go b/upstream/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go
deleted file mode 100644
index 96582ce7b6a..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cert
-
-import (
- "crypto/tls"
- "errors"
- "sync"
-)
-
-// defaultCertData holds all the variables pertaining to
-// the default certificate provider created by [DefaultProvider].
-//
-// A singleton model is used to allow the provider to be reused
-// by the transport layer. As mentioned in [DefaultProvider] (provider nil, nil)
-// may be returned to indicate a default provider could not be found, which
-// will skip extra tls config in the transport layer .
-type defaultCertData struct {
- once sync.Once
- provider Provider
- err error
-}
-
-var (
- defaultCert defaultCertData
-)
-
-// Provider is a function that can be passed into crypto/tls.Config.GetClientCertificate.
-type Provider func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
-
-// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable.
-var errSourceUnavailable = errors.New("certificate source is unavailable")
-
-// DefaultProvider returns a certificate source using the preferred EnterpriseCertificateProxySource.
-// If EnterpriseCertificateProxySource is not available, fall back to the legacy SecureConnectSource.
-//
-// If neither source is available (due to missing configurations), a nil Source and a nil Error are
-// returned to indicate that a default certificate source is unavailable.
-func DefaultProvider() (Provider, error) {
- defaultCert.once.Do(func() {
- defaultCert.provider, defaultCert.err = NewEnterpriseCertificateProxyProvider("")
- if errors.Is(defaultCert.err, errSourceUnavailable) {
- defaultCert.provider, defaultCert.err = NewSecureConnectProvider("")
- if errors.Is(defaultCert.err, errSourceUnavailable) {
- defaultCert.provider, defaultCert.err = nil, nil
- }
- }
- })
- return defaultCert.provider, defaultCert.err
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go b/upstream/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go
deleted file mode 100644
index 36651591612..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cert
-
-import (
- "crypto/tls"
- "errors"
-
- "github.com/googleapis/enterprise-certificate-proxy/client"
-)
-
-type ecpSource struct {
- key *client.Key
-}
-
-// NewEnterpriseCertificateProxyProvider creates a certificate source
-// using the Enterprise Certificate Proxy client, which delegates
-// certifcate related operations to an OS-specific "signer binary"
-// that communicates with the native keystore (ex. keychain on MacOS).
-//
-// The configFilePath points to a config file containing relevant parameters
-// such as the certificate issuer and the location of the signer binary.
-// If configFilePath is empty, the client will attempt to load the config from
-// a well-known gcloud location.
-func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) {
- key, err := client.Cred(configFilePath)
- if err != nil {
- if errors.Is(err, client.ErrCredUnavailable) {
- return nil, errSourceUnavailable
- }
- return nil, err
- }
-
- return (&ecpSource{
- key: key,
- }).getClientCertificate, nil
-}
-
-func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
- var cert tls.Certificate
- cert.PrivateKey = s.key
- cert.Certificate = s.key.CertificateChain()
- return &cert, nil
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/upstream/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
deleted file mode 100644
index 3227aba280c..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cert
-
-import (
- "crypto/tls"
- "crypto/x509"
- "encoding/json"
- "errors"
- "fmt"
- "os"
- "os/exec"
- "os/user"
- "path/filepath"
- "sync"
- "time"
-)
-
-const (
- metadataPath = ".secureConnect"
- metadataFile = "context_aware_metadata.json"
-)
-
-type secureConnectSource struct {
- metadata secureConnectMetadata
-
- // Cache the cert to avoid executing helper command repeatedly.
- cachedCertMutex sync.Mutex
- cachedCert *tls.Certificate
-}
-
-type secureConnectMetadata struct {
- Cmd []string `json:"cert_provider_command"`
-}
-
-// NewSecureConnectProvider creates a certificate source using
-// the Secure Connect Helper and its associated metadata file.
-//
-// The configFilePath points to the location of the context aware metadata file.
-// If configFilePath is empty, use the default context aware metadata location.
-func NewSecureConnectProvider(configFilePath string) (Provider, error) {
- if configFilePath == "" {
- user, err := user.Current()
- if err != nil {
- // Error locating the default config means Secure Connect is not supported.
- return nil, errSourceUnavailable
- }
- configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile)
- }
-
- file, err := os.ReadFile(configFilePath)
- if err != nil {
- if errors.Is(err, os.ErrNotExist) {
- // Config file missing means Secure Connect is not supported.
- return nil, errSourceUnavailable
- }
- return nil, err
- }
-
- var metadata secureConnectMetadata
- if err := json.Unmarshal(file, &metadata); err != nil {
- return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err)
- }
- if err := validateMetadata(metadata); err != nil {
- return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err)
- }
- return (&secureConnectSource{
- metadata: metadata,
- }).getClientCertificate, nil
-}
-
-func validateMetadata(metadata secureConnectMetadata) error {
- if len(metadata.Cmd) == 0 {
- return errors.New("empty cert_provider_command")
- }
- return nil
-}
-
-func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
- s.cachedCertMutex.Lock()
- defer s.cachedCertMutex.Unlock()
- if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) {
- return s.cachedCert, nil
- }
- // Expand OS environment variables in the cert provider command such as "$HOME".
- for i := 0; i < len(s.metadata.Cmd); i++ {
- s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i])
- }
- command := s.metadata.Cmd
- data, err := exec.Command(command[0], command[1:]...).Output()
- if err != nil {
- return nil, err
- }
- cert, err := tls.X509KeyPair(data, data)
- if err != nil {
- return nil, err
- }
- s.cachedCert = &cert
- return &cert, nil
-}
-
-// isCertificateExpired returns true if the given cert is expired or invalid.
-func isCertificateExpired(cert *tls.Certificate) bool {
- if len(cert.Certificate) == 0 {
- return true
- }
- parsed, err := x509.ParseCertificate(cert.Certificate[0])
- if err != nil {
- return true
- }
- return time.Now().After(parsed.NotAfter)
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/upstream/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
deleted file mode 100644
index 2ed532deb7a..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "encoding/json"
- "log"
- "os"
- "strconv"
- "sync"
- "time"
-
- "cloud.google.com/go/auth/internal/transport/cert"
- "cloud.google.com/go/compute/metadata"
-)
-
-const (
- configEndpointSuffix = "instance/platform-security/auto-mtls-configuration"
-)
-
-var (
- // The period an MTLS config can be reused before needing refresh.
- configExpiry = time.Hour
-
- // mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source.
- mtlsOnce sync.Once
-)
-
-// GetS2AAddress returns the S2A address to be reached via plaintext connection.
-// Returns empty string if not set or invalid.
-func GetS2AAddress() string {
- c, err := getMetadataMTLSAutoConfig().Config()
- if err != nil {
- return ""
- }
- if !c.Valid() {
- return ""
- }
- return c.S2A.PlaintextAddress
-}
-
-type mtlsConfigSource interface {
- Config() (*mtlsConfig, error)
-}
-
-// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs.
-type mtlsConfig struct {
- S2A *s2aAddresses `json:"s2a"`
- Expiry time.Time
-}
-
-func (c *mtlsConfig) Valid() bool {
- return c != nil && c.S2A != nil && !c.expired()
-}
-func (c *mtlsConfig) expired() bool {
- return c.Expiry.Before(time.Now())
-}
-
-// s2aAddresses contains the plaintext and/or MTLS S2A addresses.
-type s2aAddresses struct {
- // PlaintextAddress is the plaintext address to reach S2A
- PlaintextAddress string `json:"plaintext_address"`
- // MTLSAddress is the MTLS address to reach S2A
- MTLSAddress string `json:"mtls_address"`
-}
-
-// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh.
-func getMetadataMTLSAutoConfig() mtlsConfigSource {
- mtlsOnce.Do(func() {
- mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{
- src: &metadataMTLSAutoConfig{},
- }
- })
- return mdsMTLSAutoConfigSource
-}
-
-// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry.
-// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig.
-type reuseMTLSConfigSource struct {
- src mtlsConfigSource // src.Config() is called when config is expired
- mu sync.Mutex // mutex guards config
- config *mtlsConfig // cached config
-}
-
-func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) {
- cs.mu.Lock()
- defer cs.mu.Unlock()
-
- if cs.config.Valid() {
- return cs.config, nil
- }
- c, err := cs.src.Config()
- if err != nil {
- return nil, err
- }
- cs.config = c
- return c, nil
-}
-
-// metadataMTLSAutoConfig is an implementation of the interface mtlsConfigSource
-// It has the logic to query MDS and return an mtlsConfig
-type metadataMTLSAutoConfig struct{}
-
-var httpGetMetadataMTLSConfig = func() (string, error) {
- return metadata.Get(configEndpointSuffix)
-}
-
-func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) {
- resp, err := httpGetMetadataMTLSConfig()
- if err != nil {
- log.Printf("querying MTLS config from MDS endpoint failed: %v", err)
- return defaultMTLSConfig(), nil
- }
- var config mtlsConfig
- err = json.Unmarshal([]byte(resp), &config)
- if err != nil {
- log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err)
- return defaultMTLSConfig(), nil
- }
-
- if config.S2A == nil {
- log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config)
- return defaultMTLSConfig(), nil
- }
-
- // set new expiry
- config.Expiry = time.Now().Add(configExpiry)
- return &config, nil
-}
-
-func defaultMTLSConfig() *mtlsConfig {
- return &mtlsConfig{
- S2A: &s2aAddresses{
- PlaintextAddress: "",
- MTLSAddress: "",
- },
- Expiry: time.Now().Add(configExpiry),
- }
-}
-
-func shouldUseS2A(clientCertSource cert.Provider, opts *Options) bool {
- // If client cert is found, use that over S2A.
- if clientCertSource != nil {
- return false
- }
- // If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A.
- if !isGoogleS2AEnabled() {
- return false
- }
- // If DefaultMTLSEndpoint is not set or has endpoint override, skip S2A.
- if opts.DefaultMTLSEndpoint == "" || opts.Endpoint != "" {
- return false
- }
- // If custom HTTP client is provided, skip S2A.
- if opts.Client != nil {
- return false
- }
- // If directPath is enabled, skip S2A.
- return !opts.EnableDirectPath && !opts.EnableDirectPathXds
-}
-
-func isGoogleS2AEnabled() bool {
- b, err := strconv.ParseBool(os.Getenv(googleAPIUseS2AEnv))
- if err != nil {
- return false
- }
- return b
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/upstream/vendor/cloud.google.com/go/auth/internal/transport/transport.go
deleted file mode 100644
index b76386d3c0d..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/internal/transport/transport.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package transport provided internal helpers for the two transport packages
-// (grpctransport and httptransport).
-package transport
-
-import (
- "fmt"
-
- "cloud.google.com/go/auth/credentials"
-)
-
-// CloneDetectOptions clones a user set detect option into some new memory that
-// we can internally manipulate before sending onto the detect package.
-func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOptions {
- if oldDo == nil {
- // it is valid for users not to set this, but we will need to to default
- // some options for them in this case so return some initialized memory
- // to work with.
- return &credentials.DetectOptions{}
- }
- newDo := &credentials.DetectOptions{
- // Simple types
- Audience: oldDo.Audience,
- Subject: oldDo.Subject,
- EarlyTokenRefresh: oldDo.EarlyTokenRefresh,
- TokenURL: oldDo.TokenURL,
- STSAudience: oldDo.STSAudience,
- CredentialsFile: oldDo.CredentialsFile,
- UseSelfSignedJWT: oldDo.UseSelfSignedJWT,
- UniverseDomain: oldDo.UniverseDomain,
-
- // These fields are are pointer types that we just want to use exactly
- // as the user set, copy the ref
- Client: oldDo.Client,
- AuthHandlerOptions: oldDo.AuthHandlerOptions,
- }
-
- // Smartly size this memory and copy below.
- if oldDo.CredentialsJSON != nil {
- newDo.CredentialsJSON = make([]byte, len(oldDo.CredentialsJSON))
- copy(newDo.CredentialsJSON, oldDo.CredentialsJSON)
- }
- if oldDo.Scopes != nil {
- newDo.Scopes = make([]string, len(oldDo.Scopes))
- copy(newDo.Scopes, oldDo.Scopes)
- }
-
- return newDo
-}
-
-// ValidateUniverseDomain verifies that the universe domain configured for the
-// client matches the universe domain configured for the credentials.
-func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain string) error {
- if clientUniverseDomain != credentialsUniverseDomain {
- return fmt.Errorf(
- "the configured universe domain (%q) does not match the universe "+
- "domain found in the credentials (%q). If you haven't configured "+
- "the universe domain explicitly, \"googleapis.com\" is the default",
- clientUniverseDomain,
- credentialsUniverseDomain)
- }
- return nil
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/upstream/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
deleted file mode 100644
index ff9747beda0..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# Changelog
-
-## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.1...auth/oauth2adapt/v0.2.2) (2024-04-23)
-
-
-### Bug Fixes
-
-* **auth/oauth2adapt:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4))
-
-## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.0...auth/oauth2adapt/v0.2.1) (2024-04-18)
-
-
-### Bug Fixes
-
-* **auth/oauth2adapt:** Adapt Token Types to be translated ([#9801](https://github.com/googleapis/google-cloud-go/issues/9801)) ([70f4115](https://github.com/googleapis/google-cloud-go/commit/70f411555ebbf2b71e6d425cc8d2030644c6b438)), refs [#9800](https://github.com/googleapis/google-cloud-go/issues/9800)
-
-## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.1.0...auth/oauth2adapt/v0.2.0) (2024-04-16)
-
-
-### Features
-
-* **auth/oauth2adapt:** Add helpers for working with credentials types ([#9694](https://github.com/googleapis/google-cloud-go/issues/9694)) ([cf33b55](https://github.com/googleapis/google-cloud-go/commit/cf33b5514423a2ac5c2a323a1cd99aac34fd4233))
-
-
-### Bug Fixes
-
-* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
-
-## 0.1.0 (2023-10-19)
-
-
-### Features
-
-* **auth/oauth2adapt:** Adds a new module to translate types ([#8595](https://github.com/googleapis/google-cloud-go/issues/8595)) ([6933c5a](https://github.com/googleapis/google-cloud-go/commit/6933c5a0c1fc8e58cbfff8bbca439d671b94672f))
-* **auth/oauth2adapt:** Fixup deps for release ([#8747](https://github.com/googleapis/google-cloud-go/issues/8747)) ([749d243](https://github.com/googleapis/google-cloud-go/commit/749d243862b025a6487a4d2d339219889b4cfe70))
-
-
-### Bug Fixes
-
-* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
diff --git a/upstream/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE b/upstream/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE
deleted file mode 100644
index d6456956733..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/upstream/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go b/upstream/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go
deleted file mode 100644
index 9835ac571cf..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package oauth2adapt helps converts types used in [cloud.google.com/go/auth]
-// and [golang.org/x/oauth2].
-package oauth2adapt
-
-import (
- "context"
- "encoding/json"
- "errors"
-
- "cloud.google.com/go/auth"
- "golang.org/x/oauth2"
- "golang.org/x/oauth2/google"
-)
-
-// TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource]
-// into a [cloud.google.com/go/auth.TokenProvider].
-func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider {
- return &tokenProviderAdapter{ts: ts}
-}
-
-type tokenProviderAdapter struct {
- ts oauth2.TokenSource
-}
-
-// Token fulfills the [cloud.google.com/go/auth.TokenProvider] interface. It
-// is a light wrapper around the underlying TokenSource.
-func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) {
- tok, err := tp.ts.Token()
- if err != nil {
- var err2 *oauth2.RetrieveError
- if ok := errors.As(err, &err2); ok {
- return nil, AuthErrorFromRetrieveError(err2)
- }
- return nil, err
- }
- return &auth.Token{
- Value: tok.AccessToken,
- Type: tok.Type(),
- Expiry: tok.Expiry,
- }, nil
-}
-
-// TokenSourceFromTokenProvider converts any
-// [cloud.google.com/go/auth.TokenProvider] into a
-// [golang.org/x/oauth2.TokenSource].
-func TokenSourceFromTokenProvider(tp auth.TokenProvider) oauth2.TokenSource {
- return &tokenSourceAdapter{tp: tp}
-}
-
-type tokenSourceAdapter struct {
- tp auth.TokenProvider
-}
-
-// Token fulfills the [golang.org/x/oauth2.TokenSource] interface. It
-// is a light wrapper around the underlying TokenProvider.
-func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) {
- tok, err := ts.tp.Token(context.Background())
- if err != nil {
- var err2 *auth.Error
- if ok := errors.As(err, &err2); ok {
- return nil, AddRetrieveErrorToAuthError(err2)
- }
- return nil, err
- }
- return &oauth2.Token{
- AccessToken: tok.Value,
- TokenType: tok.Type,
- Expiry: tok.Expiry,
- }, nil
-}
-
-// AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials]
-// to a [cloud.google.com/go/auth.Credentials].
-func AuthCredentialsFromOauth2Credentials(creds *google.Credentials) *auth.Credentials {
- if creds == nil {
- return nil
- }
- return auth.NewCredentials(&auth.CredentialsOptions{
- TokenProvider: TokenProviderFromTokenSource(creds.TokenSource),
- JSON: creds.JSON,
- ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
- return creds.ProjectID, nil
- }),
- UniverseDomainProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
- return creds.GetUniverseDomain()
- }),
- })
-}
-
-// Oauth2CredentialsFromAuthCredentials converts a [cloud.google.com/go/auth.Credentials]
-// to a [golang.org/x/oauth2/google.Credentials].
-func Oauth2CredentialsFromAuthCredentials(creds *auth.Credentials) *google.Credentials {
- if creds == nil {
- return nil
- }
- // Throw away errors as old credentials are not request aware. Also, no
- // network requests are currently happening for this use case.
- projectID, _ := creds.ProjectID(context.Background())
-
- return &google.Credentials{
- TokenSource: TokenSourceFromTokenProvider(creds.TokenProvider),
- ProjectID: projectID,
- JSON: creds.JSON(),
- UniverseDomainProvider: func() (string, error) {
- return creds.UniverseDomain(context.Background())
- },
- }
-}
-
-type oauth2Error struct {
- ErrorCode string `json:"error"`
- ErrorDescription string `json:"error_description"`
- ErrorURI string `json:"error_uri"`
-}
-
-// AddRetrieveErrorToAuthError returns the same error provided and adds a
-// [golang.org/x/oauth2.RetrieveError] to the error chain by setting the `Err` field on the
-// [cloud.google.com/go/auth.Error].
-func AddRetrieveErrorToAuthError(err *auth.Error) *auth.Error {
- if err == nil {
- return nil
- }
- e := &oauth2.RetrieveError{
- Response: err.Response,
- Body: err.Body,
- }
- err.Err = e
- if len(err.Body) > 0 {
- var oErr oauth2Error
- // ignore the error as it only fills in extra details
- json.Unmarshal(err.Body, &oErr)
- e.ErrorCode = oErr.ErrorCode
- e.ErrorDescription = oErr.ErrorDescription
- e.ErrorURI = oErr.ErrorURI
- }
- return err
-}
-
-// AuthErrorFromRetrieveError returns an [cloud.google.com/go/auth.Error] that
-// wraps the provided [golang.org/x/oauth2.RetrieveError].
-func AuthErrorFromRetrieveError(err *oauth2.RetrieveError) *auth.Error {
- if err == nil {
- return nil
- }
- return &auth.Error{
- Response: err.Response,
- Body: err.Body,
- Err: err,
- }
-}
diff --git a/upstream/vendor/cloud.google.com/go/auth/threelegged.go b/upstream/vendor/cloud.google.com/go/auth/threelegged.go
deleted file mode 100644
index 1b8d83c4b4f..00000000000
--- a/upstream/vendor/cloud.google.com/go/auth/threelegged.go
+++ /dev/null
@@ -1,373 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package auth
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "mime"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "cloud.google.com/go/auth/internal"
-)
-
-// AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for
-// OAuth consent at the specified auth code URL and returns an auth code and
-// state upon approval.
-type AuthorizationHandler func(authCodeURL string) (code string, state string, err error)
-
-// Options3LO are the options for doing a 3-legged OAuth2 flow.
-type Options3LO struct {
- // ClientID is the application's ID.
- ClientID string
- // ClientSecret is the application's secret. Not required if AuthHandlerOpts
- // is set.
- ClientSecret string
- // AuthURL is the URL for authenticating.
- AuthURL string
- // TokenURL is the URL for retrieving a token.
- TokenURL string
- // AuthStyle is used to describe how to client info in the token request.
- AuthStyle Style
- // RefreshToken is the token used to refresh the credential. Not required
- // if AuthHandlerOpts is set.
- RefreshToken string
- // RedirectURL is the URL to redirect users to. Optional.
- RedirectURL string
- // Scopes specifies requested permissions for the Token. Optional.
- Scopes []string
-
- // URLParams are the set of values to apply to the token exchange. Optional.
- URLParams url.Values
- // Client is the client to be used to make the underlying token requests.
- // Optional.
- Client *http.Client
- // EarlyTokenExpiry is the time before the token expires that it should be
- // refreshed. If not set the default value is 10 seconds. Optional.
- EarlyTokenExpiry time.Duration
-
- // AuthHandlerOpts provides a set of options for doing a
- // 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional.
- AuthHandlerOpts *AuthorizationHandlerOptions
-}
-
-func (o *Options3LO) validate() error {
- if o == nil {
- return errors.New("auth: options must be provided")
- }
- if o.ClientID == "" {
- return errors.New("auth: client ID must be provided")
- }
- if o.AuthHandlerOpts == nil && o.ClientSecret == "" {
- return errors.New("auth: client secret must be provided")
- }
- if o.AuthURL == "" {
- return errors.New("auth: auth URL must be provided")
- }
- if o.TokenURL == "" {
- return errors.New("auth: token URL must be provided")
- }
- if o.AuthStyle == StyleUnknown {
- return errors.New("auth: auth style must be provided")
- }
- if o.AuthHandlerOpts == nil && o.RefreshToken == "" {
- return errors.New("auth: refresh token must be provided")
- }
- return nil
-}
-
-// PKCEOptions holds parameters to support PKCE.
-type PKCEOptions struct {
- // Challenge is the un-padded, base64-url-encoded string of the encrypted code verifier.
- Challenge string // The un-padded, base64-url-encoded string of the encrypted code verifier.
- // ChallengeMethod is the encryption method (ex. S256).
- ChallengeMethod string
- // Verifier is the original, non-encrypted secret.
- Verifier string // The original, non-encrypted secret.
-}
-
-type tokenJSON struct {
- AccessToken string `json:"access_token"`
- TokenType string `json:"token_type"`
- RefreshToken string `json:"refresh_token"`
- ExpiresIn int `json:"expires_in"`
- // error fields
- ErrorCode string `json:"error"`
- ErrorDescription string `json:"error_description"`
- ErrorURI string `json:"error_uri"`
-}
-
-func (e *tokenJSON) expiry() (t time.Time) {
- if v := e.ExpiresIn; v != 0 {
- return time.Now().Add(time.Duration(v) * time.Second)
- }
- return
-}
-
-func (o *Options3LO) client() *http.Client {
- if o.Client != nil {
- return o.Client
- }
- return internal.CloneDefaultClient()
-}
-
-// authCodeURL returns a URL that points to a OAuth2 consent page.
-func (o *Options3LO) authCodeURL(state string, values url.Values) string {
- var buf bytes.Buffer
- buf.WriteString(o.AuthURL)
- v := url.Values{
- "response_type": {"code"},
- "client_id": {o.ClientID},
- }
- if o.RedirectURL != "" {
- v.Set("redirect_uri", o.RedirectURL)
- }
- if len(o.Scopes) > 0 {
- v.Set("scope", strings.Join(o.Scopes, " "))
- }
- if state != "" {
- v.Set("state", state)
- }
- if o.AuthHandlerOpts != nil {
- if o.AuthHandlerOpts.PKCEOpts != nil &&
- o.AuthHandlerOpts.PKCEOpts.Challenge != "" {
- v.Set(codeChallengeKey, o.AuthHandlerOpts.PKCEOpts.Challenge)
- }
- if o.AuthHandlerOpts.PKCEOpts != nil &&
- o.AuthHandlerOpts.PKCEOpts.ChallengeMethod != "" {
- v.Set(codeChallengeMethodKey, o.AuthHandlerOpts.PKCEOpts.ChallengeMethod)
- }
- }
- for k := range values {
- v.Set(k, v.Get(k))
- }
- if strings.Contains(o.AuthURL, "?") {
- buf.WriteByte('&')
- } else {
- buf.WriteByte('?')
- }
- buf.WriteString(v.Encode())
- return buf.String()
-}
-
-// New3LOTokenProvider returns a [TokenProvider] based on the 3-legged OAuth2
-// configuration. The TokenProvider is caches and auto-refreshes tokens by
-// default.
-func New3LOTokenProvider(opts *Options3LO) (TokenProvider, error) {
- if err := opts.validate(); err != nil {
- return nil, err
- }
- if opts.AuthHandlerOpts != nil {
- return new3LOTokenProviderWithAuthHandler(opts), nil
- }
- return NewCachedTokenProvider(&tokenProvider3LO{opts: opts, refreshToken: opts.RefreshToken, client: opts.client()}, &CachedTokenProviderOptions{
- ExpireEarly: opts.EarlyTokenExpiry,
- }), nil
-}
-
-// AuthorizationHandlerOptions provides a set of options to specify for doing a
-// 3-legged OAuth2 flow with a custom [AuthorizationHandler].
-type AuthorizationHandlerOptions struct {
- // AuthorizationHandler specifies the handler used to for the authorization
- // part of the flow.
- Handler AuthorizationHandler
- // State is used verify that the "state" is identical in the request and
- // response before exchanging the auth code for OAuth2 token.
- State string
- // PKCEOpts allows setting configurations for PKCE. Optional.
- PKCEOpts *PKCEOptions
-}
-
-func new3LOTokenProviderWithAuthHandler(opts *Options3LO) TokenProvider {
- return NewCachedTokenProvider(&tokenProviderWithHandler{opts: opts, state: opts.AuthHandlerOpts.State}, &CachedTokenProviderOptions{
- ExpireEarly: opts.EarlyTokenExpiry,
- })
-}
-
-// exchange handles the final exchange portion of the 3lo flow. Returns a Token,
-// refreshToken, and error.
-func (o *Options3LO) exchange(ctx context.Context, code string) (*Token, string, error) {
- // Build request
- v := url.Values{
- "grant_type": {"authorization_code"},
- "code": {code},
- }
- if o.RedirectURL != "" {
- v.Set("redirect_uri", o.RedirectURL)
- }
- if o.AuthHandlerOpts != nil &&
- o.AuthHandlerOpts.PKCEOpts != nil &&
- o.AuthHandlerOpts.PKCEOpts.Verifier != "" {
- v.Set(codeVerifierKey, o.AuthHandlerOpts.PKCEOpts.Verifier)
- }
- for k := range o.URLParams {
- v.Set(k, o.URLParams.Get(k))
- }
- return fetchToken(ctx, o, v)
-}
-
-// This struct is not safe for concurrent access alone, but the way it is used
-// in this package by wrapping it with a cachedTokenProvider makes it so.
-type tokenProvider3LO struct {
- opts *Options3LO
- client *http.Client
- refreshToken string
-}
-
-func (tp *tokenProvider3LO) Token(ctx context.Context) (*Token, error) {
- if tp.refreshToken == "" {
- return nil, errors.New("auth: token expired and refresh token is not set")
- }
- v := url.Values{
- "grant_type": {"refresh_token"},
- "refresh_token": {tp.refreshToken},
- }
- for k := range tp.opts.URLParams {
- v.Set(k, tp.opts.URLParams.Get(k))
- }
-
- tk, rt, err := fetchToken(ctx, tp.opts, v)
- if err != nil {
- return nil, err
- }
- if tp.refreshToken != rt && rt != "" {
- tp.refreshToken = rt
- }
- return tk, err
-}
-
-type tokenProviderWithHandler struct {
- opts *Options3LO
- state string
-}
-
-func (tp tokenProviderWithHandler) Token(ctx context.Context) (*Token, error) {
- url := tp.opts.authCodeURL(tp.state, nil)
- code, state, err := tp.opts.AuthHandlerOpts.Handler(url)
- if err != nil {
- return nil, err
- }
- if state != tp.state {
- return nil, errors.New("auth: state mismatch in 3-legged-OAuth flow")
- }
- tok, _, err := tp.opts.exchange(ctx, code)
- return tok, err
-}
-
-// fetchToken returns a Token, refresh token, and/or an error.
-func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, string, error) {
- var refreshToken string
- if o.AuthStyle == StyleInParams {
- if o.ClientID != "" {
- v.Set("client_id", o.ClientID)
- }
- if o.ClientSecret != "" {
- v.Set("client_secret", o.ClientSecret)
- }
- }
- req, err := http.NewRequest("POST", o.TokenURL, strings.NewReader(v.Encode()))
- if err != nil {
- return nil, refreshToken, err
- }
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
- if o.AuthStyle == StyleInHeader {
- req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret))
- }
-
- // Make request
- r, err := o.client().Do(req.WithContext(ctx))
- if err != nil {
- return nil, refreshToken, err
- }
- body, err := internal.ReadAll(r.Body)
- r.Body.Close()
- if err != nil {
- return nil, refreshToken, fmt.Errorf("auth: cannot fetch token: %w", err)
- }
-
- failureStatus := r.StatusCode < 200 || r.StatusCode > 299
- tokError := &Error{
- Response: r,
- Body: body,
- }
-
- var token *Token
- // errors ignored because of default switch on content
- content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
- switch content {
- case "application/x-www-form-urlencoded", "text/plain":
- // some endpoints return a query string
- vals, err := url.ParseQuery(string(body))
- if err != nil {
- if failureStatus {
- return nil, refreshToken, tokError
- }
- return nil, refreshToken, fmt.Errorf("auth: cannot parse response: %w", err)
- }
- tokError.code = vals.Get("error")
- tokError.description = vals.Get("error_description")
- tokError.uri = vals.Get("error_uri")
- token = &Token{
- Value: vals.Get("access_token"),
- Type: vals.Get("token_type"),
- Metadata: make(map[string]interface{}, len(vals)),
- }
- for k, v := range vals {
- token.Metadata[k] = v
- }
- refreshToken = vals.Get("refresh_token")
- e := vals.Get("expires_in")
- expires, _ := strconv.Atoi(e)
- if expires != 0 {
- token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
- }
- default:
- var tj tokenJSON
- if err = json.Unmarshal(body, &tj); err != nil {
- if failureStatus {
- return nil, refreshToken, tokError
- }
- return nil, refreshToken, fmt.Errorf("auth: cannot parse json: %w", err)
- }
- tokError.code = tj.ErrorCode
- tokError.description = tj.ErrorDescription
- tokError.uri = tj.ErrorURI
- token = &Token{
- Value: tj.AccessToken,
- Type: tj.TokenType,
- Expiry: tj.expiry(),
- Metadata: make(map[string]interface{}),
- }
- json.Unmarshal(body, &token.Metadata) // optional field, skip err check
- refreshToken = tj.RefreshToken
- }
- // according to spec, servers should respond status 400 in error case
- // https://www.rfc-editor.org/rfc/rfc6749#section-5.2
- // but some unorthodox servers respond 200 in error case
- if failureStatus || tokError.code != "" {
- return nil, refreshToken, tokError
- }
- if token.Value == "" {
- return nil, refreshToken, errors.New("auth: server response missing access_token")
- }
- return token, refreshToken, nil
-}
diff --git a/upstream/vendor/cloud.google.com/go/LICENSE b/upstream/vendor/cloud.google.com/go/compute/LICENSE
similarity index 100%
rename from upstream/vendor/cloud.google.com/go/LICENSE
rename to upstream/vendor/cloud.google.com/go/compute/LICENSE
diff --git a/upstream/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go b/upstream/vendor/cloud.google.com/go/compute/internal/version.go
similarity index 66%
rename from upstream/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go
rename to upstream/vendor/cloud.google.com/go/compute/internal/version.go
index 74689acbbbf..291a237fe1c 100644
--- a/upstream/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go
+++ b/upstream/vendor/cloud.google.com/go/compute/internal/version.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,17 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build linux
+package internal
-package metadata
-
-import (
- "os"
- "strings"
-)
-
-func systemInfoSuggestsGCE() bool {
- b, _ := os.ReadFile("/sys/class/dmi/id/product_name")
- name := strings.TrimSpace(string(b))
- return name == "Google" || name == "Google Compute Engine"
-}
+// Version is the current tagged release of the library.
+const Version = "1.24.0"
diff --git a/upstream/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/upstream/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
index 9594e1e2793..06b957349af 100644
--- a/upstream/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
+++ b/upstream/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
@@ -1,31 +1,5 @@
# Changes
-## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10)
-
-
-### Features
-
-* **compute/metadata:** Add sys check for windows OnGCE ([#10521](https://github.com/googleapis/google-cloud-go/issues/10521)) ([3b9a830](https://github.com/googleapis/google-cloud-go/commit/3b9a83063960d2a2ac20beb47cc15818a68bd302))
-
-## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.3.0...compute/metadata/v0.4.0) (2024-07-01)
-
-
-### Features
-
-* **compute/metadata:** Add context for all functions/methods ([#10370](https://github.com/googleapis/google-cloud-go/issues/10370)) ([66b8efe](https://github.com/googleapis/google-cloud-go/commit/66b8efe7ad877e052b2987bb4475477e38c67bb3))
-
-
-### Documentation
-
-* **compute/metadata:** Update OnGCE description ([#10408](https://github.com/googleapis/google-cloud-go/issues/10408)) ([6a46dca](https://github.com/googleapis/google-cloud-go/commit/6a46dca4eae4f88ec6f88822e01e5bf8aeca787f))
-
-## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.3...compute/metadata/v0.3.0) (2024-04-15)
-
-
-### Features
-
-* **compute/metadata:** Add context aware functions ([#9733](https://github.com/googleapis/google-cloud-go/issues/9733)) ([e4eb5b4](https://github.com/googleapis/google-cloud-go/commit/e4eb5b46ee2aec9d2fc18300bfd66015e25a0510))
-
## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15)
diff --git a/upstream/vendor/cloud.google.com/go/compute/metadata/metadata.go b/upstream/vendor/cloud.google.com/go/compute/metadata/metadata.go
index 345080b7297..c17faa142a4 100644
--- a/upstream/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ b/upstream/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -23,11 +23,12 @@ import (
"context"
"encoding/json"
"fmt"
- "io"
+ "io/ioutil"
"net"
"net/http"
"net/url"
"os"
+ "runtime"
"strings"
"sync"
"time"
@@ -87,16 +88,16 @@ func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
-func (c *cachedValue) get(ctx context.Context, cl *Client) (v string, err error) {
+func (c *cachedValue) get(cl *Client) (v string, err error) {
defer c.mu.Unlock()
c.mu.Lock()
if c.v != "" {
return c.v, nil
}
if c.trim {
- v, err = cl.getTrimmed(ctx, c.k)
+ v, err = cl.getTrimmed(c.k)
} else {
- v, err = cl.GetWithContext(ctx, c.k)
+ v, err = cl.Get(c.k)
}
if err == nil {
c.v = v
@@ -109,9 +110,7 @@ var (
onGCE bool
)
-// OnGCE reports whether this process is running on Google Compute Platforms.
-// NOTE: True returned from `OnGCE` does not guarantee that the metadata server
-// is accessible from this process and have all the metadata defined.
+// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
onGCEOnce.Do(initOnGCE)
return onGCE
@@ -189,213 +188,78 @@ func testOnGCE() bool {
return <-resc
}
-// Subscribe calls Client.SubscribeWithContext on the default client.
-//
-// Deprecated: Please use the context aware variant [SubscribeWithContext].
-func Subscribe(suffix string, fn func(v string, ok bool) error) error {
- return defaultClient.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) })
-}
-
-// SubscribeWithContext calls Client.SubscribeWithContext on the default client.
-func SubscribeWithContext(ctx context.Context, suffix string, fn func(ctx context.Context, v string, ok bool) error) error {
- return defaultClient.SubscribeWithContext(ctx, suffix, fn)
+// systemInfoSuggestsGCE reports whether the local system (without
+// doing network requests) suggests that we're running on GCE. If this
+// returns true, testOnGCE tries a bit harder to reach its metadata
+// server.
+func systemInfoSuggestsGCE() bool {
+ if runtime.GOOS != "linux" {
+ // We don't have any non-Linux clues available, at least yet.
+ return false
+ }
+ slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
+ name := strings.TrimSpace(string(slurp))
+ return name == "Google" || name == "Google Compute Engine"
}
-// Get calls Client.GetWithContext on the default client.
-//
-// Deprecated: Please use the context aware variant [GetWithContext].
-func Get(suffix string) (string, error) {
- return defaultClient.GetWithContext(context.Background(), suffix)
+// Subscribe calls Client.Subscribe on the default client.
+func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+ return defaultClient.Subscribe(suffix, fn)
}
-// GetWithContext calls Client.GetWithContext on the default client.
-func GetWithContext(ctx context.Context, suffix string) (string, error) {
- return defaultClient.GetWithContext(ctx, suffix)
-}
+// Get calls Client.Get on the default client.
+func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
// ProjectID returns the current instance's project ID string.
-//
-// Deprecated: Please use the context aware variant [ProjectIDWithContext].
-func ProjectID() (string, error) {
- return defaultClient.ProjectIDWithContext(context.Background())
-}
-
-// ProjectIDWithContext returns the current instance's project ID string.
-func ProjectIDWithContext(ctx context.Context) (string, error) {
- return defaultClient.ProjectIDWithContext(ctx)
-}
+func ProjectID() (string, error) { return defaultClient.ProjectID() }
// NumericProjectID returns the current instance's numeric project ID.
-//
-// Deprecated: Please use the context aware variant [NumericProjectIDWithContext].
-func NumericProjectID() (string, error) {
- return defaultClient.NumericProjectIDWithContext(context.Background())
-}
-
-// NumericProjectIDWithContext returns the current instance's numeric project ID.
-func NumericProjectIDWithContext(ctx context.Context) (string, error) {
- return defaultClient.NumericProjectIDWithContext(ctx)
-}
+func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
// InternalIP returns the instance's primary internal IP address.
-//
-// Deprecated: Please use the context aware variant [InternalIPWithContext].
-func InternalIP() (string, error) {
- return defaultClient.InternalIPWithContext(context.Background())
-}
-
-// InternalIPWithContext returns the instance's primary internal IP address.
-func InternalIPWithContext(ctx context.Context) (string, error) {
- return defaultClient.InternalIPWithContext(ctx)
-}
+func InternalIP() (string, error) { return defaultClient.InternalIP() }
// ExternalIP returns the instance's primary external (public) IP address.
-//
-// Deprecated: Please use the context aware variant [ExternalIPWithContext].
-func ExternalIP() (string, error) {
- return defaultClient.ExternalIPWithContext(context.Background())
-}
-
-// ExternalIPWithContext returns the instance's primary external (public) IP address.
-func ExternalIPWithContext(ctx context.Context) (string, error) {
- return defaultClient.ExternalIPWithContext(ctx)
-}
-
-// Email calls Client.EmailWithContext on the default client.
-//
-// Deprecated: Please use the context aware variant [EmailWithContext].
-func Email(serviceAccount string) (string, error) {
- return defaultClient.EmailWithContext(context.Background(), serviceAccount)
-}
+func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
-// EmailWithContext calls Client.EmailWithContext on the default client.
-func EmailWithContext(ctx context.Context, serviceAccount string) (string, error) {
- return defaultClient.EmailWithContext(ctx, serviceAccount)
-}
+// Email calls Client.Email on the default client.
+func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) }
// Hostname returns the instance's hostname. This will be of the form
// ".c..internal".
-//
-// Deprecated: Please use the context aware variant [HostnameWithContext].
-func Hostname() (string, error) {
- return defaultClient.HostnameWithContext(context.Background())
-}
-
-// HostnameWithContext returns the instance's hostname. This will be of the form
-// ".c..internal".
-func HostnameWithContext(ctx context.Context) (string, error) {
- return defaultClient.HostnameWithContext(ctx)
-}
+func Hostname() (string, error) { return defaultClient.Hostname() }
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
-//
-// Deprecated: Please use the context aware variant [InstanceTagsWithContext].
-func InstanceTags() ([]string, error) {
- return defaultClient.InstanceTagsWithContext(context.Background())
-}
-
-// InstanceTagsWithContext returns the list of user-defined instance tags,
-// assigned when initially creating a GCE instance.
-func InstanceTagsWithContext(ctx context.Context) ([]string, error) {
- return defaultClient.InstanceTagsWithContext(ctx)
-}
+func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
// InstanceID returns the current VM's numeric instance ID.
-//
-// Deprecated: Please use the context aware variant [InstanceIDWithContext].
-func InstanceID() (string, error) {
- return defaultClient.InstanceIDWithContext(context.Background())
-}
-
-// InstanceIDWithContext returns the current VM's numeric instance ID.
-func InstanceIDWithContext(ctx context.Context) (string, error) {
- return defaultClient.InstanceIDWithContext(ctx)
-}
+func InstanceID() (string, error) { return defaultClient.InstanceID() }
// InstanceName returns the current VM's instance ID string.
-//
-// Deprecated: Please use the context aware variant [InstanceNameWithContext].
-func InstanceName() (string, error) {
- return defaultClient.InstanceNameWithContext(context.Background())
-}
-
-// InstanceNameWithContext returns the current VM's instance ID string.
-func InstanceNameWithContext(ctx context.Context) (string, error) {
- return defaultClient.InstanceNameWithContext(ctx)
-}
+func InstanceName() (string, error) { return defaultClient.InstanceName() }
// Zone returns the current VM's zone, such as "us-central1-b".
-//
-// Deprecated: Please use the context aware variant [ZoneWithContext].
-func Zone() (string, error) {
- return defaultClient.ZoneWithContext(context.Background())
-}
-
-// ZoneWithContext returns the current VM's zone, such as "us-central1-b".
-func ZoneWithContext(ctx context.Context) (string, error) {
- return defaultClient.ZoneWithContext(ctx)
-}
+func Zone() (string, error) { return defaultClient.Zone() }
-// InstanceAttributes calls Client.InstanceAttributesWithContext on the default client.
-//
-// Deprecated: Please use the context aware variant [InstanceAttributesWithContext.
-func InstanceAttributes() ([]string, error) {
- return defaultClient.InstanceAttributesWithContext(context.Background())
-}
-
-// InstanceAttributesWithContext calls Client.ProjectAttributesWithContext on the default client.
-func InstanceAttributesWithContext(ctx context.Context) ([]string, error) {
- return defaultClient.InstanceAttributesWithContext(ctx)
-}
-
-// ProjectAttributes calls Client.ProjectAttributesWithContext on the default client.
-//
-// Deprecated: Please use the context aware variant [ProjectAttributesWithContext].
-func ProjectAttributes() ([]string, error) {
- return defaultClient.ProjectAttributesWithContext(context.Background())
-}
+// InstanceAttributes calls Client.InstanceAttributes on the default client.
+func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
-// ProjectAttributesWithContext calls Client.ProjectAttributesWithContext on the default client.
-func ProjectAttributesWithContext(ctx context.Context) ([]string, error) {
- return defaultClient.ProjectAttributesWithContext(ctx)
-}
+// ProjectAttributes calls Client.ProjectAttributes on the default client.
+func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
-// InstanceAttributeValue calls Client.InstanceAttributeValueWithContext on the default client.
-//
-// Deprecated: Please use the context aware variant [InstanceAttributeValueWithContext].
+// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
func InstanceAttributeValue(attr string) (string, error) {
- return defaultClient.InstanceAttributeValueWithContext(context.Background(), attr)
-}
-
-// InstanceAttributeValueWithContext calls Client.InstanceAttributeValueWithContext on the default client.
-func InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) {
- return defaultClient.InstanceAttributeValueWithContext(ctx, attr)
+ return defaultClient.InstanceAttributeValue(attr)
}
-// ProjectAttributeValue calls Client.ProjectAttributeValueWithContext on the default client.
-//
-// Deprecated: Please use the context aware variant [ProjectAttributeValueWithContext].
+// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
func ProjectAttributeValue(attr string) (string, error) {
- return defaultClient.ProjectAttributeValueWithContext(context.Background(), attr)
-}
-
-// ProjectAttributeValueWithContext calls Client.ProjectAttributeValueWithContext on the default client.
-func ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) {
- return defaultClient.ProjectAttributeValueWithContext(ctx, attr)
-}
-
-// Scopes calls Client.ScopesWithContext on the default client.
-//
-// Deprecated: Please use the context aware variant [ScopesWithContext].
-func Scopes(serviceAccount string) ([]string, error) {
- return defaultClient.ScopesWithContext(context.Background(), serviceAccount)
+ return defaultClient.ProjectAttributeValue(attr)
}
-// ScopesWithContext calls Client.ScopesWithContext on the default client.
-func ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) {
- return defaultClient.ScopesWithContext(ctx, serviceAccount)
-}
+// Scopes calls Client.Scopes on the default client.
+func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
func strsContains(ss []string, s string) bool {
for _, v := range ss {
@@ -418,12 +282,14 @@ func NewClient(c *http.Client) *Client {
if c == nil {
return defaultClient
}
+
return &Client{hc: c}
}
// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
-func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string, err error) {
+func (c *Client) getETag(suffix string) (value, etag string, err error) {
+ ctx := context.TODO()
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
@@ -440,7 +306,7 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string
}
suffix = strings.TrimLeft(suffix, "/")
u := "http://" + host + "/computeMetadata/v1/" + suffix
- req, err := http.NewRequestWithContext(ctx, "GET", u, nil)
+ req, err := http.NewRequest("GET", u, nil)
if err != nil {
return "", "", err
}
@@ -470,7 +336,7 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
- all, err := io.ReadAll(res.Body)
+ all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
@@ -488,37 +354,19 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
-//
-// Deprecated: Please use the context aware variant [Client.GetWithContext].
func (c *Client) Get(suffix string) (string, error) {
- return c.GetWithContext(context.Background(), suffix)
-}
-
-// GetWithContext returns a value from the metadata service.
-// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
-//
-// If the GCE_METADATA_HOST environment variable is not defined, a default of
-// 169.254.169.254 will be used instead.
-//
-// If the requested metadata is not defined, the returned error will
-// be of type NotDefinedError.
-//
-// NOTE: Without an extra deadline in the context this call can take in the
-// worst case, with internal backoff retries, up to 15 seconds (e.g. when server
-// is responding slowly). Pass context with additional timeouts when needed.
-func (c *Client) GetWithContext(ctx context.Context, suffix string) (string, error) {
- val, _, err := c.getETag(ctx, suffix)
+ val, _, err := c.getETag(suffix)
return val, err
}
-func (c *Client) getTrimmed(ctx context.Context, suffix string) (s string, err error) {
- s, err = c.GetWithContext(ctx, suffix)
+func (c *Client) getTrimmed(suffix string) (s string, err error) {
+ s, err = c.Get(suffix)
s = strings.TrimSpace(s)
return
}
-func (c *Client) lines(ctx context.Context, suffix string) ([]string, error) {
- j, err := c.GetWithContext(ctx, suffix)
+func (c *Client) lines(suffix string) ([]string, error) {
+ j, err := c.Get(suffix)
if err != nil {
return nil, err
}
@@ -530,104 +378,45 @@ func (c *Client) lines(ctx context.Context, suffix string) ([]string, error) {
}
// ProjectID returns the current instance's project ID string.
-//
-// Deprecated: Please use the context aware variant [Client.ProjectIDWithContext].
-func (c *Client) ProjectID() (string, error) { return c.ProjectIDWithContext(context.Background()) }
-
-// ProjectIDWithContext returns the current instance's project ID string.
-func (c *Client) ProjectIDWithContext(ctx context.Context) (string, error) { return projID.get(ctx, c) }
+func (c *Client) ProjectID() (string, error) { return projID.get(c) }
// NumericProjectID returns the current instance's numeric project ID.
-//
-// Deprecated: Please use the context aware variant [Client.NumericProjectIDWithContext].
-func (c *Client) NumericProjectID() (string, error) {
- return c.NumericProjectIDWithContext(context.Background())
-}
-
-// NumericProjectIDWithContext returns the current instance's numeric project ID.
-func (c *Client) NumericProjectIDWithContext(ctx context.Context) (string, error) {
- return projNum.get(ctx, c)
-}
+func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
// InstanceID returns the current VM's numeric instance ID.
-//
-// Deprecated: Please use the context aware variant [Client.InstanceIDWithContext].
-func (c *Client) InstanceID() (string, error) {
- return c.InstanceIDWithContext(context.Background())
-}
-
-// InstanceIDWithContext returns the current VM's numeric instance ID.
-func (c *Client) InstanceIDWithContext(ctx context.Context) (string, error) {
- return instID.get(ctx, c)
-}
+func (c *Client) InstanceID() (string, error) { return instID.get(c) }
// InternalIP returns the instance's primary internal IP address.
-//
-// Deprecated: Please use the context aware variant [Client.InternalIPWithContext].
func (c *Client) InternalIP() (string, error) {
- return c.InternalIPWithContext(context.Background())
-}
-
-// InternalIPWithContext returns the instance's primary internal IP address.
-func (c *Client) InternalIPWithContext(ctx context.Context) (string, error) {
- return c.getTrimmed(ctx, "instance/network-interfaces/0/ip")
+ return c.getTrimmed("instance/network-interfaces/0/ip")
}
// Email returns the email address associated with the service account.
-//
-// Deprecated: Please use the context aware variant [Client.EmailWithContext].
+// The account may be empty or the string "default" to use the instance's
+// main account.
func (c *Client) Email(serviceAccount string) (string, error) {
- return c.EmailWithContext(context.Background(), serviceAccount)
-}
-
-// EmailWithContext returns the email address associated with the service account.
-// The serviceAccount parameter default value (empty string or "default" value)
-// will use the instance's main account.
-func (c *Client) EmailWithContext(ctx context.Context, serviceAccount string) (string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
- return c.getTrimmed(ctx, "instance/service-accounts/"+serviceAccount+"/email")
+ return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email")
}
// ExternalIP returns the instance's primary external (public) IP address.
-//
-// Deprecated: Please use the context aware variant [Client.ExternalIPWithContext].
func (c *Client) ExternalIP() (string, error) {
- return c.ExternalIPWithContext(context.Background())
-}
-
-// ExternalIPWithContext returns the instance's primary external (public) IP address.
-func (c *Client) ExternalIPWithContext(ctx context.Context) (string, error) {
- return c.getTrimmed(ctx, "instance/network-interfaces/0/access-configs/0/external-ip")
+ return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// ".c..internal".
-//
-// Deprecated: Please use the context aware variant [Client.HostnameWithContext].
func (c *Client) Hostname() (string, error) {
- return c.HostnameWithContext(context.Background())
-}
-
-// HostnameWithContext returns the instance's hostname. This will be of the form
-// ".c..internal".
-func (c *Client) HostnameWithContext(ctx context.Context) (string, error) {
- return c.getTrimmed(ctx, "instance/hostname")
+ return c.getTrimmed("instance/hostname")
}
-// InstanceTags returns the list of user-defined instance tags.
-//
-// Deprecated: Please use the context aware variant [Client.InstanceTagsWithContext].
-func (c *Client) InstanceTags() ([]string, error) {
- return c.InstanceTagsWithContext(context.Background())
-}
-
-// InstanceTagsWithContext returns the list of user-defined instance tags,
+// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
-func (c *Client) InstanceTagsWithContext(ctx context.Context) ([]string, error) {
+func (c *Client) InstanceTags() ([]string, error) {
var s []string
- j, err := c.GetWithContext(ctx, "instance/tags")
+ j, err := c.Get("instance/tags")
if err != nil {
return nil, err
}
@@ -638,27 +427,13 @@ func (c *Client) InstanceTagsWithContext(ctx context.Context) ([]string, error)
}
// InstanceName returns the current VM's instance ID string.
-//
-// Deprecated: Please use the context aware variant [Client.InstanceNameWithContext].
func (c *Client) InstanceName() (string, error) {
- return c.InstanceNameWithContext(context.Background())
-}
-
-// InstanceNameWithContext returns the current VM's instance ID string.
-func (c *Client) InstanceNameWithContext(ctx context.Context) (string, error) {
- return c.getTrimmed(ctx, "instance/name")
+ return c.getTrimmed("instance/name")
}
// Zone returns the current VM's zone, such as "us-central1-b".
-//
-// Deprecated: Please use the context aware variant [Client.ZoneWithContext].
func (c *Client) Zone() (string, error) {
- return c.ZoneWithContext(context.Background())
-}
-
-// ZoneWithContext returns the current VM's zone, such as "us-central1-b".
-func (c *Client) ZoneWithContext(ctx context.Context) (string, error) {
- zone, err := c.getTrimmed(ctx, "instance/zone")
+ zone, err := c.getTrimmed("instance/zone")
// zone is of the form "projects//zones/".
if err != nil {
return "", err
@@ -669,34 +444,12 @@ func (c *Client) ZoneWithContext(ctx context.Context) (string, error) {
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
-//
-// Deprecated: Please use the context aware variant [Client.InstanceAttributesWithContext].
-func (c *Client) InstanceAttributes() ([]string, error) {
- return c.InstanceAttributesWithContext(context.Background())
-}
-
-// InstanceAttributesWithContext returns the list of user-defined attributes,
-// assigned when initially creating a GCE VM instance. The value of an
-// attribute can be obtained with InstanceAttributeValue.
-func (c *Client) InstanceAttributesWithContext(ctx context.Context) ([]string, error) {
- return c.lines(ctx, "instance/attributes/")
-}
+func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
-//
-// Deprecated: Please use the context aware variant [Client.ProjectAttributesWithContext].
-func (c *Client) ProjectAttributes() ([]string, error) {
- return c.ProjectAttributesWithContext(context.Background())
-}
-
-// ProjectAttributesWithContext returns the list of user-defined attributes
-// applying to the project as a whole, not just this VM. The value of
-// an attribute can be obtained with ProjectAttributeValue.
-func (c *Client) ProjectAttributesWithContext(ctx context.Context) ([]string, error) {
- return c.lines(ctx, "project/attributes/")
-}
+func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
@@ -706,22 +459,8 @@ func (c *Client) ProjectAttributesWithContext(ctx context.Context) ([]string, er
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
-//
-// Deprecated: Please use the context aware variant [Client.InstanceAttributeValueWithContext].
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
- return c.InstanceAttributeValueWithContext(context.Background(), attr)
-}
-
-// InstanceAttributeValueWithContext returns the value of the provided VM
-// instance attribute.
-//
-// If the requested attribute is not defined, the returned error will
-// be of type NotDefinedError.
-//
-// InstanceAttributeValue may return ("", nil) if the attribute was
-// defined to be the empty string.
-func (c *Client) InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) {
- return c.GetWithContext(ctx, "instance/attributes/"+attr)
+ return c.Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
@@ -732,71 +471,39 @@ func (c *Client) InstanceAttributeValueWithContext(ctx context.Context, attr str
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
-//
-// Deprecated: Please use the context aware variant [Client.ProjectAttributeValueWithContext].
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
- return c.ProjectAttributeValueWithContext(context.Background(), attr)
-}
-
-// ProjectAttributeValueWithContext returns the value of the provided
-// project attribute.
-//
-// If the requested attribute is not defined, the returned error will
-// be of type NotDefinedError.
-//
-// ProjectAttributeValue may return ("", nil) if the attribute was
-// defined to be the empty string.
-func (c *Client) ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) {
- return c.GetWithContext(ctx, "project/attributes/"+attr)
+ return c.Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
-//
-// Deprecated: Please use the context aware variant [Client.ScopesWithContext].
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
- return c.ScopesWithContext(context.Background(), serviceAccount)
-}
-
-// ScopesWithContext returns the service account scopes for the given account.
-// The account may be empty or the string "default" to use the instance's
-// main account.
-func (c *Client) ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
- return c.lines(ctx, "instance/service-accounts/"+serviceAccount+"/scopes")
+ return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
-// Deprecated: Please use the context aware variant [Client.SubscribeWithContext].
+// Subscribe calls fn with the latest metadata value indicated by the provided
+// suffix. If the metadata value is deleted, fn is called with the empty string
+// and ok false. Subscribe blocks until fn returns a non-nil error or the value
+// is deleted. Subscribe returns the error value returned from the last call to
+// fn, which may be nil when ok == false.
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
- return c.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) })
-}
-
-// SubscribeWithContext subscribes to a value from the metadata service.
-// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
-// The suffix may contain query parameters.
-//
-// SubscribeWithContext calls fn with the latest metadata value indicated by the
-// provided suffix. If the metadata value is deleted, fn is called with the
-// empty string and ok false. Subscribe blocks until fn returns a non-nil error
-// or the value is deleted. Subscribe returns the error value returned from the
-// last call to fn, which may be nil when ok == false.
-func (c *Client) SubscribeWithContext(ctx context.Context, suffix string, fn func(ctx context.Context, v string, ok bool) error) error {
const failedSubscribeSleep = time.Second * 5
// First check to see if the metadata value exists at all.
- val, lastETag, err := c.getETag(ctx, suffix)
+ val, lastETag, err := c.getETag(suffix)
if err != nil {
return err
}
- if err := fn(ctx, val, true); err != nil {
+ if err := fn(val, true); err != nil {
return err
}
@@ -807,7 +514,7 @@ func (c *Client) SubscribeWithContext(ctx context.Context, suffix string, fn fun
suffix += "?wait_for_change=true&last_etag="
}
for {
- val, etag, err := c.getETag(ctx, suffix+url.QueryEscape(lastETag))
+ val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
if err != nil {
if _, deleted := err.(NotDefinedError); !deleted {
time.Sleep(failedSubscribeSleep)
@@ -817,7 +524,7 @@ func (c *Client) SubscribeWithContext(ctx context.Context, suffix string, fn fun
}
lastETag = etag
- if err := fn(ctx, val, ok); err != nil || !ok {
+ if err := fn(val, ok); err != nil || !ok {
return err
}
}
diff --git a/upstream/vendor/cloud.google.com/go/compute/metadata/retry.go b/upstream/vendor/cloud.google.com/go/compute/metadata/retry.go
index 3d4bc75ddf2..0f18f3cda1e 100644
--- a/upstream/vendor/cloud.google.com/go/compute/metadata/retry.go
+++ b/upstream/vendor/cloud.google.com/go/compute/metadata/retry.go
@@ -27,7 +27,7 @@ const (
)
var (
- syscallRetryable = func(error) bool { return false }
+ syscallRetryable = func(err error) bool { return false }
)
// defaultBackoff is basically equivalent to gax.Backoff without the need for
diff --git a/upstream/vendor/cloud.google.com/go/compute/metadata/syscheck.go b/upstream/vendor/cloud.google.com/go/compute/metadata/syscheck.go
deleted file mode 100644
index e0704fa6477..00000000000
--- a/upstream/vendor/cloud.google.com/go/compute/metadata/syscheck.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !windows && !linux
-
-package metadata
-
-// systemInfoSuggestsGCE reports whether the local system (without
-// doing network requests) suggests that we're running on GCE. If this
-// returns true, testOnGCE tries a bit harder to reach its metadata
-// server.
-func systemInfoSuggestsGCE() bool {
- // We don't currently have checks for other GOOS
- return false
-}
diff --git a/upstream/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go b/upstream/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go
deleted file mode 100644
index c0ce627872f..00000000000
--- a/upstream/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-
-package metadata
-
-import (
- "strings"
-
- "golang.org/x/sys/windows/registry"
-)
-
-func systemInfoSuggestsGCE() bool {
- k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\HardwareConfig\Current`, registry.QUERY_VALUE)
- if err != nil {
- return false
- }
- defer k.Close()
-
- s, _, err := k.GetStringValue("SystemProductName")
- if err != nil {
- return false
- }
- s = strings.TrimSpace(s)
- return strings.HasPrefix(s, "Google")
-}
diff --git a/upstream/vendor/cloud.google.com/go/longrunning/tidyfix.go b/upstream/vendor/cloud.google.com/go/compute/metadata/tidyfix.go
similarity index 93%
rename from upstream/vendor/cloud.google.com/go/longrunning/tidyfix.go
rename to upstream/vendor/cloud.google.com/go/compute/metadata/tidyfix.go
index d9a07f99e0d..4cef4850081 100644
--- a/upstream/vendor/cloud.google.com/go/longrunning/tidyfix.go
+++ b/upstream/vendor/cloud.google.com/go/compute/metadata/tidyfix.go
@@ -17,7 +17,7 @@
//go:build modhack
// +build modhack
-package longrunning
+package metadata
// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
-import _ "cloud.google.com/go"
+import _ "cloud.google.com/go/compute/internal"
diff --git a/upstream/vendor/cloud.google.com/go/debug.md b/upstream/vendor/cloud.google.com/go/debug.md
deleted file mode 100644
index beec9155525..00000000000
--- a/upstream/vendor/cloud.google.com/go/debug.md
+++ /dev/null
@@ -1,404 +0,0 @@
-# Logging, Debugging and Telemetry
-
-**Warning: The OpenCensus project is obsolete and was archived on July 31st,
-2023.** This means that any security vulnerabilities that are found will not be
-patched. We recommend that you begin migrating to OpenCensus tracing to
-OpenTelemetry, the successor project. See [OpenCensus](#opencensus) below for
-details.
-
-Logging, debugging and telemetry all capture data that can be used for
-troubleshooting. Logging records specific events and transactions. Debugging
-exposes values for immediate analysis. Telemetry is suitable for production use
-and can serve both logging and monitoring purposes. Telemetry tracing follows
-requests through a system to provide a view of component interactions. Telemetry
-metrics collects data for significant performance indicators, offering insights
-into a system's health.
-
-## Logging and debugging
-
-While working with the Go Client Libraries you may run into some situations
-where you need a deeper level of understanding about what is going on in order
-to solve your problem. Here are some tips and tricks that you can use in these
-cases. *Note* that many of the tips in this section will have a performance
-impact and are therefore not recommended for sustained production use. Use these
-tips locally or in production for a *limited time* to help get a better
-understanding of what is going on.
-
-### HTTP based clients
-
-All of our auto-generated clients have a constructor to create a client that
-uses HTTP/JSON instead of gRPC. Additionally a couple of our hand-written
-clients like Storage and Bigquery are also HTTP based. Here are some tips for
-debugging these clients.
-
-#### Try setting Go's HTTP debug variable
-
-Try setting the following environment variable for verbose Go HTTP logging:
-GODEBUG=http2debug=1. To read more about this feature please see the godoc for
-[net/http](https://pkg.go.dev/net/http).
-
-*WARNING*: Enabling this debug variable will log headers and payloads which may
-contain private information.
-
-#### Add in your own logging with an HTTP middleware
-
-You may want to add in your own logging around HTTP requests. One way to do this
-is to register a custom HTTP client with a logging transport built in. Here is
-an example of how you would do this with the storage client.
-
-*WARNING*: Adding this middleware will log headers and payloads which may
-contain private information.
-
-```go
-package main
-
-import (
- "context"
- "fmt"
- "log"
- "net/http"
- "net/http/httputil"
-
- "cloud.google.com/go/storage"
- "google.golang.org/api/iterator"
- "google.golang.org/api/option"
- htransport "google.golang.org/api/transport/http"
-)
-
-type loggingRoundTripper struct {
- rt http.RoundTripper
-}
-
-func (d loggingRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
- // Will create a dump of the request and body.
- dump, err := httputil.DumpRequest(r, true)
- if err != nil {
- log.Println("error dumping request")
- }
- log.Printf("%s", dump)
- return d.rt.RoundTrip(r)
-}
-
-func main() {
- ctx := context.Background()
-
- // Create a transport with authentication built-in detected with
- // [ADC](https://google.aip.dev/auth/4110). Note you will have to pass any
- // required scoped for the client you are using.
- trans, err := htransport.NewTransport(ctx,
- http.DefaultTransport,
- option.WithScopes(storage.ScopeFullControl),
- )
- if err != nil {
- log.Fatal(err)
- }
-
- // Embed customized transport into an HTTP client.
- hc := &http.Client{
- Transport: loggingRoundTripper{rt: trans},
- }
-
- // Supply custom HTTP client for use by the library.
- client, err := storage.NewClient(ctx, option.WithHTTPClient(hc))
- if err != nil {
- log.Fatal(err)
- }
- defer client.Close()
- // Use the client
-}
-```
-
-### gRPC based clients
-
-#### Try setting grpc-go's debug variables
-
-Try setting the following environment variables for grpc-go:
-`GRPC_GO_LOG_VERBOSITY_LEVEL=99` `GRPC_GO_LOG_SEVERITY_LEVEL=info`. These are
-good for diagnosing connection level failures. For more information please see
-[grpc-go's debug documentation](https://pkg.go.dev/google.golang.org/grpc/examples/features/debugging#section-readme).
-
-#### Add in your own logging with a gRPC interceptors
-
-You may want to add in your own logging around gRPC requests. One way to do this
-is to register a custom interceptor that adds logging. Here is
-an example of how you would do this with the secretmanager client. Note this
-example registers a UnaryClientInterceptor but you may want/need to register
-a StreamClientInterceptor instead-of/as-well depending on what kinds of
-RPCs you are calling.
-
-*WARNING*: Adding this interceptor will log metadata and payloads which may
-contain private information.
-
-```go
-package main
-
-import (
- "context"
- "log"
-
- secretmanager "cloud.google.com/go/secretmanager/apiv1"
- "google.golang.org/api/option"
- "google.golang.org/grpc"
- "google.golang.org/grpc/metadata"
- "google.golang.org/protobuf/encoding/protojson"
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-func loggingUnaryInterceptor() grpc.UnaryClientInterceptor {
- return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
- err := invoker(ctx, method, req, reply, cc, opts...)
- log.Printf("Invoked method: %v", method)
- md, ok := metadata.FromOutgoingContext(ctx)
- if ok {
- log.Println("Metadata:")
- for k, v := range md {
- log.Printf("Key: %v, Value: %v", k, v)
- }
- }
- reqb, merr := protojson.Marshal(req.(protoreflect.ProtoMessage))
- if merr == nil {
- log.Printf("Request: %s", reqb)
- }
- return err
- }
-}
-
-func main() {
- ctx := context.Background()
- // Supply custom gRPC interceptor for use by the client.
- client, err := secretmanager.NewClient(ctx,
- option.WithGRPCDialOption(grpc.WithUnaryInterceptor(loggingUnaryInterceptor())),
- )
- if err != nil {
- log.Fatal(err)
- }
- defer client.Close()
- // Use the client
-}
-```
-
-## Telemetry
-
-**Warning: The OpenCensus project is obsolete and was archived on July 31st,
-2023.** This means that any security vulnerabilities that are found will not be
-patched. We recommend that you begin migrating to OpenCensus tracing to
-OpenTelemetry, the successor project. See [OpenCensus](#opencensus) below for
-details.
-
-The Google Cloud client libraries for Go still use the OpenCensus project by
-default. However, opt-in support for
-[OpenTelemetry](https://opentelemetry.io/docs/what-is-opentelemetry/) is now
-available. The transition from OpenCensus to OpenTelemetry is covered in the
-following sections.
-
-### Tracing (experimental)
-
-Apart from spans created by underlying libraries such as gRPC, Google Cloud Go
-generated clients do not create spans. Only the spans created by following
-hand-written clients are in scope for the discussion in this section:
-
-* [cloud.google.com/go/bigquery](https://pkg.go.dev/cloud.google.com/go/bigquery)
-* [cloud.google.com/go/bigtable](https://pkg.go.dev/cloud.google.com/go/bigtable)
-* [cloud.google.com/go/datastore](https://pkg.go.dev/cloud.google.com/go/datastore)
-* [cloud.google.com/go/firestore](https://pkg.go.dev/cloud.google.com/go/firestore)
-* [cloud.google.com/go/spanner](https://pkg.go.dev/cloud.google.com/go/spanner)
-* [cloud.google.com/go/storage](https://pkg.go.dev/cloud.google.com/go/storage)
-
-Currently, the spans created by these clients are for OpenCensus. However,
-OpenCensus users are urged to transition to OpenTelemetry as soon as possible,
-as explained in the next section. OpenTelemetry users can opt-in to experimental
-OpenTelemetry support via an environment variable, as described below.
-
-#### OpenCensus
-
-**Warning: The OpenCensus project is obsolete and was archived on July 31st,
-2023.** This means that any security vulnerabilities that are found will not be
-patched. We recommend that you begin migrating to OpenCensus tracing to
-OpenTelemetry, the successor project.
-
-Using the [OpenTelemetry-Go - OpenCensus Bridge](https://pkg.go.dev/go.opentelemetry.io/otel/bridge/opencensus), you can immediately begin exporting your traces with OpenTelemetry, even while
-dependencies of your application remain instrumented with OpenCensus. If you do
-not use the bridge, you will need to migrate your entire application and all of
-its instrumented dependencies at once. For simple applications, this may be
-possible, but we expect the bridge to be helpful if multiple libraries with
-instrumentation are used.
-
-On May 29, 2024, six months after the
-[release](https://github.com/googleapis/google-cloud-go/releases/tag/v0.111.0)
-of experimental, opt-in support for OpenTelemetry tracing, the default tracing
-support in the clients above will change from OpenCensus to OpenTelemetry, and
-the experimental OpenCensus support will be marked as deprecated. To continue
-using the OpenCensus support after this change, set the environment variable
-`GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING` to the case-insensitive
-value `opencensus` before loading the client library.
-
-```sh
-export GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING=opencensus
-```
-
-On December 2nd, 2024, one year after the release of OpenTelemetry support, the
-experimental and deprecated support for OpenCensus tracing will be removed.
-
-Please note that all Google Cloud Go clients currently provide experimental
-support for the propagation of both OpenCensus and OpenTelemetry trace context
-to their receiving endpoints. The experimental support for OpenCensus trace
-context propagation will be removed at the same time as the experimental
-OpenCensus tracing support.
-
-Please refer to the following resources:
-
-* [Sunsetting OpenCensus](https://opentelemetry.io/blog/2023/sunsetting-opencensus/)
-* [OpenTelemetry-Go - OpenCensus Bridge](https://pkg.go.dev/go.opentelemetry.io/otel/bridge/opencensus)
-
-#### OpenTelemetry
-
-**Warning: OpenTelemetry-Go ensures
-[compatibility](https://github.com/open-telemetry/opentelemetry-go/tree/main?tab=readme-ov-file#compatibility)
-with ONLY the current supported versions of the [Go
-language](https://go.dev/doc/devel/release#policy). This support may be narrower
-than the support that has been offered historically by the Go Client Libraries.
-Ensure that your Go runtime version is supported by the OpenTelemetry-Go
-[compatibility](https://github.com/open-telemetry/opentelemetry-go/tree/main?tab=readme-ov-file#compatibility)
-policy before enabling OpenTelemetry instrumentation.**
-
-To opt-in to experimental OpenTelemetry tracing currently available in the
-clients listed above, set the environment variable
-`GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING` to the case-insensitive
-value `opentelemetry` before loading the client library.
-
-```sh
-export GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING=opentelemetry
-```
-
-On May 29, 2024, the default tracing support will change from OpenCensus to
-OpenTelemetry, and this environment variable will no longer be needed.
-
-Please refer to the following resources:
-
-* [What is OpenTelemetry?](https://opentelemetry.io/docs/what-is-opentelemetry/)
-* [Cloud Trace - Go and OpenTelemetry](https://cloud.google.com/trace/docs/setup/go-ot)
-* On GCE, [use Ops Agent and OpenTelemetry](https://cloud.google.com/trace/docs/otlp)
-
-##### Configuring the OpenTelemetry-Go - OpenCensus Bridge
-
-To configure the OpenCensus bridge with OpenTelemetry and Cloud Trace:
-
-```go
-import (
- "context"
- "log"
- "os"
- texporter "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace"
- octrace "go.opencensus.io/trace"
- "go.opentelemetry.io/contrib/detectors/gcp"
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/bridge/opencensus"
- "go.opentelemetry.io/otel/sdk/resource"
- sdktrace "go.opentelemetry.io/otel/sdk/trace"
- semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
-)
-
-func main() {
- // Create exporter.
- ctx := context.Background()
- projectID := os.Getenv("GOOGLE_CLOUD_PROJECT")
- exporter, err := texporter.New(texporter.WithProjectID(projectID))
- if err != nil {
- log.Fatalf("texporter.New: %v", err)
- }
- // Identify your application using resource detection
- res, err := resource.New(ctx,
- // Use the GCP resource detector to detect information about the GCP platform
- resource.WithDetectors(gcp.NewDetector()),
- // Keep the default detectors
- resource.WithTelemetrySDK(),
- // Add your own custom attributes to identify your application
- resource.WithAttributes(
- semconv.ServiceNameKey.String("my-application"),
- ),
- )
- if err != nil {
- log.Fatalf("resource.New: %v", err)
- }
- // Create trace provider with the exporter.
- //
- // By default it uses AlwaysSample() which samples all traces.
- // In a production environment or high QPS setup please use
- // probabilistic sampling.
- // Example:
- // tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.TraceIDRatioBased(0.0001)), ...)
- tp := sdktrace.NewTracerProvider(
- sdktrace.WithBatcher(exporter),
- sdktrace.WithResource(res),
- )
- defer tp.Shutdown(ctx) // flushes any pending spans, and closes connections.
- otel.SetTracerProvider(tp)
- tracer := otel.GetTracerProvider().Tracer("example.com/trace")
- // Configure the OpenCensus tracer to use the bridge.
- octrace.DefaultTracer = opencensus.NewTracer(tracer)
- // Use otel tracer to create spans...
-}
-
-```
-
-
-##### Configuring context propagation
-
-In order to pass options to OpenTelemetry trace context propagation, follow the
-appropriate example for the client's underlying transport.
-
-###### Passing options in HTTP-based clients
-
-```go
-ctx := context.Background()
-trans, err := htransport.NewTransport(ctx,
- http.DefaultTransport,
- option.WithScopes(storage.ScopeFullControl),
-)
-if err != nil {
- log.Fatal(err)
-}
-// An example of passing options to the otelhttp.Transport.
-otelOpts := otelhttp.WithFilter(func(r *http.Request) bool {
- return r.URL.Path != "/ping"
-})
-hc := &http.Client{
- Transport: otelhttp.NewTransport(trans, otelOpts),
-}
-client, err := storage.NewClient(ctx, option.WithHTTPClient(hc))
-```
-
-Note that scopes must be set manually in this user-configured solution.
-
-###### Passing options in gRPC-based clients
-
-```go
-projectID := "..."
-ctx := context.Background()
-
-// An example of passing options to grpc.WithStatsHandler.
-otelOpts := otelgrpc.WithMessageEvents(otelgrpc.ReceivedEvents)
-dialOpts := grpc.WithStatsHandler(otelgrpc.NewClientHandler(otelOpts))
-
-ctx := context.Background()
-c, err := datastore.NewClient(ctx, projectID, option.WithGRPCDialOption(dialOpts))
-if err != nil {
- log.Fatal(err)
-}
-defer c.Close()
-```
-
-
-### Metrics (experimental)
-
-The generated clients do not create metrics. Only the following hand-written
-clients create experimental OpenCensus metrics:
-
-* [cloud.google.com/go/bigquery](https://pkg.go.dev/cloud.google.com/go/bigquery)
-* [cloud.google.com/go/pubsub](https://pkg.go.dev/cloud.google.com/go/pubsub)
-* [cloud.google.com/go/spanner](https://pkg.go.dev/cloud.google.com/go/spanner)
-
-#### OpenTelemetry
-
-The transition of the experimental metrics in the clients above from OpenCensus
-to OpenTelemetry is still TBD.
\ No newline at end of file
diff --git a/upstream/vendor/cloud.google.com/go/doc.go b/upstream/vendor/cloud.google.com/go/doc.go
deleted file mode 100644
index 133ff68553f..00000000000
--- a/upstream/vendor/cloud.google.com/go/doc.go
+++ /dev/null
@@ -1,294 +0,0 @@
-// Copyright 2014 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package cloud is the root of the packages used to access Google Cloud
-Services. See https://pkg.go.dev/cloud.google.com/go for a full list
-of sub-modules.
-
-# Client Options
-
-All clients in sub-packages are configurable via client options. These options
-are described here: https://pkg.go.dev/google.golang.org/api/option.
-
-# Endpoint Override
-
-Endpoint configuration is used to specify the URL to which requests are
-sent. It is used for services that support or require regional endpoints, as
-well as for other use cases such as [testing against fake servers].
-
-For example, the Vertex AI service recommends that you configure the endpoint to
-the location with the features you want that is closest to your physical
-location or the location of your users. There is no global endpoint for Vertex
-AI. See [Vertex AI - Locations] for more details. The following example
-demonstrates configuring a Vertex AI client with a regional endpoint:
-
- ctx := context.Background()
- endpoint := "us-central1-aiplatform.googleapis.com:443"
- client, err := aiplatform.NewDatasetClient(ctx, option.WithEndpoint(endpoint))
-
-# Authentication and Authorization
-
-All of the clients support authentication via [Google Application Default Credentials],
-or by providing a JSON key file for a Service Account. See examples below.
-
-Google Application Default Credentials (ADC) is the recommended way to authorize
-and authenticate clients. For information on how to create and obtain
-Application Default Credentials, see
-https://cloud.google.com/docs/authentication/production. If you have your
-environment configured correctly you will not need to pass any extra information
-to the client libraries. Here is an example of a client using ADC to
-authenticate:
-
- client, err := secretmanager.NewClient(context.Background())
- if err != nil {
- // TODO: handle error.
- }
- _ = client // Use the client.
-
-You can use a file with credentials to authenticate and authorize, such as a
-JSON key file associated with a Google service account. Service Account keys can
-be created and downloaded from https://console.cloud.google.com/iam-admin/serviceaccounts.
-This example uses the Secret Manger client, but the same steps apply to the
-all other client libraries this package as well. Example:
-
- client, err := secretmanager.NewClient(context.Background(),
- option.WithCredentialsFile("/path/to/service-account-key.json"))
- if err != nil {
- // TODO: handle error.
- }
- _ = client // Use the client.
-
-In some cases (for instance, you don't want to store secrets on disk), you can
-create credentials from in-memory JSON and use the WithCredentials option.
-This example uses the Secret Manager client, but the same steps apply to
-all other client libraries as well. Note that scopes can be
-found at https://developers.google.com/identity/protocols/oauth2/scopes, and
-are also provided in all auto-generated libraries: for example,
-cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example:
-
- ctx := context.Background()
- // https://pkg.go.dev/golang.org/x/oauth2/google
- creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...)
- if err != nil {
- // TODO: handle error.
- }
- client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds))
- if err != nil {
- // TODO: handle error.
- }
- _ = client // Use the client.
-
-# Timeouts and Cancellation
-
-By default, non-streaming methods, like Create or Get, will have a default
-deadline applied to the context provided at call time, unless a context deadline
-is already set. Streaming methods have no default deadline and will run
-indefinitely. To set timeouts or arrange for cancellation, use
-[context]. Transient errors will be retried when correctness allows.
-
-Here is an example of setting a timeout for an RPC using
-[context.WithTimeout]:
-
- ctx := context.Background()
- // Do not set a timeout on the context passed to NewClient: dialing happens
- // asynchronously, and the context is used to refresh credentials in the
- // background.
- client, err := secretmanager.NewClient(ctx)
- if err != nil {
- // TODO: handle error.
- }
- // Time out if it takes more than 10 seconds to create a dataset.
- tctx, cancel := context.WithTimeout(ctx, 10*time.Second)
- defer cancel() // Always call cancel.
-
- req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"}
- if err := client.DeleteSecret(tctx, req); err != nil {
- // TODO: handle error.
- }
-
-Here is an example of setting a timeout for an RPC using
-[github.com/googleapis/gax-go/v2.WithTimeout]:
-
- ctx := context.Background()
- // Do not set a timeout on the context passed to NewClient: dialing happens
- // asynchronously, and the context is used to refresh credentials in the
- // background.
- client, err := secretmanager.NewClient(ctx)
- if err != nil {
- // TODO: handle error.
- }
-
- req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"}
- // Time out if it takes more than 10 seconds to create a dataset.
- if err := client.DeleteSecret(tctx, req, gax.WithTimeout(10*time.Second)); err != nil {
- // TODO: handle error.
- }
-
-Here is an example of how to arrange for an RPC to be canceled, use
-[context.WithCancel]:
-
- ctx := context.Background()
- // Do not cancel the context passed to NewClient: dialing happens asynchronously,
- // and the context is used to refresh credentials in the background.
- client, err := secretmanager.NewClient(ctx)
- if err != nil {
- // TODO: handle error.
- }
- cctx, cancel := context.WithCancel(ctx)
- defer cancel() // Always call cancel.
-
- // TODO: Make the cancel function available to whatever might want to cancel the
- // call--perhaps a GUI button.
- req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"}
- if err := client.DeleteSecret(cctx, req); err != nil {
- // TODO: handle error.
- }
-
-Do not attempt to control the initial connection (dialing) of a service by
-setting a timeout on the context passed to NewClient. Dialing is non-blocking,
-so timeouts would be ineffective and would only interfere with credential
-refreshing, which uses the same context.
-
-# Headers
-
-Regardless of which transport is used, request headers can be set in the same
-way using [`callctx.SetHeaders`][setheaders].
-
-Here is a generic example:
-
- // Set the header "key" to "value".
- ctx := callctx.SetHeaders(context.Background(), "key", "value")
-
- // Then use ctx in a subsequent request.
- response, err := client.GetSecret(ctx, request)
-
-## Google-reserved headers
-
-There are a some header keys that Google reserves for internal use that must
-not be ovewritten. The following header keys are broadly considered reserved
-and should not be conveyed by client library users unless instructed to do so:
-
-* `x-goog-api-client`
-* `x-goog-request-params`
-
-Be sure to check the individual package documentation for other service-specific
-reserved headers. For example, Storage supports a specific auditing header that
-is mentioned in that [module's documentation][storagedocs].
-
-## Google Cloud system parameters
-
-Google Cloud services respect [system parameters][system parameters] that can be
-used to augment request and/or response behavior. For the most part, they are
-not needed when using one of the enclosed client libraries. However, those that
-may be necessary are made available via the [`callctx`][callctx] package. If not
-present there, consider opening an issue on that repo to request a new constant.
-
-# Connection Pooling
-
-Connection pooling differs in clients based on their transport. Cloud
-clients either rely on HTTP or gRPC transports to communicate
-with Google Cloud.
-
-Cloud clients that use HTTP rely on the underlying HTTP transport to cache
-connections for later re-use. These are cached to the http.MaxIdleConns
-and http.MaxIdleConnsPerHost settings in http.DefaultTransport by default.
-
-For gRPC clients, connection pooling is configurable. Users of Cloud Client
-Libraries may specify option.WithGRPCConnectionPool(n) as a client option to
-NewClient calls. This configures the underlying gRPC connections to be pooled
-and accessed in a round robin fashion.
-
-# Using the Libraries in Container environments(Docker)
-
-Minimal container images like Alpine lack CA certificates. This causes RPCs to
-appear to hang, because gRPC retries indefinitely. See
-https://github.com/googleapis/google-cloud-go/issues/928 for more information.
-
-# Debugging
-
-For tips on how to write tests against code that calls into our libraries check
-out our [Debugging Guide].
-
-# Testing
-
-For tips on how to write tests against code that calls into our libraries check
-out our [Testing Guide].
-
-# Inspecting errors
-
-Most of the errors returned by the generated clients are wrapped in an
-[github.com/googleapis/gax-go/v2/apierror.APIError] and can be further unwrapped
-into a [google.golang.org/grpc/status.Status] or
-[google.golang.org/api/googleapi.Error] depending on the transport used to make
-the call (gRPC or REST). Converting your errors to these types can be a useful
-way to get more information about what went wrong while debugging.
-
-APIError gives access to specific details in the error. The transport-specific
-errors can still be unwrapped using the APIError.
-
- if err != nil {
- var ae *apierror.APIError
- if errors.As(err, &ae) {
- log.Println(ae.Reason())
- log.Println(ae.Details().Help.GetLinks())
- }
- }
-
-If the gRPC transport was used, the [google.golang.org/grpc/status.Status] can
-still be parsed using the [google.golang.org/grpc/status.FromError] function.
-
- if err != nil {
- if s, ok := status.FromError(err); ok {
- log.Println(s.Message())
- for _, d := range s.Proto().Details {
- log.Println(d)
- }
- }
- }
-
-# Client Stability
-
-Semver is used to communicate stability of the sub-modules of this package.
-Note, some stable sub-modules do contain packages, and sometimes features, that
-are considered unstable. If something is unstable it will be explicitly labeled
-as such. Example of package does in an unstable package:
-
- NOTE: This package is in beta. It is not stable, and may be subject to changes.
-
-Clients that contain alpha and beta in their import path may change or go away
-without notice.
-
-Clients marked stable will maintain compatibility with future versions for as
-long as we can reasonably sustain. Incompatible changes might be made in some
-situations, including:
-
- - Security bugs may prompt backwards-incompatible changes.
- - Situations in which components are no longer feasible to maintain without
- making breaking changes, including removal.
- - Parts of the client surface may be outright unstable and subject to change.
- These parts of the surface will be labeled with the note, "It is EXPERIMENTAL
- and subject to change or removal without notice."
-
-[testing against fake servers]: https://github.com/googleapis/google-cloud-go/blob/main/testing.md#testing-grpc-services-using-fakes
-[Vertex AI - Locations]: https://cloud.google.com/vertex-ai/docs/general/locations
-[Google Application Default Credentials]: https://cloud.google.com/docs/authentication/external/set-up-adc
-[Testing Guide]: https://github.com/googleapis/google-cloud-go/blob/main/testing.md
-[Debugging Guide]: https://github.com/googleapis/google-cloud-go/blob/main/debug.md
-[callctx]: https://pkg.go.dev/github.com/googleapis/gax-go/v2/callctx#pkg-constants
-[setheaders]: https://pkg.go.dev/github.com/googleapis/gax-go/v2/callctx#SetHeaders
-[storagedocs]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Sending_Custom_Headers
-[system parameters]: https://cloud.google.com/apis/docs/system-parameters
-*/
-package cloud // import "cloud.google.com/go"
diff --git a/upstream/vendor/cloud.google.com/go/go.work b/upstream/vendor/cloud.google.com/go/go.work
deleted file mode 100644
index bbf602e9af9..00000000000
--- a/upstream/vendor/cloud.google.com/go/go.work
+++ /dev/null
@@ -1,177 +0,0 @@
-go 1.21
-
-use (
- .
- ./accessapproval
- ./accesscontextmanager
- ./advisorynotifications
- ./ai
- ./aiplatform
- ./alloydb
- ./analytics
- ./apigateway
- ./apigeeconnect
- ./apigeeregistry
- ./apikeys
- ./appengine
- ./apphub
- ./apps
- ./area120
- ./artifactregistry
- ./asset
- ./assuredworkloads
- ./auth
- ./auth/oauth2adapt
- ./automl
- ./backupdr
- ./baremetalsolution
- ./batch
- ./beyondcorp
- ./bigquery
- ./bigtable
- ./billing
- ./binaryauthorization
- ./certificatemanager
- ./channel
- ./chat
- ./cloudbuild
- ./cloudcontrolspartner
- ./clouddms
- ./cloudprofiler
- ./cloudquotas
- ./cloudtasks
- ./commerce
- ./compute
- ./compute/metadata
- ./confidentialcomputing
- ./config
- ./contactcenterinsights
- ./container
- ./containeranalysis
- ./datacatalog
- ./dataflow
- ./dataform
- ./datafusion
- ./datalabeling
- ./dataplex
- ./dataproc
- ./dataqna
- ./datastore
- ./datastream
- ./deploy
- ./dialogflow
- ./discoveryengine
- ./dlp
- ./documentai
- ./domains
- ./edgecontainer
- ./edgenetwork
- ./errorreporting
- ./essentialcontacts
- ./eventarc
- ./filestore
- ./firestore
- ./functions
- ./gkebackup
- ./gkeconnect
- ./gkehub
- ./gkemulticloud
- ./grafeas
- ./gsuiteaddons
- ./iam
- ./iap
- ./identitytoolkit
- ./ids
- ./internal/actions
- ./internal/aliasfix
- ./internal/aliasgen
- ./internal/carver
- ./internal/examples/fake
- ./internal/examples/mock
- ./internal/gapicgen
- ./internal/generated/snippets
- ./internal/godocfx
- ./internal/postprocessor
- ./internal/protoveneer
- ./iot
- ./kms
- ./language
- ./lifesciences
- ./logging
- ./longrunning
- ./managedidentities
- ./maps
- ./mediatranslation
- ./memcache
- ./metastore
- ./migrationcenter
- ./monitoring
- ./netapp
- ./networkconnectivity
- ./networkmanagement
- ./networksecurity
- ./notebooks
- ./optimization
- ./orchestration
- ./orgpolicy
- ./osconfig
- ./oslogin
- ./parallelstore
- ./phishingprotection
- ./policysimulator
- ./policytroubleshooter
- ./privatecatalog
- ./profiler
- ./pubsub
- ./pubsublite
- ./rapidmigrationassessment
- ./recaptchaenterprise
- ./recommendationengine
- ./recommender
- ./redis
- ./resourcemanager
- ./resourcesettings
- ./retail
- ./run
- ./scheduler
- ./secretmanager
- ./securesourcemanager
- ./security
- ./securitycenter
- ./securitycentermanagement
- ./securityposture
- ./servicecontrol
- ./servicedirectory
- ./servicehealth
- ./servicemanagement
- ./serviceusage
- ./shell
- ./shopping
- ./spanner
- ./spanner/test/opentelemetry/test
- ./speech
- ./storage
- ./storage/internal/benchmarks
- ./storageinsights
- ./storagetransfer
- ./streetview
- ./support
- ./talent
- ./telcoautomation
- ./texttospeech
- ./tpu
- ./trace
- ./translate
- ./vertexai
- ./video
- ./videointelligence
- ./vision
- ./visionai
- ./vmmigration
- ./vmwareengine
- ./vpcaccess
- ./webrisk
- ./websecurityscanner
- ./workflows
- ./workstations
-)
diff --git a/upstream/vendor/cloud.google.com/go/go.work.sum b/upstream/vendor/cloud.google.com/go/go.work.sum
deleted file mode 100644
index 002f0a0872e..00000000000
--- a/upstream/vendor/cloud.google.com/go/go.work.sum
+++ /dev/null
@@ -1,87 +0,0 @@
-cloud.google.com/go/auth v0.2.0/go.mod h1:+yb+oy3/P0geX6DLKlqiGHARGR6EX2GRtYCzWOCQSbU=
-cloud.google.com/go/auth/oauth2adapt v0.2.0/go.mod h1:AfqujpDAlTfLfeCIl/HJZZlIxD8+nJoZ5e0x1IxGq5k=
-cloud.google.com/go/dataproc v1.12.0 h1:W47qHL3W4BPkAIbk4SWmIERwsWBaNnWm0P2sdx3YgGU=
-cloud.google.com/go/gaming v1.9.0 h1:7vEhFnZmd931Mo7sZ6pJy7uQPDxF7m7v8xtBheG08tc=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.18.0 h1:ugYJK/neZQtQeh2jc5xNoDFiMQojlAkoqJMRb7vTu1U=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.18.0/go.mod h1:Xx0VKh7GJ4si3rmElbh19Mejxz68ibWg/J30ZOMrqzU=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.46.0/go.mod h1:V28hx+cUCZC9e3qcqszMb+Sbt8cQZtHTiXOmyDzoDOg=
-github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
-github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
-github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
-github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/aws/aws-sdk-go-v2 v1.16.10/go.mod h1:WTACcleLz6VZTp7fak4EO5b9Q4foxbn+8PIz3PmyKlo=
-github.com/aws/aws-sdk-go-v2/config v1.15.9/go.mod h1:rv/l/TbZo67kp99v/3Kb0qV6Fm1KEtKyruEV2GvVfgs=
-github.com/aws/aws-sdk-go-v2/credentials v1.12.12/go.mod h1:vFHC2HifIWHebmoVsfpqliKuqbAY2LaVlvy03JzF4c4=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.11/go.mod h1:38Asv/UyQbDNpSXCurZRlDMjzIl6J+wUe8vY3TtUuzA=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.17/go.mod h1:6qtGip7sJEyvgsLjphRZWF9qPe3xJf1mL/MM01E35Wc=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.11/go.mod h1:cYAfnB+9ZkmZWpQWmPDsuIGm4EA+6k2ZVtxKjw/XJBY=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.3.18/go.mod h1:hTHq8hL4bAxJyng364s9d4IUGXZOs7Y5LSqAhIiIQ2A=
-github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.18.3/go.mod h1:BrAJyOMrnwzYVQcP5ziqlCpnEuFfkNppZLzqDyW/YTg=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.11/go.mod h1:OEofCUKF7Hri4ShOCokF6k6hGq9PCB2sywt/9rLSXjY=
-github.com/aws/aws-sdk-go-v2/service/sso v1.11.15/go.mod h1:dDVD4ElJRTQXx7dOQ59EkqGyNU9tnwy1RKln+oLIOTU=
-github.com/aws/aws-sdk-go-v2/service/sts v1.16.12/go.mod h1:b53qpmhHk7mTL2J/tfG6f38neZiyBQSiNXGCuNKq4+4=
-github.com/aws/smithy-go v1.12.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
-github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8=
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
-github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
-github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
-github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
-github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
-github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
-github.com/fullstorydev/grpcurl v1.8.7/go.mod h1:pVtM4qe3CMoLaIzYS8uvTuDj2jVYmXqMUkZeijnXp/E=
-github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8=
-github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
-github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
-github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
-github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
-github.com/google/go-jsonnet v0.20.0/go.mod h1:VbgWF9JX7ztlv770x/TolZNGGFfiHEVx9G6ca2eUmeA=
-github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
-github.com/hoisie/redis v0.0.0-20160730154456-b5c6e81454e0/go.mod h1:pMYMxVaKJqCDC1JUg/XbPJ4/fSazB25zORpFzqsIGIc=
-github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
-github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
-github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/itchyny/gojq v0.12.9/go.mod h1:T4Ip7AETUXeGpD+436m+UEl3m3tokRgajd5pRfsR5oE=
-github.com/itchyny/timefmt-go v0.1.4/go.mod h1:nEP7L+2YmAbT2kZ2HfSs1d8Xtw9LY8D2stDBckWakZ8=
-github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/miekg/dns v1.1.33/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
-github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
-github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/mmcloughlin/avo v0.5.0/go.mod h1:ChHFdoV7ql95Wi7vuq2YT1bwCJqiWdZrQ1im3VujLYM=
-github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
-github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
-github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
-github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
-go.opentelemetry.io/otel v1.23.1/go.mod h1:Td0134eafDLcTS4y+zQ26GE8u3dEuRBiBCTUIRHaikA=
-go.opentelemetry.io/otel/bridge/opencensus v0.40.0 h1:pqDiayRhBgoqy1vwnscik+TizcImJ58l053NScJyZso=
-go.opentelemetry.io/otel/bridge/opencensus v0.40.0/go.mod h1:1NvVHb6tLTe5A9qCYz+eErW0t8iPn4ZfR6tDKcqlGTM=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0/go.mod h1:U707O40ee1FpQGyhvqnzmCJm1Wh6OX6GGBVn0E6Uyyk=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0/go.mod h1:qcTO4xHAxZLaLxPd60TdE88rxtItPHgHWqOhOGRr0as=
-go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0/go.mod h1:sTt30Evb7hJB/gEk27qLb1+l9n4Tb8HvHkR0Wx3S6CU=
-go.opentelemetry.io/otel/metric v1.23.1/go.mod h1:mpG2QPlAfnK8yNhNJAxDZruU9Y1/HubbC+KyH8FaCWI=
-go.opentelemetry.io/otel/trace v1.23.1/go.mod h1:4IpnpJFwr1mo/6HL8XIPJaE9y0+u1KcVmuW7dwFSVrI=
-golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
-golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
-golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/telemetry v0.0.0-20240208230135-b75ee8823808/go.mod h1:KG1lNk5ZFNssSZLrpVb4sMXKMpGwGXOxSG3rnu2gZQQ=
-golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
-golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
-golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg=
-golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
-google.golang.org/api v0.174.0/go.mod h1:aC7tB6j0HR1Nl0ni5ghpx6iLasmAX78Zkh/wgxAAjLg=
-google.golang.org/genproto v0.0.0-20230725213213-b022f6e96895/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108=
-google.golang.org/genproto/googleapis/api v0.0.0-20230725213213-b022f6e96895/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
-google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda/go.mod h1:AHcE/gZH76Bk/ROZhQphlRoWo5xKDEtz3eVEO1LfA8c=
-google.golang.org/genproto/googleapis/bytestream v0.0.0-20231120223509-83a465c0220f/go.mod h1:iIgEblxoG4klcXsG0d9cpoxJ4xndv6+1FkDROCHhPRI=
-google.golang.org/genproto/googleapis/bytestream v0.0.0-20240102182953-50ed04b92917/go.mod h1:O9TvT7A9NLgdqqF0JJXJ+axpaoYiEb8txGmkvy+AvLc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230725213213-b022f6e96895/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
-sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/upstream/vendor/cloud.google.com/go/iam/CHANGES.md b/upstream/vendor/cloud.google.com/go/iam/CHANGES.md
index af5ff374887..43a17938486 100644
--- a/upstream/vendor/cloud.google.com/go/iam/CHANGES.md
+++ b/upstream/vendor/cloud.google.com/go/iam/CHANGES.md
@@ -1,20 +1,6 @@
# Changes
-## [1.1.8](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.7...iam/v1.1.8) (2024-05-01)
-
-
-### Bug Fixes
-
-* **iam:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4))
-
-## [1.1.7](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.6...iam/v1.1.7) (2024-03-14)
-
-
-### Bug Fixes
-
-* **iam:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
-
## [1.1.6](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.5...iam/v1.1.6) (2024-01-30)
diff --git a/upstream/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/upstream/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
index 3fbf4530d0d..b5243e61291 100644
--- a/upstream/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
+++ b/upstream/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.33.0
-// protoc v4.25.3
+// protoc-gen-go v1.32.0
+// protoc v4.23.2
// source: google/iam/v1/iam_policy.proto
package iampb
diff --git a/upstream/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/upstream/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
index 29738ad1ce3..3f854fe496e 100644
--- a/upstream/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
+++ b/upstream/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.33.0
-// protoc v4.25.3
+// protoc-gen-go v1.32.0
+// protoc v4.23.2
// source: google/iam/v1/options.proto
package iampb
diff --git a/upstream/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/upstream/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
index a4e15741b64..dfc60661a30 100644
--- a/upstream/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
+++ b/upstream/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.33.0
-// protoc v4.25.3
+// protoc-gen-go v1.32.0
+// protoc v4.23.2
// source: google/iam/v1/policy.proto
package iampb
diff --git a/upstream/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go b/upstream/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go
deleted file mode 100644
index fa01cf3dddb..00000000000
--- a/upstream/vendor/cloud.google.com/go/kms/apiv1/autokey_admin_client.go
+++ /dev/null
@@ -1,1268 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
-
-package kms
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "math"
- "net/http"
- "net/url"
- "time"
-
- iampb "cloud.google.com/go/iam/apiv1/iampb"
- kmspb "cloud.google.com/go/kms/apiv1/kmspb"
- longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
- gax "github.com/googleapis/gax-go/v2"
- "google.golang.org/api/googleapi"
- "google.golang.org/api/iterator"
- "google.golang.org/api/option"
- "google.golang.org/api/option/internaloption"
- gtransport "google.golang.org/api/transport/grpc"
- httptransport "google.golang.org/api/transport/http"
- locationpb "google.golang.org/genproto/googleapis/cloud/location"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/protobuf/encoding/protojson"
- "google.golang.org/protobuf/proto"
-)
-
-var newAutokeyAdminClientHook clientHook
-
-// AutokeyAdminCallOptions contains the retry settings for each method of AutokeyAdminClient.
-type AutokeyAdminCallOptions struct {
- UpdateAutokeyConfig []gax.CallOption
- GetAutokeyConfig []gax.CallOption
- ShowEffectiveAutokeyConfig []gax.CallOption
- GetLocation []gax.CallOption
- ListLocations []gax.CallOption
- GetIamPolicy []gax.CallOption
- SetIamPolicy []gax.CallOption
- TestIamPermissions []gax.CallOption
- GetOperation []gax.CallOption
-}
-
-func defaultAutokeyAdminGRPCClientOptions() []option.ClientOption {
- return []option.ClientOption{
- internaloption.WithDefaultEndpoint("cloudkms.googleapis.com:443"),
- internaloption.WithDefaultEndpointTemplate("cloudkms.UNIVERSE_DOMAIN:443"),
- internaloption.WithDefaultMTLSEndpoint("cloudkms.mtls.googleapis.com:443"),
- internaloption.WithDefaultUniverseDomain("googleapis.com"),
- internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
- internaloption.WithDefaultScopes(DefaultAuthScopes()...),
- internaloption.EnableJwtWithScope(),
- internaloption.EnableNewAuthLibrary(),
- option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
- grpc.MaxCallRecvMsgSize(math.MaxInt32))),
- }
-}
-
-func defaultAutokeyAdminCallOptions() *AutokeyAdminCallOptions {
- return &AutokeyAdminCallOptions{
- UpdateAutokeyConfig: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 100 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- GetAutokeyConfig: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 100 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- ShowEffectiveAutokeyConfig: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 100 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- GetLocation: []gax.CallOption{},
- ListLocations: []gax.CallOption{},
- GetIamPolicy: []gax.CallOption{},
- SetIamPolicy: []gax.CallOption{},
- TestIamPermissions: []gax.CallOption{},
- GetOperation: []gax.CallOption{},
- }
-}
-
-func defaultAutokeyAdminRESTCallOptions() *AutokeyAdminCallOptions {
- return &AutokeyAdminCallOptions{
- UpdateAutokeyConfig: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 100 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- GetAutokeyConfig: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 100 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- ShowEffectiveAutokeyConfig: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 100 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- GetLocation: []gax.CallOption{},
- ListLocations: []gax.CallOption{},
- GetIamPolicy: []gax.CallOption{},
- SetIamPolicy: []gax.CallOption{},
- TestIamPermissions: []gax.CallOption{},
- GetOperation: []gax.CallOption{},
- }
-}
-
-// internalAutokeyAdminClient is an interface that defines the methods available from Cloud Key Management Service (KMS) API.
-type internalAutokeyAdminClient interface {
- Close() error
- setGoogleClientInfo(...string)
- Connection() *grpc.ClientConn
- UpdateAutokeyConfig(context.Context, *kmspb.UpdateAutokeyConfigRequest, ...gax.CallOption) (*kmspb.AutokeyConfig, error)
- GetAutokeyConfig(context.Context, *kmspb.GetAutokeyConfigRequest, ...gax.CallOption) (*kmspb.AutokeyConfig, error)
- ShowEffectiveAutokeyConfig(context.Context, *kmspb.ShowEffectiveAutokeyConfigRequest, ...gax.CallOption) (*kmspb.ShowEffectiveAutokeyConfigResponse, error)
- GetLocation(context.Context, *locationpb.GetLocationRequest, ...gax.CallOption) (*locationpb.Location, error)
- ListLocations(context.Context, *locationpb.ListLocationsRequest, ...gax.CallOption) *LocationIterator
- GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
- SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
- TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error)
- GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error)
-}
-
-// AutokeyAdminClient is a client for interacting with Cloud Key Management Service (KMS) API.
-// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
-//
-// Provides interfaces for managing Cloud KMS Autokey folder-level
-// configurations. A configuration is inherited by all descendent projects. A
-// configuration at one folder overrides any other configurations in its
-// ancestry. Setting a configuration on a folder is a prerequisite for Cloud KMS
-// Autokey, so that users working in a descendant project can request
-// provisioned CryptoKeys, ready for Customer
-// Managed Encryption Key (CMEK) use, on-demand.
-type AutokeyAdminClient struct {
- // The internal transport-dependent client.
- internalClient internalAutokeyAdminClient
-
- // The call options for this service.
- CallOptions *AutokeyAdminCallOptions
-}
-
-// Wrapper methods routed to the internal client.
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *AutokeyAdminClient) Close() error {
- return c.internalClient.Close()
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *AutokeyAdminClient) setGoogleClientInfo(keyval ...string) {
- c.internalClient.setGoogleClientInfo(keyval...)
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: Connections are now pooled so this method does not always
-// return the same resource.
-func (c *AutokeyAdminClient) Connection() *grpc.ClientConn {
- return c.internalClient.Connection()
-}
-
-// UpdateAutokeyConfig updates the AutokeyConfig for a
-// folder. The caller must have both cloudkms.autokeyConfigs.update
-// permission on the parent folder and cloudkms.cryptoKeys.setIamPolicy
-// permission on the provided key project. A
-// KeyHandle creation in the folder’s
-// descendant projects will use this configuration to determine where to
-// create the resulting CryptoKey.
-func (c *AutokeyAdminClient) UpdateAutokeyConfig(ctx context.Context, req *kmspb.UpdateAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.AutokeyConfig, error) {
- return c.internalClient.UpdateAutokeyConfig(ctx, req, opts...)
-}
-
-// GetAutokeyConfig returns the AutokeyConfig for a
-// folder.
-func (c *AutokeyAdminClient) GetAutokeyConfig(ctx context.Context, req *kmspb.GetAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.AutokeyConfig, error) {
- return c.internalClient.GetAutokeyConfig(ctx, req, opts...)
-}
-
-// ShowEffectiveAutokeyConfig returns the effective Cloud KMS Autokey configuration for a given project.
-func (c *AutokeyAdminClient) ShowEffectiveAutokeyConfig(ctx context.Context, req *kmspb.ShowEffectiveAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.ShowEffectiveAutokeyConfigResponse, error) {
- return c.internalClient.ShowEffectiveAutokeyConfig(ctx, req, opts...)
-}
-
-// GetLocation gets information about a location.
-func (c *AutokeyAdminClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) {
- return c.internalClient.GetLocation(ctx, req, opts...)
-}
-
-// ListLocations lists information about the supported locations for this service.
-func (c *AutokeyAdminClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator {
- return c.internalClient.ListLocations(ctx, req, opts...)
-}
-
-// GetIamPolicy gets the access control policy for a resource. Returns an empty policy
-// if the resource exists and does not have a policy set.
-func (c *AutokeyAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- return c.internalClient.GetIamPolicy(ctx, req, opts...)
-}
-
-// SetIamPolicy sets the access control policy on the specified resource. Replaces
-// any existing policy.
-//
-// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED
-// errors.
-func (c *AutokeyAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- return c.internalClient.SetIamPolicy(ctx, req, opts...)
-}
-
-// TestIamPermissions returns permissions that a caller has on the specified resource. If the
-// resource does not exist, this will return an empty set of
-// permissions, not a NOT_FOUND error.
-//
-// Note: This operation is designed to be used for building
-// permission-aware UIs and command-line tools, not for authorization
-// checking. This operation may “fail open” without warning.
-func (c *AutokeyAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
- return c.internalClient.TestIamPermissions(ctx, req, opts...)
-}
-
-// GetOperation is a utility method from google.longrunning.Operations.
-func (c *AutokeyAdminClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- return c.internalClient.GetOperation(ctx, req, opts...)
-}
-
-// autokeyAdminGRPCClient is a client for interacting with Cloud Key Management Service (KMS) API over gRPC transport.
-//
-// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
-type autokeyAdminGRPCClient struct {
- // Connection pool of gRPC connections to the service.
- connPool gtransport.ConnPool
-
- // Points back to the CallOptions field of the containing AutokeyAdminClient
- CallOptions **AutokeyAdminCallOptions
-
- // The gRPC API client.
- autokeyAdminClient kmspb.AutokeyAdminClient
-
- operationsClient longrunningpb.OperationsClient
-
- iamPolicyClient iampb.IAMPolicyClient
-
- locationsClient locationpb.LocationsClient
-
- // The x-goog-* metadata to be sent with each request.
- xGoogHeaders []string
-}
-
-// NewAutokeyAdminClient creates a new autokey admin client based on gRPC.
-// The returned client must be Closed when it is done being used to clean up its underlying connections.
-//
-// Provides interfaces for managing Cloud KMS Autokey folder-level
-// configurations. A configuration is inherited by all descendent projects. A
-// configuration at one folder overrides any other configurations in its
-// ancestry. Setting a configuration on a folder is a prerequisite for Cloud KMS
-// Autokey, so that users working in a descendant project can request
-// provisioned CryptoKeys, ready for Customer
-// Managed Encryption Key (CMEK) use, on-demand.
-func NewAutokeyAdminClient(ctx context.Context, opts ...option.ClientOption) (*AutokeyAdminClient, error) {
- clientOpts := defaultAutokeyAdminGRPCClientOptions()
- if newAutokeyAdminClientHook != nil {
- hookOpts, err := newAutokeyAdminClientHook(ctx, clientHookParams{})
- if err != nil {
- return nil, err
- }
- clientOpts = append(clientOpts, hookOpts...)
- }
-
- connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
- if err != nil {
- return nil, err
- }
- client := AutokeyAdminClient{CallOptions: defaultAutokeyAdminCallOptions()}
-
- c := &autokeyAdminGRPCClient{
- connPool: connPool,
- autokeyAdminClient: kmspb.NewAutokeyAdminClient(connPool),
- CallOptions: &client.CallOptions,
- operationsClient: longrunningpb.NewOperationsClient(connPool),
- iamPolicyClient: iampb.NewIAMPolicyClient(connPool),
- locationsClient: locationpb.NewLocationsClient(connPool),
- }
- c.setGoogleClientInfo()
-
- client.internalClient = c
-
- return &client, nil
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: Connections are now pooled so this method does not always
-// return the same resource.
-func (c *autokeyAdminGRPCClient) Connection() *grpc.ClientConn {
- return c.connPool.Conn()
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *autokeyAdminGRPCClient) setGoogleClientInfo(keyval ...string) {
- kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
- kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
- c.xGoogHeaders = []string{
- "x-goog-api-client", gax.XGoogHeader(kv...),
- }
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *autokeyAdminGRPCClient) Close() error {
- return c.connPool.Close()
-}
-
-// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
-type autokeyAdminRESTClient struct {
- // The http endpoint to connect to.
- endpoint string
-
- // The http client.
- httpClient *http.Client
-
- // The x-goog-* headers to be sent with each request.
- xGoogHeaders []string
-
- // Points back to the CallOptions field of the containing AutokeyAdminClient
- CallOptions **AutokeyAdminCallOptions
-}
-
-// NewAutokeyAdminRESTClient creates a new autokey admin rest client.
-//
-// Provides interfaces for managing Cloud KMS Autokey folder-level
-// configurations. A configuration is inherited by all descendent projects. A
-// configuration at one folder overrides any other configurations in its
-// ancestry. Setting a configuration on a folder is a prerequisite for Cloud KMS
-// Autokey, so that users working in a descendant project can request
-// provisioned CryptoKeys, ready for Customer
-// Managed Encryption Key (CMEK) use, on-demand.
-func NewAutokeyAdminRESTClient(ctx context.Context, opts ...option.ClientOption) (*AutokeyAdminClient, error) {
- clientOpts := append(defaultAutokeyAdminRESTClientOptions(), opts...)
- httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...)
- if err != nil {
- return nil, err
- }
-
- callOpts := defaultAutokeyAdminRESTCallOptions()
- c := &autokeyAdminRESTClient{
- endpoint: endpoint,
- httpClient: httpClient,
- CallOptions: &callOpts,
- }
- c.setGoogleClientInfo()
-
- return &AutokeyAdminClient{internalClient: c, CallOptions: callOpts}, nil
-}
-
-func defaultAutokeyAdminRESTClientOptions() []option.ClientOption {
- return []option.ClientOption{
- internaloption.WithDefaultEndpoint("https://cloudkms.googleapis.com"),
- internaloption.WithDefaultEndpointTemplate("https://cloudkms.UNIVERSE_DOMAIN"),
- internaloption.WithDefaultMTLSEndpoint("https://cloudkms.mtls.googleapis.com"),
- internaloption.WithDefaultUniverseDomain("googleapis.com"),
- internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
- internaloption.WithDefaultScopes(DefaultAuthScopes()...),
- internaloption.EnableNewAuthLibrary(),
- }
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *autokeyAdminRESTClient) setGoogleClientInfo(keyval ...string) {
- kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
- kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN")
- c.xGoogHeaders = []string{
- "x-goog-api-client", gax.XGoogHeader(kv...),
- }
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *autokeyAdminRESTClient) Close() error {
- // Replace httpClient with nil to force cleanup.
- c.httpClient = nil
- return nil
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: This method always returns nil.
-func (c *autokeyAdminRESTClient) Connection() *grpc.ClientConn {
- return nil
-}
-func (c *autokeyAdminGRPCClient) UpdateAutokeyConfig(ctx context.Context, req *kmspb.UpdateAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.AutokeyConfig, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "autokey_config.name", url.QueryEscape(req.GetAutokeyConfig().GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).UpdateAutokeyConfig[0:len((*c.CallOptions).UpdateAutokeyConfig):len((*c.CallOptions).UpdateAutokeyConfig)], opts...)
- var resp *kmspb.AutokeyConfig
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.autokeyAdminClient.UpdateAutokeyConfig(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *autokeyAdminGRPCClient) GetAutokeyConfig(ctx context.Context, req *kmspb.GetAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.AutokeyConfig, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetAutokeyConfig[0:len((*c.CallOptions).GetAutokeyConfig):len((*c.CallOptions).GetAutokeyConfig)], opts...)
- var resp *kmspb.AutokeyConfig
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.autokeyAdminClient.GetAutokeyConfig(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *autokeyAdminGRPCClient) ShowEffectiveAutokeyConfig(ctx context.Context, req *kmspb.ShowEffectiveAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.ShowEffectiveAutokeyConfigResponse, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ShowEffectiveAutokeyConfig[0:len((*c.CallOptions).ShowEffectiveAutokeyConfig):len((*c.CallOptions).ShowEffectiveAutokeyConfig)], opts...)
- var resp *kmspb.ShowEffectiveAutokeyConfigResponse
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.autokeyAdminClient.ShowEffectiveAutokeyConfig(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *autokeyAdminGRPCClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...)
- var resp *locationpb.Location
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.locationsClient.GetLocation(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *autokeyAdminGRPCClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListLocations[0:len((*c.CallOptions).ListLocations):len((*c.CallOptions).ListLocations)], opts...)
- it := &LocationIterator{}
- req = proto.Clone(req).(*locationpb.ListLocationsRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) {
- resp := &locationpb.ListLocationsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.locationsClient.ListLocations(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetLocations(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-func (c *autokeyAdminGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
- var resp *iampb.Policy
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *autokeyAdminGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
- var resp *iampb.Policy
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *autokeyAdminGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
- var resp *iampb.TestIamPermissionsResponse
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *autokeyAdminGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-// UpdateAutokeyConfig updates the AutokeyConfig for a
-// folder. The caller must have both cloudkms.autokeyConfigs.update
-// permission on the parent folder and cloudkms.cryptoKeys.setIamPolicy
-// permission on the provided key project. A
-// KeyHandle creation in the folder’s
-// descendant projects will use this configuration to determine where to
-// create the resulting CryptoKey.
-func (c *autokeyAdminRESTClient) UpdateAutokeyConfig(ctx context.Context, req *kmspb.UpdateAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.AutokeyConfig, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- body := req.GetAutokeyConfig()
- jsonReq, err := m.Marshal(body)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetAutokeyConfig().GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetUpdateMask() != nil {
- updateMask, err := protojson.Marshal(req.GetUpdateMask())
- if err != nil {
- return nil, err
- }
- params.Add("updateMask", string(updateMask[1:len(updateMask)-1]))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "autokey_config.name", url.QueryEscape(req.GetAutokeyConfig().GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).UpdateAutokeyConfig[0:len((*c.CallOptions).UpdateAutokeyConfig):len((*c.CallOptions).UpdateAutokeyConfig)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &kmspb.AutokeyConfig{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// GetAutokeyConfig returns the AutokeyConfig for a
-// folder.
-func (c *autokeyAdminRESTClient) GetAutokeyConfig(ctx context.Context, req *kmspb.GetAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.AutokeyConfig, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetAutokeyConfig[0:len((*c.CallOptions).GetAutokeyConfig):len((*c.CallOptions).GetAutokeyConfig)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &kmspb.AutokeyConfig{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// ShowEffectiveAutokeyConfig returns the effective Cloud KMS Autokey configuration for a given project.
-func (c *autokeyAdminRESTClient) ShowEffectiveAutokeyConfig(ctx context.Context, req *kmspb.ShowEffectiveAutokeyConfigRequest, opts ...gax.CallOption) (*kmspb.ShowEffectiveAutokeyConfigResponse, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:showEffectiveAutokeyConfig", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).ShowEffectiveAutokeyConfig[0:len((*c.CallOptions).ShowEffectiveAutokeyConfig):len((*c.CallOptions).ShowEffectiveAutokeyConfig)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &kmspb.ShowEffectiveAutokeyConfigResponse{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// GetLocation gets information about a location.
-func (c *autokeyAdminRESTClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &locationpb.Location{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// ListLocations lists information about the supported locations for this service.
-func (c *autokeyAdminRESTClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator {
- it := &LocationIterator{}
- req = proto.Clone(req).(*locationpb.ListLocationsRequest)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) {
- resp := &locationpb.ListLocationsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, "", err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/locations", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetFilter() != "" {
- params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
- }
- if req.GetPageSize() != 0 {
- params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
- }
- if req.GetPageToken() != "" {
- params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := append(c.xGoogHeaders, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, "", e
- }
- it.Response = resp
- return resp.GetLocations(), resp.GetNextPageToken(), nil
- }
-
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-// GetIamPolicy gets the access control policy for a resource. Returns an empty policy
-// if the resource exists and does not have a policy set.
-func (c *autokeyAdminRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:getIamPolicy", req.GetResource())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetOptions().GetRequestedPolicyVersion() != 0 {
- params.Add("options.requestedPolicyVersion", fmt.Sprintf("%v", req.GetOptions().GetRequestedPolicyVersion()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &iampb.Policy{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// SetIamPolicy sets the access control policy on the specified resource. Replaces
-// any existing policy.
-//
-// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED
-// errors.
-func (c *autokeyAdminRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:setIamPolicy", req.GetResource())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &iampb.Policy{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// TestIamPermissions returns permissions that a caller has on the specified resource. If the
-// resource does not exist, this will return an empty set of
-// permissions, not a NOT_FOUND error.
-//
-// Note: This operation is designed to be used for building
-// permission-aware UIs and command-line tools, not for authorization
-// checking. This operation may “fail open” without warning.
-func (c *autokeyAdminRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:testIamPermissions", req.GetResource())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &iampb.TestIamPermissionsResponse{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// GetOperation is a utility method from google.longrunning.Operations.
-func (c *autokeyAdminRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
diff --git a/upstream/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go b/upstream/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go
deleted file mode 100644
index c11e3ad222b..00000000000
--- a/upstream/vendor/cloud.google.com/go/kms/apiv1/autokey_client.go
+++ /dev/null
@@ -1,1345 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
-
-package kms
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "math"
- "net/http"
- "net/url"
- "time"
-
- iampb "cloud.google.com/go/iam/apiv1/iampb"
- kmspb "cloud.google.com/go/kms/apiv1/kmspb"
- "cloud.google.com/go/longrunning"
- lroauto "cloud.google.com/go/longrunning/autogen"
- longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
- gax "github.com/googleapis/gax-go/v2"
- "google.golang.org/api/googleapi"
- "google.golang.org/api/iterator"
- "google.golang.org/api/option"
- "google.golang.org/api/option/internaloption"
- gtransport "google.golang.org/api/transport/grpc"
- httptransport "google.golang.org/api/transport/http"
- locationpb "google.golang.org/genproto/googleapis/cloud/location"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/protobuf/encoding/protojson"
- "google.golang.org/protobuf/proto"
-)
-
-var newAutokeyClientHook clientHook
-
-// AutokeyCallOptions contains the retry settings for each method of AutokeyClient.
-type AutokeyCallOptions struct {
- CreateKeyHandle []gax.CallOption
- GetKeyHandle []gax.CallOption
- ListKeyHandles []gax.CallOption
- GetLocation []gax.CallOption
- ListLocations []gax.CallOption
- GetIamPolicy []gax.CallOption
- SetIamPolicy []gax.CallOption
- TestIamPermissions []gax.CallOption
- GetOperation []gax.CallOption
-}
-
-func defaultAutokeyGRPCClientOptions() []option.ClientOption {
- return []option.ClientOption{
- internaloption.WithDefaultEndpoint("cloudkms.googleapis.com:443"),
- internaloption.WithDefaultEndpointTemplate("cloudkms.UNIVERSE_DOMAIN:443"),
- internaloption.WithDefaultMTLSEndpoint("cloudkms.mtls.googleapis.com:443"),
- internaloption.WithDefaultUniverseDomain("googleapis.com"),
- internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
- internaloption.WithDefaultScopes(DefaultAuthScopes()...),
- internaloption.EnableJwtWithScope(),
- internaloption.EnableNewAuthLibrary(),
- option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
- grpc.MaxCallRecvMsgSize(math.MaxInt32))),
- }
-}
-
-func defaultAutokeyCallOptions() *AutokeyCallOptions {
- return &AutokeyCallOptions{
- CreateKeyHandle: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- },
- GetKeyHandle: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 100 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- ListKeyHandles: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 100 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- GetLocation: []gax.CallOption{},
- ListLocations: []gax.CallOption{},
- GetIamPolicy: []gax.CallOption{},
- SetIamPolicy: []gax.CallOption{},
- TestIamPermissions: []gax.CallOption{},
- GetOperation: []gax.CallOption{},
- }
-}
-
-func defaultAutokeyRESTCallOptions() *AutokeyCallOptions {
- return &AutokeyCallOptions{
- CreateKeyHandle: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- },
- GetKeyHandle: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 100 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- ListKeyHandles: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 100 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- GetLocation: []gax.CallOption{},
- ListLocations: []gax.CallOption{},
- GetIamPolicy: []gax.CallOption{},
- SetIamPolicy: []gax.CallOption{},
- TestIamPermissions: []gax.CallOption{},
- GetOperation: []gax.CallOption{},
- }
-}
-
-// internalAutokeyClient is an interface that defines the methods available from Cloud Key Management Service (KMS) API.
-type internalAutokeyClient interface {
- Close() error
- setGoogleClientInfo(...string)
- Connection() *grpc.ClientConn
- CreateKeyHandle(context.Context, *kmspb.CreateKeyHandleRequest, ...gax.CallOption) (*CreateKeyHandleOperation, error)
- CreateKeyHandleOperation(name string) *CreateKeyHandleOperation
- GetKeyHandle(context.Context, *kmspb.GetKeyHandleRequest, ...gax.CallOption) (*kmspb.KeyHandle, error)
- ListKeyHandles(context.Context, *kmspb.ListKeyHandlesRequest, ...gax.CallOption) (*kmspb.ListKeyHandlesResponse, error)
- GetLocation(context.Context, *locationpb.GetLocationRequest, ...gax.CallOption) (*locationpb.Location, error)
- ListLocations(context.Context, *locationpb.ListLocationsRequest, ...gax.CallOption) *LocationIterator
- GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
- SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
- TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error)
- GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error)
-}
-
-// AutokeyClient is a client for interacting with Cloud Key Management Service (KMS) API.
-// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
-//
-// Provides interfaces for using Cloud KMS Autokey to provision new
-// CryptoKeys, ready for Customer Managed
-// Encryption Key (CMEK) use, on-demand. To support certain client tooling, this
-// feature is modeled around a KeyHandle
-// resource: creating a KeyHandle in a resource
-// project and given location triggers Cloud KMS Autokey to provision a
-// CryptoKey in the configured key project and
-// the same location.
-//
-// Prior to use in a given resource project,
-// UpdateAutokeyConfig
-// should have been called on an ancestor folder, setting the key project where
-// Cloud KMS Autokey should create new
-// CryptoKeys. See documentation for additional
-// prerequisites. To check what key project, if any, is currently configured on
-// a resource project’s ancestor folder, see
-// ShowEffectiveAutokeyConfig.
-type AutokeyClient struct {
- // The internal transport-dependent client.
- internalClient internalAutokeyClient
-
- // The call options for this service.
- CallOptions *AutokeyCallOptions
-
- // LROClient is used internally to handle long-running operations.
- // It is exposed so that its CallOptions can be modified if required.
- // Users should not Close this client.
- LROClient *lroauto.OperationsClient
-}
-
-// Wrapper methods routed to the internal client.
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *AutokeyClient) Close() error {
- return c.internalClient.Close()
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *AutokeyClient) setGoogleClientInfo(keyval ...string) {
- c.internalClient.setGoogleClientInfo(keyval...)
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: Connections are now pooled so this method does not always
-// return the same resource.
-func (c *AutokeyClient) Connection() *grpc.ClientConn {
- return c.internalClient.Connection()
-}
-
-// CreateKeyHandle creates a new KeyHandle, triggering the
-// provisioning of a new CryptoKey for CMEK
-// use with the given resource type in the configured key project and the same
-// location. GetOperation should be used to resolve
-// the resulting long-running operation and get the resulting
-// KeyHandle and
-// CryptoKey.
-func (c *AutokeyClient) CreateKeyHandle(ctx context.Context, req *kmspb.CreateKeyHandleRequest, opts ...gax.CallOption) (*CreateKeyHandleOperation, error) {
- return c.internalClient.CreateKeyHandle(ctx, req, opts...)
-}
-
-// CreateKeyHandleOperation returns a new CreateKeyHandleOperation from a given name.
-// The name must be that of a previously created CreateKeyHandleOperation, possibly from a different process.
-func (c *AutokeyClient) CreateKeyHandleOperation(name string) *CreateKeyHandleOperation {
- return c.internalClient.CreateKeyHandleOperation(name)
-}
-
-// GetKeyHandle returns the KeyHandle.
-func (c *AutokeyClient) GetKeyHandle(ctx context.Context, req *kmspb.GetKeyHandleRequest, opts ...gax.CallOption) (*kmspb.KeyHandle, error) {
- return c.internalClient.GetKeyHandle(ctx, req, opts...)
-}
-
-// ListKeyHandles lists KeyHandles.
-func (c *AutokeyClient) ListKeyHandles(ctx context.Context, req *kmspb.ListKeyHandlesRequest, opts ...gax.CallOption) (*kmspb.ListKeyHandlesResponse, error) {
- return c.internalClient.ListKeyHandles(ctx, req, opts...)
-}
-
-// GetLocation gets information about a location.
-func (c *AutokeyClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) {
- return c.internalClient.GetLocation(ctx, req, opts...)
-}
-
-// ListLocations lists information about the supported locations for this service.
-func (c *AutokeyClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator {
- return c.internalClient.ListLocations(ctx, req, opts...)
-}
-
-// GetIamPolicy gets the access control policy for a resource. Returns an empty policy
-// if the resource exists and does not have a policy set.
-func (c *AutokeyClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- return c.internalClient.GetIamPolicy(ctx, req, opts...)
-}
-
-// SetIamPolicy sets the access control policy on the specified resource. Replaces
-// any existing policy.
-//
-// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED
-// errors.
-func (c *AutokeyClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- return c.internalClient.SetIamPolicy(ctx, req, opts...)
-}
-
-// TestIamPermissions returns permissions that a caller has on the specified resource. If the
-// resource does not exist, this will return an empty set of
-// permissions, not a NOT_FOUND error.
-//
-// Note: This operation is designed to be used for building
-// permission-aware UIs and command-line tools, not for authorization
-// checking. This operation may “fail open” without warning.
-func (c *AutokeyClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
- return c.internalClient.TestIamPermissions(ctx, req, opts...)
-}
-
-// GetOperation is a utility method from google.longrunning.Operations.
-func (c *AutokeyClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- return c.internalClient.GetOperation(ctx, req, opts...)
-}
-
-// autokeyGRPCClient is a client for interacting with Cloud Key Management Service (KMS) API over gRPC transport.
-//
-// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
-type autokeyGRPCClient struct {
- // Connection pool of gRPC connections to the service.
- connPool gtransport.ConnPool
-
- // Points back to the CallOptions field of the containing AutokeyClient
- CallOptions **AutokeyCallOptions
-
- // The gRPC API client.
- autokeyClient kmspb.AutokeyClient
-
- // LROClient is used internally to handle long-running operations.
- // It is exposed so that its CallOptions can be modified if required.
- // Users should not Close this client.
- LROClient **lroauto.OperationsClient
-
- operationsClient longrunningpb.OperationsClient
-
- iamPolicyClient iampb.IAMPolicyClient
-
- locationsClient locationpb.LocationsClient
-
- // The x-goog-* metadata to be sent with each request.
- xGoogHeaders []string
-}
-
-// NewAutokeyClient creates a new autokey client based on gRPC.
-// The returned client must be Closed when it is done being used to clean up its underlying connections.
-//
-// Provides interfaces for using Cloud KMS Autokey to provision new
-// CryptoKeys, ready for Customer Managed
-// Encryption Key (CMEK) use, on-demand. To support certain client tooling, this
-// feature is modeled around a KeyHandle
-// resource: creating a KeyHandle in a resource
-// project and given location triggers Cloud KMS Autokey to provision a
-// CryptoKey in the configured key project and
-// the same location.
-//
-// Prior to use in a given resource project,
-// UpdateAutokeyConfig
-// should have been called on an ancestor folder, setting the key project where
-// Cloud KMS Autokey should create new
-// CryptoKeys. See documentation for additional
-// prerequisites. To check what key project, if any, is currently configured on
-// a resource project’s ancestor folder, see
-// ShowEffectiveAutokeyConfig.
-func NewAutokeyClient(ctx context.Context, opts ...option.ClientOption) (*AutokeyClient, error) {
- clientOpts := defaultAutokeyGRPCClientOptions()
- if newAutokeyClientHook != nil {
- hookOpts, err := newAutokeyClientHook(ctx, clientHookParams{})
- if err != nil {
- return nil, err
- }
- clientOpts = append(clientOpts, hookOpts...)
- }
-
- connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
- if err != nil {
- return nil, err
- }
- client := AutokeyClient{CallOptions: defaultAutokeyCallOptions()}
-
- c := &autokeyGRPCClient{
- connPool: connPool,
- autokeyClient: kmspb.NewAutokeyClient(connPool),
- CallOptions: &client.CallOptions,
- operationsClient: longrunningpb.NewOperationsClient(connPool),
- iamPolicyClient: iampb.NewIAMPolicyClient(connPool),
- locationsClient: locationpb.NewLocationsClient(connPool),
- }
- c.setGoogleClientInfo()
-
- client.internalClient = c
-
- client.LROClient, err = lroauto.NewOperationsClient(ctx, gtransport.WithConnPool(connPool))
- if err != nil {
- // This error "should not happen", since we are just reusing old connection pool
- // and never actually need to dial.
- // If this does happen, we could leak connp. However, we cannot close conn:
- // If the user invoked the constructor with option.WithGRPCConn,
- // we would close a connection that's still in use.
- // TODO: investigate error conditions.
- return nil, err
- }
- c.LROClient = &client.LROClient
- return &client, nil
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: Connections are now pooled so this method does not always
-// return the same resource.
-func (c *autokeyGRPCClient) Connection() *grpc.ClientConn {
- return c.connPool.Conn()
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *autokeyGRPCClient) setGoogleClientInfo(keyval ...string) {
- kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
- kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
- c.xGoogHeaders = []string{
- "x-goog-api-client", gax.XGoogHeader(kv...),
- }
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *autokeyGRPCClient) Close() error {
- return c.connPool.Close()
-}
-
-// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
-type autokeyRESTClient struct {
- // The http endpoint to connect to.
- endpoint string
-
- // The http client.
- httpClient *http.Client
-
- // LROClient is used internally to handle long-running operations.
- // It is exposed so that its CallOptions can be modified if required.
- // Users should not Close this client.
- LROClient **lroauto.OperationsClient
-
- // The x-goog-* headers to be sent with each request.
- xGoogHeaders []string
-
- // Points back to the CallOptions field of the containing AutokeyClient
- CallOptions **AutokeyCallOptions
-}
-
-// NewAutokeyRESTClient creates a new autokey rest client.
-//
-// Provides interfaces for using Cloud KMS Autokey to provision new
-// CryptoKeys, ready for Customer Managed
-// Encryption Key (CMEK) use, on-demand. To support certain client tooling, this
-// feature is modeled around a KeyHandle
-// resource: creating a KeyHandle in a resource
-// project and given location triggers Cloud KMS Autokey to provision a
-// CryptoKey in the configured key project and
-// the same location.
-//
-// Prior to use in a given resource project,
-// UpdateAutokeyConfig
-// should have been called on an ancestor folder, setting the key project where
-// Cloud KMS Autokey should create new
-// CryptoKeys. See documentation for additional
-// prerequisites. To check what key project, if any, is currently configured on
-// a resource project’s ancestor folder, see
-// ShowEffectiveAutokeyConfig.
-func NewAutokeyRESTClient(ctx context.Context, opts ...option.ClientOption) (*AutokeyClient, error) {
- clientOpts := append(defaultAutokeyRESTClientOptions(), opts...)
- httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...)
- if err != nil {
- return nil, err
- }
-
- callOpts := defaultAutokeyRESTCallOptions()
- c := &autokeyRESTClient{
- endpoint: endpoint,
- httpClient: httpClient,
- CallOptions: &callOpts,
- }
- c.setGoogleClientInfo()
-
- lroOpts := []option.ClientOption{
- option.WithHTTPClient(httpClient),
- option.WithEndpoint(endpoint),
- }
- opClient, err := lroauto.NewOperationsRESTClient(ctx, lroOpts...)
- if err != nil {
- return nil, err
- }
- c.LROClient = &opClient
-
- return &AutokeyClient{internalClient: c, CallOptions: callOpts}, nil
-}
-
-func defaultAutokeyRESTClientOptions() []option.ClientOption {
- return []option.ClientOption{
- internaloption.WithDefaultEndpoint("https://cloudkms.googleapis.com"),
- internaloption.WithDefaultEndpointTemplate("https://cloudkms.UNIVERSE_DOMAIN"),
- internaloption.WithDefaultMTLSEndpoint("https://cloudkms.mtls.googleapis.com"),
- internaloption.WithDefaultUniverseDomain("googleapis.com"),
- internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
- internaloption.WithDefaultScopes(DefaultAuthScopes()...),
- internaloption.EnableNewAuthLibrary(),
- }
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *autokeyRESTClient) setGoogleClientInfo(keyval ...string) {
- kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
- kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN")
- c.xGoogHeaders = []string{
- "x-goog-api-client", gax.XGoogHeader(kv...),
- }
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *autokeyRESTClient) Close() error {
- // Replace httpClient with nil to force cleanup.
- c.httpClient = nil
- return nil
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: This method always returns nil.
-func (c *autokeyRESTClient) Connection() *grpc.ClientConn {
- return nil
-}
-func (c *autokeyGRPCClient) CreateKeyHandle(ctx context.Context, req *kmspb.CreateKeyHandleRequest, opts ...gax.CallOption) (*CreateKeyHandleOperation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).CreateKeyHandle[0:len((*c.CallOptions).CreateKeyHandle):len((*c.CallOptions).CreateKeyHandle)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.autokeyClient.CreateKeyHandle(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return &CreateKeyHandleOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- }, nil
-}
-
-func (c *autokeyGRPCClient) GetKeyHandle(ctx context.Context, req *kmspb.GetKeyHandleRequest, opts ...gax.CallOption) (*kmspb.KeyHandle, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetKeyHandle[0:len((*c.CallOptions).GetKeyHandle):len((*c.CallOptions).GetKeyHandle)], opts...)
- var resp *kmspb.KeyHandle
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.autokeyClient.GetKeyHandle(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *autokeyGRPCClient) ListKeyHandles(ctx context.Context, req *kmspb.ListKeyHandlesRequest, opts ...gax.CallOption) (*kmspb.ListKeyHandlesResponse, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListKeyHandles[0:len((*c.CallOptions).ListKeyHandles):len((*c.CallOptions).ListKeyHandles)], opts...)
- var resp *kmspb.ListKeyHandlesResponse
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.autokeyClient.ListKeyHandles(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *autokeyGRPCClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...)
- var resp *locationpb.Location
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.locationsClient.GetLocation(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *autokeyGRPCClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListLocations[0:len((*c.CallOptions).ListLocations):len((*c.CallOptions).ListLocations)], opts...)
- it := &LocationIterator{}
- req = proto.Clone(req).(*locationpb.ListLocationsRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) {
- resp := &locationpb.ListLocationsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.locationsClient.ListLocations(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetLocations(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-func (c *autokeyGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
- var resp *iampb.Policy
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.iamPolicyClient.GetIamPolicy(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *autokeyGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
- var resp *iampb.Policy
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.iamPolicyClient.SetIamPolicy(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *autokeyGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
- var resp *iampb.TestIamPermissionsResponse
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.iamPolicyClient.TestIamPermissions(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *autokeyGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-// CreateKeyHandle creates a new KeyHandle, triggering the
-// provisioning of a new CryptoKey for CMEK
-// use with the given resource type in the configured key project and the same
-// location. GetOperation should be used to resolve
-// the resulting long-running operation and get the resulting
-// KeyHandle and
-// CryptoKey.
-func (c *autokeyRESTClient) CreateKeyHandle(ctx context.Context, req *kmspb.CreateKeyHandleRequest, opts ...gax.CallOption) (*CreateKeyHandleOperation, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- body := req.GetKeyHandle()
- jsonReq, err := m.Marshal(body)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/keyHandles", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetKeyHandleId() != "" {
- params.Add("keyHandleId", fmt.Sprintf("%v", req.GetKeyHandleId()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
-
- override := fmt.Sprintf("/v1/%s", resp.GetName())
- return &CreateKeyHandleOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- pollPath: override,
- }, nil
-}
-
-// GetKeyHandle returns the KeyHandle.
-func (c *autokeyRESTClient) GetKeyHandle(ctx context.Context, req *kmspb.GetKeyHandleRequest, opts ...gax.CallOption) (*kmspb.KeyHandle, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetKeyHandle[0:len((*c.CallOptions).GetKeyHandle):len((*c.CallOptions).GetKeyHandle)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &kmspb.KeyHandle{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// ListKeyHandles lists KeyHandles.
-func (c *autokeyRESTClient) ListKeyHandles(ctx context.Context, req *kmspb.ListKeyHandlesRequest, opts ...gax.CallOption) (*kmspb.ListKeyHandlesResponse, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/keyHandles", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetFilter() != "" {
- params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).ListKeyHandles[0:len((*c.CallOptions).ListKeyHandles):len((*c.CallOptions).ListKeyHandles)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &kmspb.ListKeyHandlesResponse{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// GetLocation gets information about a location.
-func (c *autokeyRESTClient) GetLocation(ctx context.Context, req *locationpb.GetLocationRequest, opts ...gax.CallOption) (*locationpb.Location, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetLocation[0:len((*c.CallOptions).GetLocation):len((*c.CallOptions).GetLocation)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &locationpb.Location{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// ListLocations lists information about the supported locations for this service.
-func (c *autokeyRESTClient) ListLocations(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) *LocationIterator {
- it := &LocationIterator{}
- req = proto.Clone(req).(*locationpb.ListLocationsRequest)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- it.InternalFetch = func(pageSize int, pageToken string) ([]*locationpb.Location, string, error) {
- resp := &locationpb.ListLocationsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, "", err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/locations", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetFilter() != "" {
- params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
- }
- if req.GetPageSize() != 0 {
- params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
- }
- if req.GetPageToken() != "" {
- params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := append(c.xGoogHeaders, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, "", e
- }
- it.Response = resp
- return resp.GetLocations(), resp.GetNextPageToken(), nil
- }
-
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-// GetIamPolicy gets the access control policy for a resource. Returns an empty policy
-// if the resource exists and does not have a policy set.
-func (c *autokeyRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:getIamPolicy", req.GetResource())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetOptions().GetRequestedPolicyVersion() != 0 {
- params.Add("options.requestedPolicyVersion", fmt.Sprintf("%v", req.GetOptions().GetRequestedPolicyVersion()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &iampb.Policy{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// SetIamPolicy sets the access control policy on the specified resource. Replaces
-// any existing policy.
-//
-// Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED
-// errors.
-func (c *autokeyRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:setIamPolicy", req.GetResource())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &iampb.Policy{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// TestIamPermissions returns permissions that a caller has on the specified resource. If the
-// resource does not exist, this will return an empty set of
-// permissions, not a NOT_FOUND error.
-//
-// Note: This operation is designed to be used for building
-// permission-aware UIs and command-line tools, not for authorization
-// checking. This operation may “fail open” without warning.
-func (c *autokeyRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:testIamPermissions", req.GetResource())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &iampb.TestIamPermissionsResponse{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// GetOperation is a utility method from google.longrunning.Operations.
-func (c *autokeyRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// CreateKeyHandleOperation returns a new CreateKeyHandleOperation from a given name.
-// The name must be that of a previously created CreateKeyHandleOperation, possibly from a different process.
-func (c *autokeyGRPCClient) CreateKeyHandleOperation(name string) *CreateKeyHandleOperation {
- return &CreateKeyHandleOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- }
-}
-
-// CreateKeyHandleOperation returns a new CreateKeyHandleOperation from a given name.
-// The name must be that of a previously created CreateKeyHandleOperation, possibly from a different process.
-func (c *autokeyRESTClient) CreateKeyHandleOperation(name string) *CreateKeyHandleOperation {
- override := fmt.Sprintf("/v1/%s", name)
- return &CreateKeyHandleOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- pollPath: override,
- }
-}
diff --git a/upstream/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go b/upstream/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go
index 0ca75b22f8a..49c301e3375 100644
--- a/upstream/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go
+++ b/upstream/vendor/cloud.google.com/go/kms/apiv1/auxiliary.go
@@ -17,80 +17,11 @@
package kms
import (
- "context"
- "time"
-
kmspb "cloud.google.com/go/kms/apiv1/kmspb"
- "cloud.google.com/go/longrunning"
- gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/iterator"
locationpb "google.golang.org/genproto/googleapis/cloud/location"
)
-// CreateKeyHandleOperation manages a long-running operation from CreateKeyHandle.
-type CreateKeyHandleOperation struct {
- lro *longrunning.Operation
- pollPath string
-}
-
-// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
-//
-// See documentation of Poll for error-handling information.
-func (op *CreateKeyHandleOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*kmspb.KeyHandle, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp kmspb.KeyHandle
- if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
- return nil, err
- }
- return &resp, nil
-}
-
-// Poll fetches the latest state of the long-running operation.
-//
-// Poll also fetches the latest metadata, which can be retrieved by Metadata.
-//
-// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
-// the operation has completed with failure, the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true, and the response of the operation is returned.
-// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
-func (op *CreateKeyHandleOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*kmspb.KeyHandle, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp kmspb.KeyHandle
- if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
- return nil, err
- }
- if !op.Done() {
- return nil, nil
- }
- return &resp, nil
-}
-
-// Metadata returns metadata associated with the long-running operation.
-// Metadata itself does not contact the server, but Poll does.
-// To get the latest metadata, call this method after a successful call to Poll.
-// If the metadata is not available, the returned metadata and error are both nil.
-func (op *CreateKeyHandleOperation) Metadata() (*kmspb.CreateKeyHandleMetadata, error) {
- var meta kmspb.CreateKeyHandleMetadata
- if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
- return nil, nil
- } else if err != nil {
- return nil, err
- }
- return &meta, nil
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *CreateKeyHandleOperation) Done() bool {
- return op.lro.Done()
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service from which the operation is created.
-func (op *CreateKeyHandleOperation) Name() string {
- return op.lro.Name()
-}
-
// CryptoKeyIterator manages a stream of *kmspb.CryptoKey.
type CryptoKeyIterator struct {
items []*kmspb.CryptoKey
diff --git a/upstream/vendor/cloud.google.com/go/kms/apiv1/doc.go b/upstream/vendor/cloud.google.com/go/kms/apiv1/doc.go
index 432780fa5a3..b20d1ac4014 100644
--- a/upstream/vendor/cloud.google.com/go/kms/apiv1/doc.go
+++ b/upstream/vendor/cloud.google.com/go/kms/apiv1/doc.go
@@ -42,7 +42,7 @@
// // - It may require correct/in-range values for request initialization.
// // - It may require specifying regional endpoints when creating the service client as shown in:
// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
-// c, err := kms.NewAutokeyClient(ctx)
+// c, err := kms.NewEkmClient(ctx)
// if err != nil {
// // TODO: Handle error.
// }
@@ -62,22 +62,17 @@
// // - It may require correct/in-range values for request initialization.
// // - It may require specifying regional endpoints when creating the service client as shown in:
// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
-// c, err := kms.NewAutokeyClient(ctx)
+// c, err := kms.NewEkmClient(ctx)
// if err != nil {
// // TODO: Handle error.
// }
// defer c.Close()
//
-// req := &kmspb.CreateKeyHandleRequest{
+// req := &kmspb.CreateEkmConnectionRequest{
// // TODO: Fill request struct fields.
-// // See https://pkg.go.dev/cloud.google.com/go/kms/apiv1/kmspb#CreateKeyHandleRequest.
+// // See https://pkg.go.dev/cloud.google.com/go/kms/apiv1/kmspb#CreateEkmConnectionRequest.
// }
-// op, err := c.CreateKeyHandle(ctx, req)
-// if err != nil {
-// // TODO: Handle error.
-// }
-//
-// resp, err := op.Wait(ctx)
+// resp, err := c.CreateEkmConnection(ctx, req)
// if err != nil {
// // TODO: Handle error.
// }
@@ -86,7 +81,7 @@
//
// # Use of Context
//
-// The ctx passed to NewAutokeyClient is used for authentication requests and
+// The ctx passed to NewEkmClient is used for authentication requests and
// for creating the underlying connection, but is not used for subsequent calls.
// Individual methods on the client use the ctx given to them.
//
diff --git a/upstream/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go b/upstream/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go
index 6d1856c9eac..1e8d2ffe0a0 100644
--- a/upstream/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go
+++ b/upstream/vendor/cloud.google.com/go/kms/apiv1/ekm_client.go
@@ -28,7 +28,6 @@ import (
iampb "cloud.google.com/go/iam/apiv1/iampb"
kmspb "cloud.google.com/go/kms/apiv1/kmspb"
- longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
@@ -59,7 +58,6 @@ type EkmCallOptions struct {
GetIamPolicy []gax.CallOption
SetIamPolicy []gax.CallOption
TestIamPermissions []gax.CallOption
- GetOperation []gax.CallOption
}
func defaultEkmGRPCClientOptions() []option.ClientOption {
@@ -71,7 +69,6 @@ func defaultEkmGRPCClientOptions() []option.ClientOption {
internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
internaloption.EnableJwtWithScope(),
- internaloption.EnableNewAuthLibrary(),
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
}
@@ -139,7 +136,6 @@ func defaultEkmCallOptions() *EkmCallOptions {
GetIamPolicy: []gax.CallOption{},
SetIamPolicy: []gax.CallOption{},
TestIamPermissions: []gax.CallOption{},
- GetOperation: []gax.CallOption{},
}
}
@@ -201,7 +197,6 @@ func defaultEkmRESTCallOptions() *EkmCallOptions {
GetIamPolicy: []gax.CallOption{},
SetIamPolicy: []gax.CallOption{},
TestIamPermissions: []gax.CallOption{},
- GetOperation: []gax.CallOption{},
}
}
@@ -222,7 +217,6 @@ type internalEkmClient interface {
GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error)
- GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error)
}
// EkmClient is a client for interacting with Cloud Key Management Service (KMS) API.
@@ -344,11 +338,6 @@ func (c *EkmClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPe
return c.internalClient.TestIamPermissions(ctx, req, opts...)
}
-// GetOperation is a utility method from google.longrunning.Operations.
-func (c *EkmClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- return c.internalClient.GetOperation(ctx, req, opts...)
-}
-
// ekmGRPCClient is a client for interacting with Cloud Key Management Service (KMS) API over gRPC transport.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
@@ -362,8 +351,6 @@ type ekmGRPCClient struct {
// The gRPC API client.
ekmClient kmspb.EkmServiceClient
- operationsClient longrunningpb.OperationsClient
-
iamPolicyClient iampb.IAMPolicyClient
locationsClient locationpb.LocationsClient
@@ -398,12 +385,11 @@ func NewEkmClient(ctx context.Context, opts ...option.ClientOption) (*EkmClient,
client := EkmClient{CallOptions: defaultEkmCallOptions()}
c := &ekmGRPCClient{
- connPool: connPool,
- ekmClient: kmspb.NewEkmServiceClient(connPool),
- CallOptions: &client.CallOptions,
- operationsClient: longrunningpb.NewOperationsClient(connPool),
- iamPolicyClient: iampb.NewIAMPolicyClient(connPool),
- locationsClient: locationpb.NewLocationsClient(connPool),
+ connPool: connPool,
+ ekmClient: kmspb.NewEkmServiceClient(connPool),
+ CallOptions: &client.CallOptions,
+ iamPolicyClient: iampb.NewIAMPolicyClient(connPool),
+ locationsClient: locationpb.NewLocationsClient(connPool),
}
c.setGoogleClientInfo()
@@ -426,9 +412,7 @@ func (c *ekmGRPCClient) Connection() *grpc.ClientConn {
func (c *ekmGRPCClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
- c.xGoogHeaders = []string{
- "x-goog-api-client", gax.XGoogHeader(kv...),
- }
+ c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)}
}
// Close closes the connection to the API service. The user should invoke this when
@@ -486,7 +470,6 @@ func defaultEkmRESTClientOptions() []option.ClientOption {
internaloption.WithDefaultUniverseDomain("googleapis.com"),
internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
- internaloption.EnableNewAuthLibrary(),
}
}
@@ -496,9 +479,7 @@ func defaultEkmRESTClientOptions() []option.ClientOption {
func (c *ekmRESTClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN")
- c.xGoogHeaders = []string{
- "x-goog-api-client", gax.XGoogHeader(kv...),
- }
+ c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)}
}
// Close closes the connection to the API service. The user should invoke this when
@@ -787,24 +768,6 @@ func (c *ekmGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestI
return resp, nil
}
-func (c *ekmGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
// ListEkmConnections lists EkmConnections.
func (c *ekmRESTClient) ListEkmConnections(ctx context.Context, req *kmspb.ListEkmConnectionsRequest, opts ...gax.CallOption) *EkmConnectionIterator {
it := &EkmConnectionIterator{}
@@ -1661,63 +1624,3 @@ func (c *ekmRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestI
}
return resp, nil
}
-
-// GetOperation is a utility method from google.longrunning.Operations.
-func (c *ekmRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
diff --git a/upstream/vendor/cloud.google.com/go/kms/apiv1/gapic_metadata.json b/upstream/vendor/cloud.google.com/go/kms/apiv1/gapic_metadata.json
deleted file mode 100644
index 47b250252cc..00000000000
--- a/upstream/vendor/cloud.google.com/go/kms/apiv1/gapic_metadata.json
+++ /dev/null
@@ -1,715 +0,0 @@
-{
- "schema": "1.0",
- "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.",
- "language": "go",
- "protoPackage": "google.cloud.kms.v1",
- "libraryPackage": "cloud.google.com/go/kms/apiv1",
- "services": {
- "Autokey": {
- "clients": {
- "grpc": {
- "libraryClient": "AutokeyClient",
- "rpcs": {
- "CreateKeyHandle": {
- "methods": [
- "CreateKeyHandle"
- ]
- },
- "GetIamPolicy": {
- "methods": [
- "GetIamPolicy"
- ]
- },
- "GetKeyHandle": {
- "methods": [
- "GetKeyHandle"
- ]
- },
- "GetLocation": {
- "methods": [
- "GetLocation"
- ]
- },
- "GetOperation": {
- "methods": [
- "GetOperation"
- ]
- },
- "ListKeyHandles": {
- "methods": [
- "ListKeyHandles"
- ]
- },
- "ListLocations": {
- "methods": [
- "ListLocations"
- ]
- },
- "SetIamPolicy": {
- "methods": [
- "SetIamPolicy"
- ]
- },
- "TestIamPermissions": {
- "methods": [
- "TestIamPermissions"
- ]
- }
- }
- },
- "rest": {
- "libraryClient": "AutokeyClient",
- "rpcs": {
- "CreateKeyHandle": {
- "methods": [
- "CreateKeyHandle"
- ]
- },
- "GetIamPolicy": {
- "methods": [
- "GetIamPolicy"
- ]
- },
- "GetKeyHandle": {
- "methods": [
- "GetKeyHandle"
- ]
- },
- "GetLocation": {
- "methods": [
- "GetLocation"
- ]
- },
- "GetOperation": {
- "methods": [
- "GetOperation"
- ]
- },
- "ListKeyHandles": {
- "methods": [
- "ListKeyHandles"
- ]
- },
- "ListLocations": {
- "methods": [
- "ListLocations"
- ]
- },
- "SetIamPolicy": {
- "methods": [
- "SetIamPolicy"
- ]
- },
- "TestIamPermissions": {
- "methods": [
- "TestIamPermissions"
- ]
- }
- }
- }
- }
- },
- "AutokeyAdmin": {
- "clients": {
- "grpc": {
- "libraryClient": "AutokeyAdminClient",
- "rpcs": {
- "GetAutokeyConfig": {
- "methods": [
- "GetAutokeyConfig"
- ]
- },
- "GetIamPolicy": {
- "methods": [
- "GetIamPolicy"
- ]
- },
- "GetLocation": {
- "methods": [
- "GetLocation"
- ]
- },
- "GetOperation": {
- "methods": [
- "GetOperation"
- ]
- },
- "ListLocations": {
- "methods": [
- "ListLocations"
- ]
- },
- "SetIamPolicy": {
- "methods": [
- "SetIamPolicy"
- ]
- },
- "ShowEffectiveAutokeyConfig": {
- "methods": [
- "ShowEffectiveAutokeyConfig"
- ]
- },
- "TestIamPermissions": {
- "methods": [
- "TestIamPermissions"
- ]
- },
- "UpdateAutokeyConfig": {
- "methods": [
- "UpdateAutokeyConfig"
- ]
- }
- }
- },
- "rest": {
- "libraryClient": "AutokeyAdminClient",
- "rpcs": {
- "GetAutokeyConfig": {
- "methods": [
- "GetAutokeyConfig"
- ]
- },
- "GetIamPolicy": {
- "methods": [
- "GetIamPolicy"
- ]
- },
- "GetLocation": {
- "methods": [
- "GetLocation"
- ]
- },
- "GetOperation": {
- "methods": [
- "GetOperation"
- ]
- },
- "ListLocations": {
- "methods": [
- "ListLocations"
- ]
- },
- "SetIamPolicy": {
- "methods": [
- "SetIamPolicy"
- ]
- },
- "ShowEffectiveAutokeyConfig": {
- "methods": [
- "ShowEffectiveAutokeyConfig"
- ]
- },
- "TestIamPermissions": {
- "methods": [
- "TestIamPermissions"
- ]
- },
- "UpdateAutokeyConfig": {
- "methods": [
- "UpdateAutokeyConfig"
- ]
- }
- }
- }
- }
- },
- "EkmService": {
- "clients": {
- "grpc": {
- "libraryClient": "EkmClient",
- "rpcs": {
- "CreateEkmConnection": {
- "methods": [
- "CreateEkmConnection"
- ]
- },
- "GetEkmConfig": {
- "methods": [
- "GetEkmConfig"
- ]
- },
- "GetEkmConnection": {
- "methods": [
- "GetEkmConnection"
- ]
- },
- "GetIamPolicy": {
- "methods": [
- "GetIamPolicy"
- ]
- },
- "GetLocation": {
- "methods": [
- "GetLocation"
- ]
- },
- "GetOperation": {
- "methods": [
- "GetOperation"
- ]
- },
- "ListEkmConnections": {
- "methods": [
- "ListEkmConnections"
- ]
- },
- "ListLocations": {
- "methods": [
- "ListLocations"
- ]
- },
- "SetIamPolicy": {
- "methods": [
- "SetIamPolicy"
- ]
- },
- "TestIamPermissions": {
- "methods": [
- "TestIamPermissions"
- ]
- },
- "UpdateEkmConfig": {
- "methods": [
- "UpdateEkmConfig"
- ]
- },
- "UpdateEkmConnection": {
- "methods": [
- "UpdateEkmConnection"
- ]
- },
- "VerifyConnectivity": {
- "methods": [
- "VerifyConnectivity"
- ]
- }
- }
- },
- "rest": {
- "libraryClient": "EkmClient",
- "rpcs": {
- "CreateEkmConnection": {
- "methods": [
- "CreateEkmConnection"
- ]
- },
- "GetEkmConfig": {
- "methods": [
- "GetEkmConfig"
- ]
- },
- "GetEkmConnection": {
- "methods": [
- "GetEkmConnection"
- ]
- },
- "GetIamPolicy": {
- "methods": [
- "GetIamPolicy"
- ]
- },
- "GetLocation": {
- "methods": [
- "GetLocation"
- ]
- },
- "GetOperation": {
- "methods": [
- "GetOperation"
- ]
- },
- "ListEkmConnections": {
- "methods": [
- "ListEkmConnections"
- ]
- },
- "ListLocations": {
- "methods": [
- "ListLocations"
- ]
- },
- "SetIamPolicy": {
- "methods": [
- "SetIamPolicy"
- ]
- },
- "TestIamPermissions": {
- "methods": [
- "TestIamPermissions"
- ]
- },
- "UpdateEkmConfig": {
- "methods": [
- "UpdateEkmConfig"
- ]
- },
- "UpdateEkmConnection": {
- "methods": [
- "UpdateEkmConnection"
- ]
- },
- "VerifyConnectivity": {
- "methods": [
- "VerifyConnectivity"
- ]
- }
- }
- }
- }
- },
- "KeyManagementService": {
- "clients": {
- "grpc": {
- "libraryClient": "KeyManagementClient",
- "rpcs": {
- "AsymmetricDecrypt": {
- "methods": [
- "AsymmetricDecrypt"
- ]
- },
- "AsymmetricSign": {
- "methods": [
- "AsymmetricSign"
- ]
- },
- "CreateCryptoKey": {
- "methods": [
- "CreateCryptoKey"
- ]
- },
- "CreateCryptoKeyVersion": {
- "methods": [
- "CreateCryptoKeyVersion"
- ]
- },
- "CreateImportJob": {
- "methods": [
- "CreateImportJob"
- ]
- },
- "CreateKeyRing": {
- "methods": [
- "CreateKeyRing"
- ]
- },
- "Decrypt": {
- "methods": [
- "Decrypt"
- ]
- },
- "DestroyCryptoKeyVersion": {
- "methods": [
- "DestroyCryptoKeyVersion"
- ]
- },
- "Encrypt": {
- "methods": [
- "Encrypt"
- ]
- },
- "GenerateRandomBytes": {
- "methods": [
- "GenerateRandomBytes"
- ]
- },
- "GetCryptoKey": {
- "methods": [
- "GetCryptoKey"
- ]
- },
- "GetCryptoKeyVersion": {
- "methods": [
- "GetCryptoKeyVersion"
- ]
- },
- "GetIamPolicy": {
- "methods": [
- "GetIamPolicy"
- ]
- },
- "GetImportJob": {
- "methods": [
- "GetImportJob"
- ]
- },
- "GetKeyRing": {
- "methods": [
- "GetKeyRing"
- ]
- },
- "GetLocation": {
- "methods": [
- "GetLocation"
- ]
- },
- "GetOperation": {
- "methods": [
- "GetOperation"
- ]
- },
- "GetPublicKey": {
- "methods": [
- "GetPublicKey"
- ]
- },
- "ImportCryptoKeyVersion": {
- "methods": [
- "ImportCryptoKeyVersion"
- ]
- },
- "ListCryptoKeyVersions": {
- "methods": [
- "ListCryptoKeyVersions"
- ]
- },
- "ListCryptoKeys": {
- "methods": [
- "ListCryptoKeys"
- ]
- },
- "ListImportJobs": {
- "methods": [
- "ListImportJobs"
- ]
- },
- "ListKeyRings": {
- "methods": [
- "ListKeyRings"
- ]
- },
- "ListLocations": {
- "methods": [
- "ListLocations"
- ]
- },
- "MacSign": {
- "methods": [
- "MacSign"
- ]
- },
- "MacVerify": {
- "methods": [
- "MacVerify"
- ]
- },
- "RawDecrypt": {
- "methods": [
- "RawDecrypt"
- ]
- },
- "RawEncrypt": {
- "methods": [
- "RawEncrypt"
- ]
- },
- "RestoreCryptoKeyVersion": {
- "methods": [
- "RestoreCryptoKeyVersion"
- ]
- },
- "SetIamPolicy": {
- "methods": [
- "SetIamPolicy"
- ]
- },
- "TestIamPermissions": {
- "methods": [
- "TestIamPermissions"
- ]
- },
- "UpdateCryptoKey": {
- "methods": [
- "UpdateCryptoKey"
- ]
- },
- "UpdateCryptoKeyPrimaryVersion": {
- "methods": [
- "UpdateCryptoKeyPrimaryVersion"
- ]
- },
- "UpdateCryptoKeyVersion": {
- "methods": [
- "UpdateCryptoKeyVersion"
- ]
- }
- }
- },
- "rest": {
- "libraryClient": "KeyManagementClient",
- "rpcs": {
- "AsymmetricDecrypt": {
- "methods": [
- "AsymmetricDecrypt"
- ]
- },
- "AsymmetricSign": {
- "methods": [
- "AsymmetricSign"
- ]
- },
- "CreateCryptoKey": {
- "methods": [
- "CreateCryptoKey"
- ]
- },
- "CreateCryptoKeyVersion": {
- "methods": [
- "CreateCryptoKeyVersion"
- ]
- },
- "CreateImportJob": {
- "methods": [
- "CreateImportJob"
- ]
- },
- "CreateKeyRing": {
- "methods": [
- "CreateKeyRing"
- ]
- },
- "Decrypt": {
- "methods": [
- "Decrypt"
- ]
- },
- "DestroyCryptoKeyVersion": {
- "methods": [
- "DestroyCryptoKeyVersion"
- ]
- },
- "Encrypt": {
- "methods": [
- "Encrypt"
- ]
- },
- "GenerateRandomBytes": {
- "methods": [
- "GenerateRandomBytes"
- ]
- },
- "GetCryptoKey": {
- "methods": [
- "GetCryptoKey"
- ]
- },
- "GetCryptoKeyVersion": {
- "methods": [
- "GetCryptoKeyVersion"
- ]
- },
- "GetIamPolicy": {
- "methods": [
- "GetIamPolicy"
- ]
- },
- "GetImportJob": {
- "methods": [
- "GetImportJob"
- ]
- },
- "GetKeyRing": {
- "methods": [
- "GetKeyRing"
- ]
- },
- "GetLocation": {
- "methods": [
- "GetLocation"
- ]
- },
- "GetOperation": {
- "methods": [
- "GetOperation"
- ]
- },
- "GetPublicKey": {
- "methods": [
- "GetPublicKey"
- ]
- },
- "ImportCryptoKeyVersion": {
- "methods": [
- "ImportCryptoKeyVersion"
- ]
- },
- "ListCryptoKeyVersions": {
- "methods": [
- "ListCryptoKeyVersions"
- ]
- },
- "ListCryptoKeys": {
- "methods": [
- "ListCryptoKeys"
- ]
- },
- "ListImportJobs": {
- "methods": [
- "ListImportJobs"
- ]
- },
- "ListKeyRings": {
- "methods": [
- "ListKeyRings"
- ]
- },
- "ListLocations": {
- "methods": [
- "ListLocations"
- ]
- },
- "MacSign": {
- "methods": [
- "MacSign"
- ]
- },
- "MacVerify": {
- "methods": [
- "MacVerify"
- ]
- },
- "RawDecrypt": {
- "methods": [
- "RawDecrypt"
- ]
- },
- "RawEncrypt": {
- "methods": [
- "RawEncrypt"
- ]
- },
- "RestoreCryptoKeyVersion": {
- "methods": [
- "RestoreCryptoKeyVersion"
- ]
- },
- "SetIamPolicy": {
- "methods": [
- "SetIamPolicy"
- ]
- },
- "TestIamPermissions": {
- "methods": [
- "TestIamPermissions"
- ]
- },
- "UpdateCryptoKey": {
- "methods": [
- "UpdateCryptoKey"
- ]
- },
- "UpdateCryptoKeyPrimaryVersion": {
- "methods": [
- "UpdateCryptoKeyPrimaryVersion"
- ]
- },
- "UpdateCryptoKeyVersion": {
- "methods": [
- "UpdateCryptoKeyVersion"
- ]
- }
- }
- }
- }
- }
- }
-}
diff --git a/upstream/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go b/upstream/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go
index 7b3492e9113..2f71ee36c77 100644
--- a/upstream/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go
+++ b/upstream/vendor/cloud.google.com/go/kms/apiv1/key_management_client.go
@@ -28,7 +28,6 @@ import (
iampb "cloud.google.com/go/iam/apiv1/iampb"
kmspb "cloud.google.com/go/kms/apiv1/kmspb"
- longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
@@ -80,7 +79,6 @@ type KeyManagementCallOptions struct {
GetIamPolicy []gax.CallOption
SetIamPolicy []gax.CallOption
TestIamPermissions []gax.CallOption
- GetOperation []gax.CallOption
}
func defaultKeyManagementGRPCClientOptions() []option.ClientOption {
@@ -92,7 +90,6 @@ func defaultKeyManagementGRPCClientOptions() []option.ClientOption {
internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
internaloption.EnableJwtWithScope(),
- internaloption.EnableNewAuthLibrary(),
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
}
@@ -425,7 +422,6 @@ func defaultKeyManagementCallOptions() *KeyManagementCallOptions {
GetIamPolicy: []gax.CallOption{},
SetIamPolicy: []gax.CallOption{},
TestIamPermissions: []gax.CallOption{},
- GetOperation: []gax.CallOption{},
}
}
@@ -732,7 +728,6 @@ func defaultKeyManagementRESTCallOptions() *KeyManagementCallOptions {
GetIamPolicy: []gax.CallOption{},
SetIamPolicy: []gax.CallOption{},
TestIamPermissions: []gax.CallOption{},
- GetOperation: []gax.CallOption{},
}
}
@@ -774,7 +769,6 @@ type internalKeyManagementClient interface {
GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error)
- GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error)
}
// KeyManagementClient is a client for interacting with Cloud Key Management Service (KMS) API.
@@ -1104,11 +1098,6 @@ func (c *KeyManagementClient) TestIamPermissions(ctx context.Context, req *iampb
return c.internalClient.TestIamPermissions(ctx, req, opts...)
}
-// GetOperation is a utility method from google.longrunning.Operations.
-func (c *KeyManagementClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- return c.internalClient.GetOperation(ctx, req, opts...)
-}
-
// keyManagementGRPCClient is a client for interacting with Cloud Key Management Service (KMS) API over gRPC transport.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
@@ -1122,8 +1111,6 @@ type keyManagementGRPCClient struct {
// The gRPC API client.
keyManagementClient kmspb.KeyManagementServiceClient
- operationsClient longrunningpb.OperationsClient
-
iamPolicyClient iampb.IAMPolicyClient
locationsClient locationpb.LocationsClient
@@ -1170,7 +1157,6 @@ func NewKeyManagementClient(ctx context.Context, opts ...option.ClientOption) (*
connPool: connPool,
keyManagementClient: kmspb.NewKeyManagementServiceClient(connPool),
CallOptions: &client.CallOptions,
- operationsClient: longrunningpb.NewOperationsClient(connPool),
iamPolicyClient: iampb.NewIAMPolicyClient(connPool),
locationsClient: locationpb.NewLocationsClient(connPool),
}
@@ -1195,9 +1181,7 @@ func (c *keyManagementGRPCClient) Connection() *grpc.ClientConn {
func (c *keyManagementGRPCClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
- c.xGoogHeaders = []string{
- "x-goog-api-client", gax.XGoogHeader(kv...),
- }
+ c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)}
}
// Close closes the connection to the API service. The user should invoke this when
@@ -1264,7 +1248,6 @@ func defaultKeyManagementRESTClientOptions() []option.ClientOption {
internaloption.WithDefaultUniverseDomain("googleapis.com"),
internaloption.WithDefaultAudience("https://cloudkms.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
- internaloption.EnableNewAuthLibrary(),
}
}
@@ -1274,9 +1257,7 @@ func defaultKeyManagementRESTClientOptions() []option.ClientOption {
func (c *keyManagementRESTClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN")
- c.xGoogHeaders = []string{
- "x-goog-api-client", gax.XGoogHeader(kv...),
- }
+ c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)}
}
// Close closes the connection to the API service. The user should invoke this when
@@ -2027,24 +2008,6 @@ func (c *keyManagementGRPCClient) TestIamPermissions(ctx context.Context, req *i
return resp, nil
}
-func (c *keyManagementGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
// ListKeyRings lists KeyRings.
func (c *keyManagementRESTClient) ListKeyRings(ctx context.Context, req *kmspb.ListKeyRingsRequest, opts ...gax.CallOption) *KeyRingIterator {
it := &KeyRingIterator{}
@@ -4470,63 +4433,3 @@ func (c *keyManagementRESTClient) TestIamPermissions(ctx context.Context, req *i
}
return resp, nil
}
-
-// GetOperation is a utility method from google.longrunning.Operations.
-func (c *keyManagementRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
diff --git a/upstream/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go b/upstream/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go
deleted file mode 100644
index 167f4362333..00000000000
--- a/upstream/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey.pb.go
+++ /dev/null
@@ -1,828 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.34.1
-// protoc v4.25.3
-// source: google/cloud/kms/v1/autokey.proto
-
-package kmspb
-
-import (
- context "context"
- reflect "reflect"
- sync "sync"
-
- longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
- _ "google.golang.org/genproto/googleapis/api/annotations"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Request message for
-// [Autokey.CreateKeyHandle][google.cloud.kms.v1.Autokey.CreateKeyHandle].
-type CreateKeyHandleRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. Name of the resource project and location to create the
- // [KeyHandle][google.cloud.kms.v1.KeyHandle] in, e.g.
- // `projects/{PROJECT_ID}/locations/{LOCATION}`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Optional. Id of the [KeyHandle][google.cloud.kms.v1.KeyHandle]. Must be
- // unique to the resource project and location. If not provided by the caller,
- // a new UUID is used.
- KeyHandleId string `protobuf:"bytes,2,opt,name=key_handle_id,json=keyHandleId,proto3" json:"key_handle_id,omitempty"`
- // Required. [KeyHandle][google.cloud.kms.v1.KeyHandle] to create.
- KeyHandle *KeyHandle `protobuf:"bytes,3,opt,name=key_handle,json=keyHandle,proto3" json:"key_handle,omitempty"`
-}
-
-func (x *CreateKeyHandleRequest) Reset() {
- *x = CreateKeyHandleRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateKeyHandleRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateKeyHandleRequest) ProtoMessage() {}
-
-func (x *CreateKeyHandleRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateKeyHandleRequest.ProtoReflect.Descriptor instead.
-func (*CreateKeyHandleRequest) Descriptor() ([]byte, []int) {
- return file_google_cloud_kms_v1_autokey_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *CreateKeyHandleRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *CreateKeyHandleRequest) GetKeyHandleId() string {
- if x != nil {
- return x.KeyHandleId
- }
- return ""
-}
-
-func (x *CreateKeyHandleRequest) GetKeyHandle() *KeyHandle {
- if x != nil {
- return x.KeyHandle
- }
- return nil
-}
-
-// Request message for [GetKeyHandle][google.cloud.kms.v1.Autokey.GetKeyHandle].
-type GetKeyHandleRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. Name of the [KeyHandle][google.cloud.kms.v1.KeyHandle] resource,
- // e.g.
- // `projects/{PROJECT_ID}/locations/{LOCATION}/keyHandles/{KEY_HANDLE_ID}`.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *GetKeyHandleRequest) Reset() {
- *x = GetKeyHandleRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetKeyHandleRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetKeyHandleRequest) ProtoMessage() {}
-
-func (x *GetKeyHandleRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetKeyHandleRequest.ProtoReflect.Descriptor instead.
-func (*GetKeyHandleRequest) Descriptor() ([]byte, []int) {
- return file_google_cloud_kms_v1_autokey_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *GetKeyHandleRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// Resource-oriented representation of a request to Cloud KMS Autokey and the
-// resulting provisioning of a [CryptoKey][google.cloud.kms.v1.CryptoKey].
-type KeyHandle struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Identifier. Name of the [KeyHandle][google.cloud.kms.v1.KeyHandle]
- // resource, e.g.
- // `projects/{PROJECT_ID}/locations/{LOCATION}/keyHandles/{KEY_HANDLE_ID}`.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // Output only. Name of a [CryptoKey][google.cloud.kms.v1.CryptoKey] that has
- // been provisioned for Customer Managed Encryption Key (CMEK) use in the
- // [KeyHandle][google.cloud.kms.v1.KeyHandle] project and location for the
- // requested resource type. The [CryptoKey][google.cloud.kms.v1.CryptoKey]
- // project will reflect the value configured in the
- // [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] on the resource
- // project's ancestor folder at the time of the
- // [KeyHandle][google.cloud.kms.v1.KeyHandle] creation. If more than one
- // ancestor folder has a configured
- // [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig], the nearest of these
- // configurations is used.
- KmsKey string `protobuf:"bytes,3,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"`
- // Required. Indicates the resource type that the resulting
- // [CryptoKey][google.cloud.kms.v1.CryptoKey] is meant to protect, e.g.
- // `{SERVICE}.googleapis.com/{TYPE}`. See documentation for supported resource
- // types.
- ResourceTypeSelector string `protobuf:"bytes,4,opt,name=resource_type_selector,json=resourceTypeSelector,proto3" json:"resource_type_selector,omitempty"`
-}
-
-func (x *KeyHandle) Reset() {
- *x = KeyHandle{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *KeyHandle) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*KeyHandle) ProtoMessage() {}
-
-func (x *KeyHandle) ProtoReflect() protoreflect.Message {
- mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use KeyHandle.ProtoReflect.Descriptor instead.
-func (*KeyHandle) Descriptor() ([]byte, []int) {
- return file_google_cloud_kms_v1_autokey_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *KeyHandle) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *KeyHandle) GetKmsKey() string {
- if x != nil {
- return x.KmsKey
- }
- return ""
-}
-
-func (x *KeyHandle) GetResourceTypeSelector() string {
- if x != nil {
- return x.ResourceTypeSelector
- }
- return ""
-}
-
-// Metadata message for
-// [CreateKeyHandle][google.cloud.kms.v1.Autokey.CreateKeyHandle] long-running
-// operation response.
-type CreateKeyHandleMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *CreateKeyHandleMetadata) Reset() {
- *x = CreateKeyHandleMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateKeyHandleMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateKeyHandleMetadata) ProtoMessage() {}
-
-func (x *CreateKeyHandleMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateKeyHandleMetadata.ProtoReflect.Descriptor instead.
-func (*CreateKeyHandleMetadata) Descriptor() ([]byte, []int) {
- return file_google_cloud_kms_v1_autokey_proto_rawDescGZIP(), []int{3}
-}
-
-// Request message for
-// [Autokey.ListKeyHandles][google.cloud.kms.v1.Autokey.ListKeyHandles].
-type ListKeyHandlesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. Name of the resource project and location from which to list
- // [KeyHandles][google.cloud.kms.v1.KeyHandle], e.g.
- // `projects/{PROJECT_ID}/locations/{LOCATION}`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Optional. Filter to apply when listing
- // [KeyHandles][google.cloud.kms.v1.KeyHandle], e.g.
- // `resource_type_selector="{SERVICE}.googleapis.com/{TYPE}"`.
- Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"`
-}
-
-func (x *ListKeyHandlesRequest) Reset() {
- *x = ListKeyHandlesRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListKeyHandlesRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListKeyHandlesRequest) ProtoMessage() {}
-
-func (x *ListKeyHandlesRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListKeyHandlesRequest.ProtoReflect.Descriptor instead.
-func (*ListKeyHandlesRequest) Descriptor() ([]byte, []int) {
- return file_google_cloud_kms_v1_autokey_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *ListKeyHandlesRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *ListKeyHandlesRequest) GetFilter() string {
- if x != nil {
- return x.Filter
- }
- return ""
-}
-
-// Response message for
-// [Autokey.ListKeyHandles][google.cloud.kms.v1.Autokey.ListKeyHandles].
-type ListKeyHandlesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Resulting [KeyHandles][google.cloud.kms.v1.KeyHandle].
- KeyHandles []*KeyHandle `protobuf:"bytes,1,rep,name=key_handles,json=keyHandles,proto3" json:"key_handles,omitempty"`
-}
-
-func (x *ListKeyHandlesResponse) Reset() {
- *x = ListKeyHandlesResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListKeyHandlesResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListKeyHandlesResponse) ProtoMessage() {}
-
-func (x *ListKeyHandlesResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_cloud_kms_v1_autokey_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListKeyHandlesResponse.ProtoReflect.Descriptor instead.
-func (*ListKeyHandlesResponse) Descriptor() ([]byte, []int) {
- return file_google_cloud_kms_v1_autokey_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *ListKeyHandlesResponse) GetKeyHandles() []*KeyHandle {
- if x != nil {
- return x.KeyHandles
- }
- return nil
-}
-
-var File_google_cloud_kms_v1_autokey_proto protoreflect.FileDescriptor
-
-var file_google_cloud_kms_v1_autokey_proto_rawDesc = []byte{
- 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b,
- 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61,
- 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
- 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c,
- 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2f,
- 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x22, 0xc8, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x48, 0x61,
- 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02,
- 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x6f,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x27,
- 0x0a, 0x0d, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x6b, 0x65, 0x79, 0x48,
- 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x0a, 0x6b, 0x65, 0x79, 0x5f, 0x68,
- 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
- 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02,
- 0x52, 0x09, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x54, 0x0a, 0x13, 0x47,
- 0x65, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b,
- 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
- 0x6d, 0x2f, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x22, 0xa3, 0x02, 0x0a, 0x09, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x12,
- 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
- 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f,
- 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x03, 0xfa, 0x41,
- 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74,
- 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x16,
- 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x65,
- 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
- 0x02, 0x52, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x53,
- 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x3a, 0x7e, 0xea, 0x41, 0x7b, 0x0a, 0x21, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x12,
- 0x3f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c,
- 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64,
- 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x7d,
- 0x2a, 0x0a, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x32, 0x09, 0x6b, 0x65,
- 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74,
- 0x65, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x22, 0x77, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e,
- 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02,
- 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4c, 0x6f,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b,
- 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x16, 0x4c,
- 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e,
- 0x64, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
- 0x2e, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x48,
- 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x32, 0xb4, 0x05, 0x0a, 0x07, 0x41, 0x75, 0x74, 0x6f, 0x6b,
- 0x65, 0x79, 0x12, 0xeb, 0x01, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79,
- 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65,
- 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e,
- 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x22, 0x8b, 0x01, 0xca, 0x41, 0x24, 0x0a, 0x09, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e,
- 0x64, 0x6c, 0x65, 0x12, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x48, 0x61,
- 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x1f, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65,
- 0x2c, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x82, 0xd3,
- 0xe4, 0x93, 0x02, 0x3c, 0x3a, 0x0a, 0x6b, 0x65, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65,
- 0x22, 0x2e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72,
- 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73,
- 0x12, 0x97, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c,
- 0x65, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
- 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61,
- 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
- 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x3d, 0xda, 0x41, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x12, 0x2e, 0x2f, 0x76, 0x31, 0x2f,
- 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
- 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2f, 0x6b, 0x65, 0x79,
- 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xaa, 0x01, 0x0a, 0x0e, 0x4c,
- 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x12, 0x2a, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
- 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c,
- 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e,
- 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3f, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
- 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x12, 0x2e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f,
- 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6b, 0x65, 0x79,
- 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x1a, 0x74, 0xca, 0x41, 0x17, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x57, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77,
- 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74,
- 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x42, 0x54, 0x0a,
- 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x0c, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65,
- 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d,
- 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d,
- 0x73, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_cloud_kms_v1_autokey_proto_rawDescOnce sync.Once
- file_google_cloud_kms_v1_autokey_proto_rawDescData = file_google_cloud_kms_v1_autokey_proto_rawDesc
-)
-
-func file_google_cloud_kms_v1_autokey_proto_rawDescGZIP() []byte {
- file_google_cloud_kms_v1_autokey_proto_rawDescOnce.Do(func() {
- file_google_cloud_kms_v1_autokey_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_kms_v1_autokey_proto_rawDescData)
- })
- return file_google_cloud_kms_v1_autokey_proto_rawDescData
-}
-
-var file_google_cloud_kms_v1_autokey_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
-var file_google_cloud_kms_v1_autokey_proto_goTypes = []interface{}{
- (*CreateKeyHandleRequest)(nil), // 0: google.cloud.kms.v1.CreateKeyHandleRequest
- (*GetKeyHandleRequest)(nil), // 1: google.cloud.kms.v1.GetKeyHandleRequest
- (*KeyHandle)(nil), // 2: google.cloud.kms.v1.KeyHandle
- (*CreateKeyHandleMetadata)(nil), // 3: google.cloud.kms.v1.CreateKeyHandleMetadata
- (*ListKeyHandlesRequest)(nil), // 4: google.cloud.kms.v1.ListKeyHandlesRequest
- (*ListKeyHandlesResponse)(nil), // 5: google.cloud.kms.v1.ListKeyHandlesResponse
- (*longrunningpb.Operation)(nil), // 6: google.longrunning.Operation
-}
-var file_google_cloud_kms_v1_autokey_proto_depIdxs = []int32{
- 2, // 0: google.cloud.kms.v1.CreateKeyHandleRequest.key_handle:type_name -> google.cloud.kms.v1.KeyHandle
- 2, // 1: google.cloud.kms.v1.ListKeyHandlesResponse.key_handles:type_name -> google.cloud.kms.v1.KeyHandle
- 0, // 2: google.cloud.kms.v1.Autokey.CreateKeyHandle:input_type -> google.cloud.kms.v1.CreateKeyHandleRequest
- 1, // 3: google.cloud.kms.v1.Autokey.GetKeyHandle:input_type -> google.cloud.kms.v1.GetKeyHandleRequest
- 4, // 4: google.cloud.kms.v1.Autokey.ListKeyHandles:input_type -> google.cloud.kms.v1.ListKeyHandlesRequest
- 6, // 5: google.cloud.kms.v1.Autokey.CreateKeyHandle:output_type -> google.longrunning.Operation
- 2, // 6: google.cloud.kms.v1.Autokey.GetKeyHandle:output_type -> google.cloud.kms.v1.KeyHandle
- 5, // 7: google.cloud.kms.v1.Autokey.ListKeyHandles:output_type -> google.cloud.kms.v1.ListKeyHandlesResponse
- 5, // [5:8] is the sub-list for method output_type
- 2, // [2:5] is the sub-list for method input_type
- 2, // [2:2] is the sub-list for extension type_name
- 2, // [2:2] is the sub-list for extension extendee
- 0, // [0:2] is the sub-list for field type_name
-}
-
-func init() { file_google_cloud_kms_v1_autokey_proto_init() }
-func file_google_cloud_kms_v1_autokey_proto_init() {
- if File_google_cloud_kms_v1_autokey_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_google_cloud_kms_v1_autokey_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CreateKeyHandleRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_autokey_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetKeyHandleRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_autokey_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*KeyHandle); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_autokey_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CreateKeyHandleMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_autokey_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListKeyHandlesRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_autokey_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListKeyHandlesResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_cloud_kms_v1_autokey_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 6,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_google_cloud_kms_v1_autokey_proto_goTypes,
- DependencyIndexes: file_google_cloud_kms_v1_autokey_proto_depIdxs,
- MessageInfos: file_google_cloud_kms_v1_autokey_proto_msgTypes,
- }.Build()
- File_google_cloud_kms_v1_autokey_proto = out.File
- file_google_cloud_kms_v1_autokey_proto_rawDesc = nil
- file_google_cloud_kms_v1_autokey_proto_goTypes = nil
- file_google_cloud_kms_v1_autokey_proto_depIdxs = nil
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConnInterface
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion6
-
-// AutokeyClient is the client API for Autokey service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type AutokeyClient interface {
- // Creates a new [KeyHandle][google.cloud.kms.v1.KeyHandle], triggering the
- // provisioning of a new [CryptoKey][google.cloud.kms.v1.CryptoKey] for CMEK
- // use with the given resource type in the configured key project and the same
- // location. [GetOperation][Operations.GetOperation] should be used to resolve
- // the resulting long-running operation and get the resulting
- // [KeyHandle][google.cloud.kms.v1.KeyHandle] and
- // [CryptoKey][google.cloud.kms.v1.CryptoKey].
- CreateKeyHandle(ctx context.Context, in *CreateKeyHandleRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
- // Returns the [KeyHandle][google.cloud.kms.v1.KeyHandle].
- GetKeyHandle(ctx context.Context, in *GetKeyHandleRequest, opts ...grpc.CallOption) (*KeyHandle, error)
- // Lists [KeyHandles][google.cloud.kms.v1.KeyHandle].
- ListKeyHandles(ctx context.Context, in *ListKeyHandlesRequest, opts ...grpc.CallOption) (*ListKeyHandlesResponse, error)
-}
-
-type autokeyClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewAutokeyClient(cc grpc.ClientConnInterface) AutokeyClient {
- return &autokeyClient{cc}
-}
-
-func (c *autokeyClient) CreateKeyHandle(ctx context.Context, in *CreateKeyHandleRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
- out := new(longrunningpb.Operation)
- err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.Autokey/CreateKeyHandle", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *autokeyClient) GetKeyHandle(ctx context.Context, in *GetKeyHandleRequest, opts ...grpc.CallOption) (*KeyHandle, error) {
- out := new(KeyHandle)
- err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.Autokey/GetKeyHandle", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *autokeyClient) ListKeyHandles(ctx context.Context, in *ListKeyHandlesRequest, opts ...grpc.CallOption) (*ListKeyHandlesResponse, error) {
- out := new(ListKeyHandlesResponse)
- err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.Autokey/ListKeyHandles", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// AutokeyServer is the server API for Autokey service.
-type AutokeyServer interface {
- // Creates a new [KeyHandle][google.cloud.kms.v1.KeyHandle], triggering the
- // provisioning of a new [CryptoKey][google.cloud.kms.v1.CryptoKey] for CMEK
- // use with the given resource type in the configured key project and the same
- // location. [GetOperation][Operations.GetOperation] should be used to resolve
- // the resulting long-running operation and get the resulting
- // [KeyHandle][google.cloud.kms.v1.KeyHandle] and
- // [CryptoKey][google.cloud.kms.v1.CryptoKey].
- CreateKeyHandle(context.Context, *CreateKeyHandleRequest) (*longrunningpb.Operation, error)
- // Returns the [KeyHandle][google.cloud.kms.v1.KeyHandle].
- GetKeyHandle(context.Context, *GetKeyHandleRequest) (*KeyHandle, error)
- // Lists [KeyHandles][google.cloud.kms.v1.KeyHandle].
- ListKeyHandles(context.Context, *ListKeyHandlesRequest) (*ListKeyHandlesResponse, error)
-}
-
-// UnimplementedAutokeyServer can be embedded to have forward compatible implementations.
-type UnimplementedAutokeyServer struct {
-}
-
-func (*UnimplementedAutokeyServer) CreateKeyHandle(context.Context, *CreateKeyHandleRequest) (*longrunningpb.Operation, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateKeyHandle not implemented")
-}
-func (*UnimplementedAutokeyServer) GetKeyHandle(context.Context, *GetKeyHandleRequest) (*KeyHandle, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetKeyHandle not implemented")
-}
-func (*UnimplementedAutokeyServer) ListKeyHandles(context.Context, *ListKeyHandlesRequest) (*ListKeyHandlesResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListKeyHandles not implemented")
-}
-
-func RegisterAutokeyServer(s *grpc.Server, srv AutokeyServer) {
- s.RegisterService(&_Autokey_serviceDesc, srv)
-}
-
-func _Autokey_CreateKeyHandle_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateKeyHandleRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AutokeyServer).CreateKeyHandle(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.cloud.kms.v1.Autokey/CreateKeyHandle",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AutokeyServer).CreateKeyHandle(ctx, req.(*CreateKeyHandleRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Autokey_GetKeyHandle_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetKeyHandleRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AutokeyServer).GetKeyHandle(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.cloud.kms.v1.Autokey/GetKeyHandle",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AutokeyServer).GetKeyHandle(ctx, req.(*GetKeyHandleRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Autokey_ListKeyHandles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListKeyHandlesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AutokeyServer).ListKeyHandles(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.cloud.kms.v1.Autokey/ListKeyHandles",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AutokeyServer).ListKeyHandles(ctx, req.(*ListKeyHandlesRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _Autokey_serviceDesc = grpc.ServiceDesc{
- ServiceName: "google.cloud.kms.v1.Autokey",
- HandlerType: (*AutokeyServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "CreateKeyHandle",
- Handler: _Autokey_CreateKeyHandle_Handler,
- },
- {
- MethodName: "GetKeyHandle",
- Handler: _Autokey_GetKeyHandle_Handler,
- },
- {
- MethodName: "ListKeyHandles",
- Handler: _Autokey_ListKeyHandles_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "google/cloud/kms/v1/autokey.proto",
-}
diff --git a/upstream/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go b/upstream/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go
deleted file mode 100644
index 972ec637bfc..00000000000
--- a/upstream/vendor/cloud.google.com/go/kms/apiv1/kmspb/autokey_admin.pb.go
+++ /dev/null
@@ -1,733 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.34.1
-// protoc v4.25.3
-// source: google/cloud/kms/v1/autokey_admin.proto
-
-package kmspb
-
-import (
- context "context"
- reflect "reflect"
- sync "sync"
-
- _ "google.golang.org/genproto/googleapis/api/annotations"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Request message for
-// [UpdateAutokeyConfig][google.cloud.kms.v1.AutokeyAdmin.UpdateAutokeyConfig].
-type UpdateAutokeyConfigRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] with values to
- // update.
- AutokeyConfig *AutokeyConfig `protobuf:"bytes,1,opt,name=autokey_config,json=autokeyConfig,proto3" json:"autokey_config,omitempty"`
- // Required. Masks which fields of the
- // [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] to update, e.g.
- // `keyProject`.
- UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
-}
-
-func (x *UpdateAutokeyConfigRequest) Reset() {
- *x = UpdateAutokeyConfigRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *UpdateAutokeyConfigRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UpdateAutokeyConfigRequest) ProtoMessage() {}
-
-func (x *UpdateAutokeyConfigRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UpdateAutokeyConfigRequest.ProtoReflect.Descriptor instead.
-func (*UpdateAutokeyConfigRequest) Descriptor() ([]byte, []int) {
- return file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *UpdateAutokeyConfigRequest) GetAutokeyConfig() *AutokeyConfig {
- if x != nil {
- return x.AutokeyConfig
- }
- return nil
-}
-
-func (x *UpdateAutokeyConfigRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
- if x != nil {
- return x.UpdateMask
- }
- return nil
-}
-
-// Request message for
-// [GetAutokeyConfig][google.cloud.kms.v1.AutokeyAdmin.GetAutokeyConfig].
-type GetAutokeyConfigRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. Name of the [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig]
- // resource, e.g. `folders/{FOLDER_NUMBER}/autokeyConfig`.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *GetAutokeyConfigRequest) Reset() {
- *x = GetAutokeyConfigRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetAutokeyConfigRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetAutokeyConfigRequest) ProtoMessage() {}
-
-func (x *GetAutokeyConfigRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetAutokeyConfigRequest.ProtoReflect.Descriptor instead.
-func (*GetAutokeyConfigRequest) Descriptor() ([]byte, []int) {
- return file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *GetAutokeyConfigRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// Cloud KMS Autokey configuration for a folder.
-type AutokeyConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Identifier. Name of the [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig]
- // resource, e.g. `folders/{FOLDER_NUMBER}/autokeyConfig`.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // Optional. Name of the key project, e.g. `projects/{PROJECT_ID}` or
- // `projects/{PROJECT_NUMBER}`, where Cloud KMS Autokey will provision a new
- // [CryptoKey][google.cloud.kms.v1.CryptoKey] when a
- // [KeyHandle][google.cloud.kms.v1.KeyHandle] is created. On
- // [UpdateAutokeyConfig][google.cloud.kms.v1.AutokeyAdmin.UpdateAutokeyConfig],
- // the caller will require `cloudkms.cryptoKeys.setIamPolicy` permission on
- // this key project. Once configured, for Cloud KMS Autokey to function
- // properly, this key project must have the Cloud KMS API activated and the
- // Cloud KMS Service Agent for this key project must be granted the
- // `cloudkms.admin` role (or pertinent permissions). A request with an empty
- // key project field will clear the configuration.
- KeyProject string `protobuf:"bytes,2,opt,name=key_project,json=keyProject,proto3" json:"key_project,omitempty"`
-}
-
-func (x *AutokeyConfig) Reset() {
- *x = AutokeyConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AutokeyConfig) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AutokeyConfig) ProtoMessage() {}
-
-func (x *AutokeyConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AutokeyConfig.ProtoReflect.Descriptor instead.
-func (*AutokeyConfig) Descriptor() ([]byte, []int) {
- return file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *AutokeyConfig) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *AutokeyConfig) GetKeyProject() string {
- if x != nil {
- return x.KeyProject
- }
- return ""
-}
-
-// Request message for
-// [ShowEffectiveAutokeyConfig][google.cloud.kms.v1.AutokeyAdmin.ShowEffectiveAutokeyConfig].
-type ShowEffectiveAutokeyConfigRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. Name of the resource project to the show effective Cloud KMS
- // Autokey configuration for. This may be helpful for interrogating the effect
- // of nested folder configurations on a given resource project.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
-}
-
-func (x *ShowEffectiveAutokeyConfigRequest) Reset() {
- *x = ShowEffectiveAutokeyConfigRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ShowEffectiveAutokeyConfigRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ShowEffectiveAutokeyConfigRequest) ProtoMessage() {}
-
-func (x *ShowEffectiveAutokeyConfigRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ShowEffectiveAutokeyConfigRequest.ProtoReflect.Descriptor instead.
-func (*ShowEffectiveAutokeyConfigRequest) Descriptor() ([]byte, []int) {
- return file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *ShowEffectiveAutokeyConfigRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-// Response message for
-// [ShowEffectiveAutokeyConfig][google.cloud.kms.v1.AutokeyAdmin.ShowEffectiveAutokeyConfig].
-type ShowEffectiveAutokeyConfigResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Name of the key project configured in the resource project's folder
- // ancestry.
- KeyProject string `protobuf:"bytes,1,opt,name=key_project,json=keyProject,proto3" json:"key_project,omitempty"`
-}
-
-func (x *ShowEffectiveAutokeyConfigResponse) Reset() {
- *x = ShowEffectiveAutokeyConfigResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ShowEffectiveAutokeyConfigResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ShowEffectiveAutokeyConfigResponse) ProtoMessage() {}
-
-func (x *ShowEffectiveAutokeyConfigResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ShowEffectiveAutokeyConfigResponse.ProtoReflect.Descriptor instead.
-func (*ShowEffectiveAutokeyConfigResponse) Descriptor() ([]byte, []int) {
- return file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *ShowEffectiveAutokeyConfigResponse) GetKeyProject() string {
- if x != nil {
- return x.KeyProject
- }
- return ""
-}
-
-var File_google_cloud_kms_v1_autokey_admin_proto protoreflect.FileDescriptor
-
-var file_google_cloud_kms_v1_autokey_admin_proto_rawDesc = []byte{
- 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x6b,
- 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x5f, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1c,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70,
- 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61,
- 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x1a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x75,
- 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
- 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03,
- 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73,
- 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d,
- 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
- 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x5c, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x6b,
- 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0,
- 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41,
- 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x22, 0xb9, 0x01, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a,
- 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x6b, 0x65, 0x79, 0x50, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x3a, 0x69, 0xea, 0x41, 0x66, 0x0a, 0x25, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b,
- 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
- 0x6d, 0x2f, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
- 0x1e, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72,
- 0x7d, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2a,
- 0x0e, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x32,
- 0x0d, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x70,
- 0x0a, 0x21, 0x53, 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41,
- 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f,
- 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65,
- 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
- 0x22, 0x45, 0x0a, 0x22, 0x53, 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76,
- 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x72,
- 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6b, 0x65, 0x79,
- 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x32, 0xc8, 0x05, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x6f,
- 0x6b, 0x65, 0x79, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0xd2, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
- 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x75, 0x74,
- 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64,
- 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x66, 0xda, 0x41, 0x1a, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65,
- 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f,
- 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x43, 0x3a, 0x0e, 0x61, 0x75, 0x74, 0x6f,
- 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x32, 0x31, 0x2f, 0x76, 0x31, 0x2f,
- 0x7b, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
- 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x61,
- 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0x97, 0x01,
- 0x0a, 0x10, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x6f,
- 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
- 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x22, 0x31, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4,
- 0x93, 0x02, 0x24, 0x12, 0x22, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x66,
- 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0xd2, 0x01, 0x0a, 0x1a, 0x53, 0x68, 0x6f, 0x77,
- 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x6f,
- 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65,
- 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d,
- 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69,
- 0x76, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
- 0x7d, 0x3a, 0x73, 0x68, 0x6f, 0x77, 0x45, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41,
- 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x74, 0xca, 0x41,
- 0x17, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x57, 0x68, 0x74, 0x74, 0x70, 0x73,
- 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64,
- 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a,
- 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b,
- 0x6d, 0x73, 0x42, 0x59, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x41,
- 0x75, 0x74, 0x6f, 0x6b, 0x65, 0x79, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f,
- 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76,
- 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x62, 0x06, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_cloud_kms_v1_autokey_admin_proto_rawDescOnce sync.Once
- file_google_cloud_kms_v1_autokey_admin_proto_rawDescData = file_google_cloud_kms_v1_autokey_admin_proto_rawDesc
-)
-
-func file_google_cloud_kms_v1_autokey_admin_proto_rawDescGZIP() []byte {
- file_google_cloud_kms_v1_autokey_admin_proto_rawDescOnce.Do(func() {
- file_google_cloud_kms_v1_autokey_admin_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_cloud_kms_v1_autokey_admin_proto_rawDescData)
- })
- return file_google_cloud_kms_v1_autokey_admin_proto_rawDescData
-}
-
-var file_google_cloud_kms_v1_autokey_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
-var file_google_cloud_kms_v1_autokey_admin_proto_goTypes = []interface{}{
- (*UpdateAutokeyConfigRequest)(nil), // 0: google.cloud.kms.v1.UpdateAutokeyConfigRequest
- (*GetAutokeyConfigRequest)(nil), // 1: google.cloud.kms.v1.GetAutokeyConfigRequest
- (*AutokeyConfig)(nil), // 2: google.cloud.kms.v1.AutokeyConfig
- (*ShowEffectiveAutokeyConfigRequest)(nil), // 3: google.cloud.kms.v1.ShowEffectiveAutokeyConfigRequest
- (*ShowEffectiveAutokeyConfigResponse)(nil), // 4: google.cloud.kms.v1.ShowEffectiveAutokeyConfigResponse
- (*fieldmaskpb.FieldMask)(nil), // 5: google.protobuf.FieldMask
-}
-var file_google_cloud_kms_v1_autokey_admin_proto_depIdxs = []int32{
- 2, // 0: google.cloud.kms.v1.UpdateAutokeyConfigRequest.autokey_config:type_name -> google.cloud.kms.v1.AutokeyConfig
- 5, // 1: google.cloud.kms.v1.UpdateAutokeyConfigRequest.update_mask:type_name -> google.protobuf.FieldMask
- 0, // 2: google.cloud.kms.v1.AutokeyAdmin.UpdateAutokeyConfig:input_type -> google.cloud.kms.v1.UpdateAutokeyConfigRequest
- 1, // 3: google.cloud.kms.v1.AutokeyAdmin.GetAutokeyConfig:input_type -> google.cloud.kms.v1.GetAutokeyConfigRequest
- 3, // 4: google.cloud.kms.v1.AutokeyAdmin.ShowEffectiveAutokeyConfig:input_type -> google.cloud.kms.v1.ShowEffectiveAutokeyConfigRequest
- 2, // 5: google.cloud.kms.v1.AutokeyAdmin.UpdateAutokeyConfig:output_type -> google.cloud.kms.v1.AutokeyConfig
- 2, // 6: google.cloud.kms.v1.AutokeyAdmin.GetAutokeyConfig:output_type -> google.cloud.kms.v1.AutokeyConfig
- 4, // 7: google.cloud.kms.v1.AutokeyAdmin.ShowEffectiveAutokeyConfig:output_type -> google.cloud.kms.v1.ShowEffectiveAutokeyConfigResponse
- 5, // [5:8] is the sub-list for method output_type
- 2, // [2:5] is the sub-list for method input_type
- 2, // [2:2] is the sub-list for extension type_name
- 2, // [2:2] is the sub-list for extension extendee
- 0, // [0:2] is the sub-list for field type_name
-}
-
-func init() { file_google_cloud_kms_v1_autokey_admin_proto_init() }
-func file_google_cloud_kms_v1_autokey_admin_proto_init() {
- if File_google_cloud_kms_v1_autokey_admin_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpdateAutokeyConfigRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetAutokeyConfigRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AutokeyConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ShowEffectiveAutokeyConfigRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_cloud_kms_v1_autokey_admin_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ShowEffectiveAutokeyConfigResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_cloud_kms_v1_autokey_admin_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 5,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_google_cloud_kms_v1_autokey_admin_proto_goTypes,
- DependencyIndexes: file_google_cloud_kms_v1_autokey_admin_proto_depIdxs,
- MessageInfos: file_google_cloud_kms_v1_autokey_admin_proto_msgTypes,
- }.Build()
- File_google_cloud_kms_v1_autokey_admin_proto = out.File
- file_google_cloud_kms_v1_autokey_admin_proto_rawDesc = nil
- file_google_cloud_kms_v1_autokey_admin_proto_goTypes = nil
- file_google_cloud_kms_v1_autokey_admin_proto_depIdxs = nil
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConnInterface
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion6
-
-// AutokeyAdminClient is the client API for AutokeyAdmin service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type AutokeyAdminClient interface {
- // Updates the [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] for a
- // folder. The caller must have both `cloudkms.autokeyConfigs.update`
- // permission on the parent folder and `cloudkms.cryptoKeys.setIamPolicy`
- // permission on the provided key project. A
- // [KeyHandle][google.cloud.kms.v1.KeyHandle] creation in the folder's
- // descendant projects will use this configuration to determine where to
- // create the resulting [CryptoKey][google.cloud.kms.v1.CryptoKey].
- UpdateAutokeyConfig(ctx context.Context, in *UpdateAutokeyConfigRequest, opts ...grpc.CallOption) (*AutokeyConfig, error)
- // Returns the [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] for a
- // folder.
- GetAutokeyConfig(ctx context.Context, in *GetAutokeyConfigRequest, opts ...grpc.CallOption) (*AutokeyConfig, error)
- // Returns the effective Cloud KMS Autokey configuration for a given project.
- ShowEffectiveAutokeyConfig(ctx context.Context, in *ShowEffectiveAutokeyConfigRequest, opts ...grpc.CallOption) (*ShowEffectiveAutokeyConfigResponse, error)
-}
-
-type autokeyAdminClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewAutokeyAdminClient(cc grpc.ClientConnInterface) AutokeyAdminClient {
- return &autokeyAdminClient{cc}
-}
-
-func (c *autokeyAdminClient) UpdateAutokeyConfig(ctx context.Context, in *UpdateAutokeyConfigRequest, opts ...grpc.CallOption) (*AutokeyConfig, error) {
- out := new(AutokeyConfig)
- err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.AutokeyAdmin/UpdateAutokeyConfig", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *autokeyAdminClient) GetAutokeyConfig(ctx context.Context, in *GetAutokeyConfigRequest, opts ...grpc.CallOption) (*AutokeyConfig, error) {
- out := new(AutokeyConfig)
- err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.AutokeyAdmin/GetAutokeyConfig", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *autokeyAdminClient) ShowEffectiveAutokeyConfig(ctx context.Context, in *ShowEffectiveAutokeyConfigRequest, opts ...grpc.CallOption) (*ShowEffectiveAutokeyConfigResponse, error) {
- out := new(ShowEffectiveAutokeyConfigResponse)
- err := c.cc.Invoke(ctx, "/google.cloud.kms.v1.AutokeyAdmin/ShowEffectiveAutokeyConfig", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// AutokeyAdminServer is the server API for AutokeyAdmin service.
-type AutokeyAdminServer interface {
- // Updates the [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] for a
- // folder. The caller must have both `cloudkms.autokeyConfigs.update`
- // permission on the parent folder and `cloudkms.cryptoKeys.setIamPolicy`
- // permission on the provided key project. A
- // [KeyHandle][google.cloud.kms.v1.KeyHandle] creation in the folder's
- // descendant projects will use this configuration to determine where to
- // create the resulting [CryptoKey][google.cloud.kms.v1.CryptoKey].
- UpdateAutokeyConfig(context.Context, *UpdateAutokeyConfigRequest) (*AutokeyConfig, error)
- // Returns the [AutokeyConfig][google.cloud.kms.v1.AutokeyConfig] for a
- // folder.
- GetAutokeyConfig(context.Context, *GetAutokeyConfigRequest) (*AutokeyConfig, error)
- // Returns the effective Cloud KMS Autokey configuration for a given project.
- ShowEffectiveAutokeyConfig(context.Context, *ShowEffectiveAutokeyConfigRequest) (*ShowEffectiveAutokeyConfigResponse, error)
-}
-
-// UnimplementedAutokeyAdminServer can be embedded to have forward compatible implementations.
-type UnimplementedAutokeyAdminServer struct {
-}
-
-func (*UnimplementedAutokeyAdminServer) UpdateAutokeyConfig(context.Context, *UpdateAutokeyConfigRequest) (*AutokeyConfig, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateAutokeyConfig not implemented")
-}
-func (*UnimplementedAutokeyAdminServer) GetAutokeyConfig(context.Context, *GetAutokeyConfigRequest) (*AutokeyConfig, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetAutokeyConfig not implemented")
-}
-func (*UnimplementedAutokeyAdminServer) ShowEffectiveAutokeyConfig(context.Context, *ShowEffectiveAutokeyConfigRequest) (*ShowEffectiveAutokeyConfigResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ShowEffectiveAutokeyConfig not implemented")
-}
-
-func RegisterAutokeyAdminServer(s *grpc.Server, srv AutokeyAdminServer) {
- s.RegisterService(&_AutokeyAdmin_serviceDesc, srv)
-}
-
-func _AutokeyAdmin_UpdateAutokeyConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(UpdateAutokeyConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AutokeyAdminServer).UpdateAutokeyConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.cloud.kms.v1.AutokeyAdmin/UpdateAutokeyConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AutokeyAdminServer).UpdateAutokeyConfig(ctx, req.(*UpdateAutokeyConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _AutokeyAdmin_GetAutokeyConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetAutokeyConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AutokeyAdminServer).GetAutokeyConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.cloud.kms.v1.AutokeyAdmin/GetAutokeyConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AutokeyAdminServer).GetAutokeyConfig(ctx, req.(*GetAutokeyConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _AutokeyAdmin_ShowEffectiveAutokeyConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ShowEffectiveAutokeyConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AutokeyAdminServer).ShowEffectiveAutokeyConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.cloud.kms.v1.AutokeyAdmin/ShowEffectiveAutokeyConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AutokeyAdminServer).ShowEffectiveAutokeyConfig(ctx, req.(*ShowEffectiveAutokeyConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _AutokeyAdmin_serviceDesc = grpc.ServiceDesc{
- ServiceName: "google.cloud.kms.v1.AutokeyAdmin",
- HandlerType: (*AutokeyAdminServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "UpdateAutokeyConfig",
- Handler: _AutokeyAdmin_UpdateAutokeyConfig_Handler,
- },
- {
- MethodName: "GetAutokeyConfig",
- Handler: _AutokeyAdmin_GetAutokeyConfig_Handler,
- },
- {
- MethodName: "ShowEffectiveAutokeyConfig",
- Handler: _AutokeyAdmin_ShowEffectiveAutokeyConfig_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "google/cloud/kms/v1/autokey_admin.proto",
-}
diff --git a/upstream/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go b/upstream/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go
index 20329707472..632ca94ab52 100644
--- a/upstream/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go
+++ b/upstream/vendor/cloud.google.com/go/kms/apiv1/kmspb/ekm_service.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
-// protoc v4.25.3
+// protoc-gen-go v1.32.0
+// protoc v4.25.2
// source: google/cloud/kms/v1/ekm_service.proto
package kmspb
diff --git a/upstream/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go b/upstream/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go
index 31d613edf82..3cc89947e81 100644
--- a/upstream/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go
+++ b/upstream/vendor/cloud.google.com/go/kms/apiv1/kmspb/resources.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
-// protoc v4.25.3
+// protoc-gen-go v1.32.0
+// protoc v4.25.2
// source: google/cloud/kms/v1/resources.proto
package kmspb
@@ -357,8 +357,6 @@ const (
// Other hash functions can also be used:
// https://cloud.google.com/kms/docs/create-validate-signatures#ecdsa_support_for_other_hash_algorithms
CryptoKeyVersion_EC_SIGN_SECP256K1_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 31
- // EdDSA on the Curve25519 in pure mode (taking data as input).
- CryptoKeyVersion_EC_SIGN_ED25519 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 40
// HMAC-SHA256 signing with a 256 bit key.
CryptoKeyVersion_HMAC_SHA256 CryptoKeyVersion_CryptoKeyVersionAlgorithm = 32
// HMAC-SHA1 signing with a 160 bit key.
@@ -405,7 +403,6 @@ var (
12: "EC_SIGN_P256_SHA256",
13: "EC_SIGN_P384_SHA384",
31: "EC_SIGN_SECP256K1_SHA256",
- 40: "EC_SIGN_ED25519",
32: "HMAC_SHA256",
33: "HMAC_SHA1",
34: "HMAC_SHA384",
@@ -443,7 +440,6 @@ var (
"EC_SIGN_P256_SHA256": 12,
"EC_SIGN_P384_SHA384": 13,
"EC_SIGN_SECP256K1_SHA256": 31,
- "EC_SIGN_ED25519": 40,
"HMAC_SHA256": 32,
"HMAC_SHA1": 33,
"HMAC_SHA384": 34,
@@ -1497,18 +1493,22 @@ type PublicKey struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The public key, encoded in PEM format. For more information, see the
- // [RFC 7468](https://tools.ietf.org/html/rfc7468) sections for
- // [General Considerations](https://tools.ietf.org/html/rfc7468#section-2) and
- // [Textual Encoding of Subject Public Key Info]
+ // A public key encoded in PEM format, populated only when
+ // [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]
+ // returns one key. For more information, see the [RFC
+ // 7468](https://tools.ietf.org/html/rfc7468) sections for [General
+ // Considerations](https://tools.ietf.org/html/rfc7468#section-2) and [Textual
+ // Encoding of Subject Public Key Info]
// (https://tools.ietf.org/html/rfc7468#section-13).
Pem string `protobuf:"bytes,1,opt,name=pem,proto3" json:"pem,omitempty"`
// The
// [Algorithm][google.cloud.kms.v1.CryptoKeyVersion.CryptoKeyVersionAlgorithm]
// associated with this key.
Algorithm CryptoKeyVersion_CryptoKeyVersionAlgorithm `protobuf:"varint,2,opt,name=algorithm,proto3,enum=google.cloud.kms.v1.CryptoKeyVersion_CryptoKeyVersionAlgorithm" json:"algorithm,omitempty"`
- // Integrity verification field. A CRC32C checksum of the returned
- // [PublicKey.pem][google.cloud.kms.v1.PublicKey.pem]. An integrity check of
+ // Integrity verification field: A CRC32C checksum of the returned
+ // [PublicKey.pem][google.cloud.kms.v1.PublicKey.pem]. It is only populated
+ // when [GetPublicKey][google.cloud.kms.v1.KeyManagementService.GetPublicKey]
+ // returns one key. An integrity check of
// [PublicKey.pem][google.cloud.kms.v1.PublicKey.pem] can be performed by
// computing the CRC32C checksum of
// [PublicKey.pem][google.cloud.kms.v1.PublicKey.pem] and comparing your
@@ -2118,7 +2118,7 @@ var file_google_cloud_kms_v1_resources_proto_rawDesc = []byte{
0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, 0x56,
0x49, 0x55, 0x4d, 0x5f, 0x56, 0x31, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x45,
0x44, 0x10, 0x03, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41, 0x56, 0x49, 0x55, 0x4d, 0x5f, 0x56, 0x32,
- 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x45, 0x44, 0x10, 0x04, 0x22, 0x89, 0x15,
+ 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x45, 0x44, 0x10, 0x04, 0x22, 0xf4, 0x14,
0x0a, 0x10, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69,
0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x05, 0x73,
@@ -2191,7 +2191,7 @@ var file_google_cloud_kms_v1_resources_proto_rawDesc = []byte{
0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x11,
0x72, 0x65, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x65, 0x6c, 0x69, 0x67, 0x69, 0x62, 0x6c,
0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x72, 0x65,
- 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x45, 0x6c, 0x69, 0x67, 0x69, 0x62, 0x6c, 0x65, 0x22, 0xe2,
+ 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x45, 0x6c, 0x69, 0x67, 0x69, 0x62, 0x6c, 0x65, 0x22, 0xcd,
0x07, 0x0a, 0x19, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73,
0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x2c, 0x0a, 0x28,
0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49,
@@ -2245,180 +2245,178 @@ var file_google_cloud_kms_v1_resources_proto_rawDesc = []byte{
0x0c, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x43, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x33, 0x38,
0x34, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x0d, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x43,
0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x4b, 0x31, 0x5f,
- 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x1f, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x43, 0x5f, 0x53,
- 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x28, 0x12, 0x0f, 0x0a,
- 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x20, 0x12, 0x0d,
- 0x0a, 0x09, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x21, 0x12, 0x0f, 0x0a,
- 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x22, 0x12, 0x0f,
- 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x23, 0x12,
- 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x32, 0x34, 0x10, 0x24,
- 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x53, 0x59, 0x4d,
- 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f,
- 0x4e, 0x10, 0x12, 0x22, 0x9b, 0x02, 0x0a, 0x15, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
- 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x28, 0x0a,
- 0x24, 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x45, 0x52, 0x53,
- 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
- 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x45, 0x4e, 0x44, 0x49,
- 0x4e, 0x47, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12,
- 0x0b, 0x0a, 0x07, 0x45, 0x4e, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08,
- 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45,
- 0x53, 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x53,
- 0x54, 0x52, 0x4f, 0x59, 0x5f, 0x53, 0x43, 0x48, 0x45, 0x44, 0x55, 0x4c, 0x45, 0x44, 0x10, 0x04,
- 0x12, 0x12, 0x0a, 0x0e, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x49, 0x4d, 0x50, 0x4f,
- 0x52, 0x54, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x46,
- 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x07, 0x12, 0x15, 0x0a, 0x11, 0x47, 0x45, 0x4e, 0x45, 0x52,
- 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x08, 0x12, 0x20,
- 0x0a, 0x1c, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e,
- 0x41, 0x4c, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x52, 0x55, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09,
- 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x44, 0x45, 0x53,
- 0x54, 0x52, 0x55, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10,
- 0x0a, 0x22, 0x49, 0x0a, 0x14, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x56, 0x69, 0x65, 0x77, 0x12, 0x27, 0x0a, 0x23, 0x43, 0x52, 0x59,
- 0x50, 0x54, 0x4f, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f,
- 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
- 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x01, 0x3a, 0xaa, 0x01, 0xea,
- 0x41, 0xa6, 0x01, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79,
- 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x7a, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f,
- 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65,
- 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79,
- 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0xce, 0x03, 0x0a, 0x09, 0x50, 0x75,
- 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x65, 0x6d, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x65, 0x6d, 0x12, 0x5d, 0x0a, 0x09, 0x61, 0x6c, 0x67,
- 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e,
- 0x76, 0x31, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61,
- 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x65, 0x6d, 0x5f,
- 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49,
- 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x65, 0x6d, 0x43, 0x72,
- 0x63, 0x33, 0x32, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74,
- 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0xae, 0x01, 0xea, 0x41, 0xaa, 0x01,
- 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63,
- 0x4b, 0x65, 0x79, 0x12, 0x84, 0x01, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65,
- 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67,
- 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f,
- 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79,
- 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d,
- 0x2f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0xd4, 0x09, 0x0a, 0x09, 0x49,
- 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x12, 0x58, 0x0a, 0x0d, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x68,
- 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49,
- 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d,
- 0x65, 0x74, 0x68, 0x6f, 0x64, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0c, 0x69,
- 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x57, 0x0a, 0x10, 0x70,
- 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18,
- 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74,
- 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x06, 0xe0, 0x41, 0x02,
- 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c,
- 0x65, 0x76, 0x65, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61,
- 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c,
- 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b,
- 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0,
- 0x41, 0x03, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4b,
- 0x0a, 0x11, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0f, 0x65, 0x78, 0x70, 0x69,
- 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x48, 0x0a, 0x05, 0x73,
- 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31,
- 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72,
- 0x74, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05,
- 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x54, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f,
- 0x6b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e,
- 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x69,
- 0x6e, 0x67, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x03,
- 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x0a, 0x0b, 0x61,
- 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
- 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03,
- 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x1a, 0x25, 0x0a, 0x11, 0x57, 0x72, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x62, 0x6c,
- 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x03, 0x70, 0x65, 0x6d, 0x22, 0xe5, 0x01, 0x0a, 0x0c, 0x49, 0x6d, 0x70, 0x6f,
- 0x72, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1d, 0x0a, 0x19, 0x49, 0x4d, 0x50, 0x4f,
- 0x52, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
- 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x4f,
- 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x5f, 0x41, 0x45,
- 0x53, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x4f,
- 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x5f, 0x41, 0x45,
- 0x53, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x02, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x4f,
- 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x5f,
- 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41,
- 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35,
- 0x36, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x18, 0x0a, 0x14, 0x52,
- 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41,
- 0x32, 0x35, 0x36, 0x10, 0x05, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45,
- 0x50, 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x22,
- 0x63, 0x0a, 0x0e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74,
- 0x65, 0x12, 0x20, 0x0a, 0x1c, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4a, 0x4f, 0x42, 0x5f,
- 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
- 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x47,
- 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41,
- 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x58, 0x50, 0x49, 0x52,
- 0x45, 0x44, 0x10, 0x03, 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64,
- 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x53, 0x70, 0x72,
- 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d,
- 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b,
- 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74,
- 0x4a, 0x6f, 0x62, 0x73, 0x2f, 0x7b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62,
- 0x7d, 0x22, 0x81, 0x01, 0x0a, 0x1e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x50, 0x72,
- 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
- 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e,
- 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x55, 0x72, 0x69, 0x12, 0x35,
- 0x0a, 0x17, 0x65, 0x6b, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x14, 0x65, 0x6b, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65,
- 0x79, 0x50, 0x61, 0x74, 0x68, 0x2a, 0x6a, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x50, 0x52, 0x4f, 0x54,
- 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53,
- 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x4f,
- 0x46, 0x54, 0x57, 0x41, 0x52, 0x45, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x48, 0x53, 0x4d, 0x10,
- 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x03, 0x12,
- 0x10, 0x0a, 0x0c, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x56, 0x50, 0x43, 0x10,
- 0x04, 0x42, 0x88, 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x4b,
- 0x6d, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f,
- 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76,
- 0x31, 0x2f, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01,
- 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
- 0x4b, 0x6d, 0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c,
- 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x33,
+ 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x1f, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43,
+ 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x20, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x4d, 0x41,
+ 0x43, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x21, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41, 0x43,
+ 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x22, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d, 0x41,
+ 0x43, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x23, 0x12, 0x0f, 0x0a, 0x0b, 0x48, 0x4d,
+ 0x41, 0x43, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x32, 0x34, 0x10, 0x24, 0x12, 0x21, 0x0a, 0x1d, 0x45,
+ 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x53, 0x59, 0x4d, 0x4d, 0x45, 0x54, 0x52, 0x49,
+ 0x43, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x12, 0x22, 0x9b,
+ 0x02, 0x0a, 0x15, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x24, 0x43, 0x52, 0x59, 0x50,
+ 0x54, 0x4f, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x53,
+ 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
+ 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x47, 0x45,
+ 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x4e,
+ 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42,
+ 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x53, 0x54, 0x52, 0x4f, 0x59,
+ 0x45, 0x44, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x53, 0x54, 0x52, 0x4f, 0x59, 0x5f,
+ 0x53, 0x43, 0x48, 0x45, 0x44, 0x55, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x50,
+ 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x10, 0x06, 0x12,
+ 0x11, 0x0a, 0x0d, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44,
+ 0x10, 0x07, 0x12, 0x15, 0x0a, 0x11, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e,
+ 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x08, 0x12, 0x20, 0x0a, 0x1c, 0x50, 0x45, 0x4e,
+ 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x44, 0x45,
+ 0x53, 0x54, 0x52, 0x55, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x1f, 0x0a, 0x1b, 0x45,
+ 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x52, 0x55, 0x43, 0x54,
+ 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x0a, 0x22, 0x49, 0x0a, 0x14,
+ 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x56, 0x69, 0x65, 0x77, 0x12, 0x27, 0x0a, 0x23, 0x43, 0x52, 0x59, 0x50, 0x54, 0x4f, 0x5f, 0x4b,
+ 0x45, 0x59, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x5f,
+ 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a,
+ 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x01, 0x3a, 0xaa, 0x01, 0xea, 0x41, 0xa6, 0x01, 0x0a, 0x28,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
+ 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x7a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f,
+ 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73,
+ 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0xce, 0x03, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b,
+ 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x70, 0x65, 0x6d, 0x12, 0x5d, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68,
+ 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72,
+ 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43,
+ 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x41,
+ 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69,
+ 0x74, 0x68, 0x6d, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x65, 0x6d, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32,
+ 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x65, 0x6d, 0x43, 0x72, 0x63, 0x33, 0x32, 0x63, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73,
+ 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65,
+ 0x76, 0x65, 0x6c, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c,
+ 0x65, 0x76, 0x65, 0x6c, 0x3a, 0xae, 0x01, 0xea, 0x41, 0xaa, 0x01, 0x0a, 0x21, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x84,
+ 0x01, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c,
+ 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67,
+ 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f,
+ 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b,
+ 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x70, 0x75, 0x62, 0x6c,
+ 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0xd4, 0x09, 0x0a, 0x09, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74,
+ 0x4a, 0x6f, 0x62, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x58, 0x0a, 0x0d,
+ 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74,
+ 0x4a, 0x6f, 0x62, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0c, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74,
+ 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x57, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
+ 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f,
+ 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
+ 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d,
+ 0x65, 0x12, 0x44, 0x0a, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69,
+ 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72,
+ 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x65,
+ 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x11, 0x65, 0x78, 0x70,
+ 0x69, 0x72, 0x65, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0a,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x48, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
+ 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f,
+ 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x53,
+ 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65,
+ 0x12, 0x54, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72,
+ 0x74, 0x4a, 0x6f, 0x62, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x62,
+ 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x75, 0x62,
+ 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76,
+ 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x74,
+ 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b,
+ 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x25, 0x0a, 0x11, 0x57,
+ 0x72, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79,
+ 0x12, 0x10, 0x0a, 0x03, 0x70, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70,
+ 0x65, 0x6d, 0x22, 0xe5, 0x01, 0x0a, 0x0c, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74,
+ 0x68, 0x6f, 0x64, 0x12, 0x1d, 0x0a, 0x19, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4d, 0x45,
+ 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
+ 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33,
+ 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36,
+ 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34,
+ 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36,
+ 0x10, 0x02, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x33,
+ 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32,
+ 0x35, 0x36, 0x10, 0x03, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50,
+ 0x5f, 0x34, 0x30, 0x39, 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x5f, 0x41, 0x45, 0x53,
+ 0x5f, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41,
+ 0x45, 0x50, 0x5f, 0x33, 0x30, 0x37, 0x32, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x05,
+ 0x12, 0x18, 0x0a, 0x14, 0x52, 0x53, 0x41, 0x5f, 0x4f, 0x41, 0x45, 0x50, 0x5f, 0x34, 0x30, 0x39,
+ 0x36, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x22, 0x63, 0x0a, 0x0e, 0x49, 0x6d,
+ 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x1c,
+ 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4a, 0x4f, 0x42, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45,
+ 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16,
+ 0x0a, 0x12, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41,
+ 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45,
+ 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x58, 0x50, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x3a,
+ 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6d,
+ 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d,
+ 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72,
+ 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4a, 0x6f, 0x62, 0x73, 0x2f,
+ 0x7b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6a, 0x6f, 0x62, 0x7d, 0x22, 0x81, 0x01, 0x0a,
+ 0x1e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x28, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6b, 0x65, 0x79, 0x5f,
+ 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x72,
+ 0x6e, 0x61, 0x6c, 0x4b, 0x65, 0x79, 0x55, 0x72, 0x69, 0x12, 0x35, 0x0a, 0x17, 0x65, 0x6b, 0x6d,
+ 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f,
+ 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x65, 0x6b, 0x6d, 0x43,
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x50, 0x61, 0x74, 0x68,
+ 0x2a, 0x6a, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65,
+ 0x76, 0x65, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x50, 0x52, 0x4f, 0x54, 0x45, 0x43, 0x54, 0x49, 0x4f,
+ 0x4e, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
+ 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x4f, 0x46, 0x54, 0x57, 0x41, 0x52,
+ 0x45, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x48, 0x53, 0x4d, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08,
+ 0x45, 0x58, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x58,
+ 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x56, 0x50, 0x43, 0x10, 0x04, 0x42, 0x88, 0x01, 0x0a,
+ 0x17, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x2e, 0x6b, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x4b, 0x6d, 0x73, 0x52, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63,
+ 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x67, 0x6f, 0x2f, 0x6b, 0x6d, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x6b, 0x6d, 0x73,
+ 0x70, 0x62, 0x3b, 0x6b, 0x6d, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4b, 0x6d, 0x73, 0x2e, 0x56,
+ 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64,
+ 0x5c, 0x4b, 0x6d, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/upstream/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go b/upstream/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go
index 6ba1765a4d2..3d2fc5b9927 100644
--- a/upstream/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go
+++ b/upstream/vendor/cloud.google.com/go/kms/apiv1/kmspb/service.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.1
-// protoc v4.25.3
+// protoc-gen-go v1.32.0
+// protoc v4.25.2
// source: google/cloud/kms/v1/service.proto
package kmspb
diff --git a/upstream/vendor/cloud.google.com/go/kms/internal/version.go b/upstream/vendor/cloud.google.com/go/kms/internal/version.go
index 2d7d8774486..f6b4a3e367d 100644
--- a/upstream/vendor/cloud.google.com/go/kms/internal/version.go
+++ b/upstream/vendor/cloud.google.com/go/kms/internal/version.go
@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "1.17.1"
+const Version = "1.15.8"
diff --git a/upstream/vendor/cloud.google.com/go/longrunning/CHANGES.md b/upstream/vendor/cloud.google.com/go/longrunning/CHANGES.md
deleted file mode 100644
index 6c6a7b6661a..00000000000
--- a/upstream/vendor/cloud.google.com/go/longrunning/CHANGES.md
+++ /dev/null
@@ -1,89 +0,0 @@
-# Changes
-
-## [0.5.7](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.6...longrunning/v0.5.7) (2024-05-01)
-
-
-### Bug Fixes
-
-* **longrunning:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4))
-
-## [0.5.6](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.5...longrunning/v0.5.6) (2024-03-14)
-
-
-### Bug Fixes
-
-* **longrunning:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
-
-## [0.5.5](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.4...longrunning/v0.5.5) (2024-01-30)
-
-
-### Bug Fixes
-
-* **longrunning:** Enable universe domain resolution options ([fd1d569](https://github.com/googleapis/google-cloud-go/commit/fd1d56930fa8a747be35a224611f4797b8aeb698))
-
-## [0.5.4](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.3...longrunning/v0.5.4) (2023-11-01)
-
-
-### Bug Fixes
-
-* **longrunning:** Bump google.golang.org/api to v0.149.0 ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880))
-
-## [0.5.3](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.2...longrunning/v0.5.3) (2023-10-26)
-
-
-### Bug Fixes
-
-* **longrunning:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7))
-
-## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.1...longrunning/v0.5.2) (2023-10-12)
-
-
-### Bug Fixes
-
-* **longrunning:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
-
-## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.5.0...longrunning/v0.5.1) (2023-06-20)
-
-
-### Bug Fixes
-
-* **longrunning:** REST query UpdateMask bug ([df52820](https://github.com/googleapis/google-cloud-go/commit/df52820b0e7721954809a8aa8700b93c5662dc9b))
-
-## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.4.2...longrunning/v0.5.0) (2023-05-30)
-
-
-### Features
-
-* **longrunning:** Update all direct dependencies ([b340d03](https://github.com/googleapis/google-cloud-go/commit/b340d030f2b52a4ce48846ce63984b28583abde6))
-
-## [0.4.2](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.4.1...longrunning/v0.4.2) (2023-05-08)
-
-
-### Bug Fixes
-
-* **longrunning:** Update grpc to v1.55.0 ([1147ce0](https://github.com/googleapis/google-cloud-go/commit/1147ce02a990276ca4f8ab7a1ab65c14da4450ef))
-
-## [0.4.1](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.4.0...longrunning/v0.4.1) (2023-02-14)
-
-
-### Bug Fixes
-
-* **longrunning:** Properly parse errors with apierror ([#7392](https://github.com/googleapis/google-cloud-go/issues/7392)) ([e768e48](https://github.com/googleapis/google-cloud-go/commit/e768e487e10b197ba42a2339014136d066190610))
-
-## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.3.0...longrunning/v0.4.0) (2023-01-04)
-
-
-### Features
-
-* **longrunning:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0))
-
-## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.2.1...longrunning/v0.3.0) (2022-11-03)
-
-
-### Features
-
-* **longrunning:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad))
-
-## v0.1.0
-
-Initial release.
diff --git a/upstream/vendor/cloud.google.com/go/longrunning/LICENSE b/upstream/vendor/cloud.google.com/go/longrunning/LICENSE
deleted file mode 100644
index d6456956733..00000000000
--- a/upstream/vendor/cloud.google.com/go/longrunning/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/upstream/vendor/cloud.google.com/go/longrunning/README.md b/upstream/vendor/cloud.google.com/go/longrunning/README.md
deleted file mode 100644
index a07f3093fd3..00000000000
--- a/upstream/vendor/cloud.google.com/go/longrunning/README.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# longrunning
-
-[](https://pkg.go.dev/cloud.google.com/go/longrunning)
-
-A helper library for working with long running operations.
-
-## Install
-
-```bash
-go get cloud.google.com/go/longrunning
-```
-
-## Go Version Support
-
-See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported)
-section in the root directory's README.
-
-## Contributing
-
-Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
-document for details.
-
-Please note that this project is released with a Contributor Code of Conduct.
-By participating in this project you agree to abide by its terms. See
-[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
-for more information.
diff --git a/upstream/vendor/cloud.google.com/go/longrunning/autogen/auxiliary.go b/upstream/vendor/cloud.google.com/go/longrunning/autogen/auxiliary.go
deleted file mode 100644
index a42e61e99c3..00000000000
--- a/upstream/vendor/cloud.google.com/go/longrunning/autogen/auxiliary.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
-
-package longrunning
-
-import (
- longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
- "google.golang.org/api/iterator"
-)
-
-// OperationIterator manages a stream of *longrunningpb.Operation.
-type OperationIterator struct {
- items []*longrunningpb.Operation
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*longrunningpb.Operation, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *OperationIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *OperationIterator) Next() (*longrunningpb.Operation, error) {
- var item *longrunningpb.Operation
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *OperationIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *OperationIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
diff --git a/upstream/vendor/cloud.google.com/go/longrunning/autogen/doc.go b/upstream/vendor/cloud.google.com/go/longrunning/autogen/doc.go
deleted file mode 100644
index 7976ed73455..00000000000
--- a/upstream/vendor/cloud.google.com/go/longrunning/autogen/doc.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
-
-// Package longrunning is an auto-generated package for the
-// Long Running Operations API.
-//
-// # General documentation
-//
-// For information that is relevant for all client libraries please reference
-// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this
-// page includes:
-//
-// - [Authentication and Authorization]
-// - [Timeouts and Cancellation]
-// - [Testing against Client Libraries]
-// - [Debugging Client Libraries]
-// - [Inspecting errors]
-//
-// # Example usage
-//
-// To get started with this package, create a client.
-//
-// ctx := context.Background()
-// // This snippet has been automatically generated and should be regarded as a code template only.
-// // It will require modifications to work:
-// // - It may require correct/in-range values for request initialization.
-// // - It may require specifying regional endpoints when creating the service client as shown in:
-// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
-// c, err := longrunning.NewOperationsClient(ctx)
-// if err != nil {
-// // TODO: Handle error.
-// }
-// defer c.Close()
-//
-// The client will use your default application credentials. Clients should be reused instead of created as needed.
-// The methods of Client are safe for concurrent use by multiple goroutines.
-// The returned client must be Closed when it is done being used.
-//
-// # Using the Client
-//
-// The following is an example of making an API call with the newly created client.
-//
-// ctx := context.Background()
-// // This snippet has been automatically generated and should be regarded as a code template only.
-// // It will require modifications to work:
-// // - It may require correct/in-range values for request initialization.
-// // - It may require specifying regional endpoints when creating the service client as shown in:
-// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
-// c, err := longrunning.NewOperationsClient(ctx)
-// if err != nil {
-// // TODO: Handle error.
-// }
-// defer c.Close()
-//
-// req := &longrunningpb.CancelOperationRequest{
-// // TODO: Fill request struct fields.
-// // See https://pkg.go.dev/cloud.google.com/go/longrunning/autogen/longrunningpb#CancelOperationRequest.
-// }
-// err = c.CancelOperation(ctx, req)
-// if err != nil {
-// // TODO: Handle error.
-// }
-//
-// # Use of Context
-//
-// The ctx passed to NewOperationsClient is used for authentication requests and
-// for creating the underlying connection, but is not used for subsequent calls.
-// Individual methods on the client use the ctx given to them.
-//
-// To close the open connection, use the Close() method.
-//
-// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization
-// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation
-// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing
-// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging
-// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors
-package longrunning // import "cloud.google.com/go/longrunning/autogen"
-
-import (
- "context"
-
- "google.golang.org/api/option"
-)
-
-// For more information on implementing a client constructor hook, see
-// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
-type clientHookParams struct{}
-type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
-
-var versionClient string
-
-func getVersionClient() string {
- if versionClient == "" {
- return "UNKNOWN"
- }
- return versionClient
-}
-
-// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
-func DefaultAuthScopes() []string {
- return []string{
- "",
- }
-}
diff --git a/upstream/vendor/cloud.google.com/go/longrunning/autogen/from_conn.go b/upstream/vendor/cloud.google.com/go/longrunning/autogen/from_conn.go
deleted file mode 100644
index f09714b9b32..00000000000
--- a/upstream/vendor/cloud.google.com/go/longrunning/autogen/from_conn.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2020, Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package longrunning
-
-import (
- "context"
-
- "google.golang.org/api/option"
- "google.golang.org/grpc"
-)
-
-// InternalFromConn is for use by the Google Cloud Libraries only.
-//
-// Deprecated. Use `NewOperationsClient(ctx, option.WithGRPCConn(conn))` instead.
-func InternalFromConn(conn *grpc.ClientConn) *OperationsClient {
- c, _ := NewOperationsClient(context.Background(), option.WithGRPCConn(conn))
- return c
-}
diff --git a/upstream/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json b/upstream/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json
deleted file mode 100644
index 52714282166..00000000000
--- a/upstream/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json
+++ /dev/null
@@ -1,73 +0,0 @@
-{
- "schema": "1.0",
- "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.",
- "language": "go",
- "protoPackage": "google.longrunning",
- "libraryPackage": "cloud.google.com/go/longrunning/autogen",
- "services": {
- "Operations": {
- "clients": {
- "grpc": {
- "libraryClient": "OperationsClient",
- "rpcs": {
- "CancelOperation": {
- "methods": [
- "CancelOperation"
- ]
- },
- "DeleteOperation": {
- "methods": [
- "DeleteOperation"
- ]
- },
- "GetOperation": {
- "methods": [
- "GetOperation"
- ]
- },
- "ListOperations": {
- "methods": [
- "ListOperations"
- ]
- },
- "WaitOperation": {
- "methods": [
- "WaitOperation"
- ]
- }
- }
- },
- "rest": {
- "libraryClient": "OperationsClient",
- "rpcs": {
- "CancelOperation": {
- "methods": [
- "CancelOperation"
- ]
- },
- "DeleteOperation": {
- "methods": [
- "DeleteOperation"
- ]
- },
- "GetOperation": {
- "methods": [
- "GetOperation"
- ]
- },
- "ListOperations": {
- "methods": [
- "ListOperations"
- ]
- },
- "WaitOperation": {
- "methods": [
- "WaitOperation"
- ]
- }
- }
- }
- }
- }
- }
-}
diff --git a/upstream/vendor/cloud.google.com/go/longrunning/autogen/info.go b/upstream/vendor/cloud.google.com/go/longrunning/autogen/info.go
deleted file mode 100644
index b006c4d018e..00000000000
--- a/upstream/vendor/cloud.google.com/go/longrunning/autogen/info.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package longrunning
-
-// SetGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Also passes any
-// provided key-value pairs. Intended for use by Google-written clients.
-//
-// Internal use only.
-func (c *OperationsClient) SetGoogleClientInfo(keyval ...string) {
- c.setGoogleClientInfo(keyval...)
-}
diff --git a/upstream/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go b/upstream/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go
deleted file mode 100644
index be148ff97a3..00000000000
--- a/upstream/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go
+++ /dev/null
@@ -1,1230 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.33.0
-// protoc v4.25.3
-// source: google/longrunning/operations.proto
-
-package longrunningpb
-
-import (
- context "context"
- reflect "reflect"
- sync "sync"
-
- _ "google.golang.org/genproto/googleapis/api/annotations"
- status "google.golang.org/genproto/googleapis/rpc/status"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status1 "google.golang.org/grpc/status"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- descriptorpb "google.golang.org/protobuf/types/descriptorpb"
- anypb "google.golang.org/protobuf/types/known/anypb"
- durationpb "google.golang.org/protobuf/types/known/durationpb"
- emptypb "google.golang.org/protobuf/types/known/emptypb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// This resource represents a long-running operation that is the result of a
-// network API call.
-type Operation struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The server-assigned name, which is only unique within the same service that
- // originally returns it. If you use the default HTTP mapping, the
- // `name` should be a resource name ending with `operations/{unique_id}`.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // Service-specific metadata associated with the operation. It typically
- // contains progress information and common metadata such as create time.
- // Some services might not provide such metadata. Any method that returns a
- // long-running operation should document the metadata type, if any.
- Metadata *anypb.Any `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"`
- // If the value is `false`, it means the operation is still in progress.
- // If `true`, the operation is completed, and either `error` or `response` is
- // available.
- Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"`
- // The operation result, which can be either an `error` or a valid `response`.
- // If `done` == `false`, neither `error` nor `response` is set.
- // If `done` == `true`, exactly one of `error` or `response` is set.
- //
- // Types that are assignable to Result:
- //
- // *Operation_Error
- // *Operation_Response
- Result isOperation_Result `protobuf_oneof:"result"`
-}
-
-func (x *Operation) Reset() {
- *x = Operation{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_longrunning_operations_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Operation) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Operation) ProtoMessage() {}
-
-func (x *Operation) ProtoReflect() protoreflect.Message {
- mi := &file_google_longrunning_operations_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Operation.ProtoReflect.Descriptor instead.
-func (*Operation) Descriptor() ([]byte, []int) {
- return file_google_longrunning_operations_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Operation) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *Operation) GetMetadata() *anypb.Any {
- if x != nil {
- return x.Metadata
- }
- return nil
-}
-
-func (x *Operation) GetDone() bool {
- if x != nil {
- return x.Done
- }
- return false
-}
-
-func (m *Operation) GetResult() isOperation_Result {
- if m != nil {
- return m.Result
- }
- return nil
-}
-
-func (x *Operation) GetError() *status.Status {
- if x, ok := x.GetResult().(*Operation_Error); ok {
- return x.Error
- }
- return nil
-}
-
-func (x *Operation) GetResponse() *anypb.Any {
- if x, ok := x.GetResult().(*Operation_Response); ok {
- return x.Response
- }
- return nil
-}
-
-type isOperation_Result interface {
- isOperation_Result()
-}
-
-type Operation_Error struct {
- // The error result of the operation in case of failure or cancellation.
- Error *status.Status `protobuf:"bytes,4,opt,name=error,proto3,oneof"`
-}
-
-type Operation_Response struct {
- // The normal response of the operation in case of success. If the original
- // method returns no data on success, such as `Delete`, the response is
- // `google.protobuf.Empty`. If the original method is standard
- // `Get`/`Create`/`Update`, the response should be the resource. For other
- // methods, the response should have the type `XxxResponse`, where `Xxx`
- // is the original method name. For example, if the original method name
- // is `TakeSnapshot()`, the inferred response type is
- // `TakeSnapshotResponse`.
- Response *anypb.Any `protobuf:"bytes,5,opt,name=response,proto3,oneof"`
-}
-
-func (*Operation_Error) isOperation_Result() {}
-
-func (*Operation_Response) isOperation_Result() {}
-
-// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation].
-type GetOperationRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The name of the operation resource.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *GetOperationRequest) Reset() {
- *x = GetOperationRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_longrunning_operations_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetOperationRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetOperationRequest) ProtoMessage() {}
-
-func (x *GetOperationRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_longrunning_operations_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetOperationRequest.ProtoReflect.Descriptor instead.
-func (*GetOperationRequest) Descriptor() ([]byte, []int) {
- return file_google_longrunning_operations_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *GetOperationRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
-type ListOperationsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The name of the operation's parent resource.
- Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
- // The standard list filter.
- Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
- // The standard list page size.
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // The standard list page token.
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
-}
-
-func (x *ListOperationsRequest) Reset() {
- *x = ListOperationsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_longrunning_operations_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListOperationsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListOperationsRequest) ProtoMessage() {}
-
-func (x *ListOperationsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_longrunning_operations_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListOperationsRequest.ProtoReflect.Descriptor instead.
-func (*ListOperationsRequest) Descriptor() ([]byte, []int) {
- return file_google_longrunning_operations_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *ListOperationsRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *ListOperationsRequest) GetFilter() string {
- if x != nil {
- return x.Filter
- }
- return ""
-}
-
-func (x *ListOperationsRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListOperationsRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
-}
-
-// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
-type ListOperationsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // A list of operations that matches the specified filter in the request.
- Operations []*Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"`
- // The standard List next-page token.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
-}
-
-func (x *ListOperationsResponse) Reset() {
- *x = ListOperationsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_longrunning_operations_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListOperationsResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListOperationsResponse) ProtoMessage() {}
-
-func (x *ListOperationsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_longrunning_operations_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListOperationsResponse.ProtoReflect.Descriptor instead.
-func (*ListOperationsResponse) Descriptor() ([]byte, []int) {
- return file_google_longrunning_operations_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *ListOperationsResponse) GetOperations() []*Operation {
- if x != nil {
- return x.Operations
- }
- return nil
-}
-
-func (x *ListOperationsResponse) GetNextPageToken() string {
- if x != nil {
- return x.NextPageToken
- }
- return ""
-}
-
-// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation].
-type CancelOperationRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The name of the operation resource to be cancelled.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *CancelOperationRequest) Reset() {
- *x = CancelOperationRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_longrunning_operations_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CancelOperationRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CancelOperationRequest) ProtoMessage() {}
-
-func (x *CancelOperationRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_longrunning_operations_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CancelOperationRequest.ProtoReflect.Descriptor instead.
-func (*CancelOperationRequest) Descriptor() ([]byte, []int) {
- return file_google_longrunning_operations_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *CancelOperationRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation].
-type DeleteOperationRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The name of the operation resource to be deleted.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *DeleteOperationRequest) Reset() {
- *x = DeleteOperationRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_longrunning_operations_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DeleteOperationRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DeleteOperationRequest) ProtoMessage() {}
-
-func (x *DeleteOperationRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_longrunning_operations_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DeleteOperationRequest.ProtoReflect.Descriptor instead.
-func (*DeleteOperationRequest) Descriptor() ([]byte, []int) {
- return file_google_longrunning_operations_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *DeleteOperationRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// The request message for [Operations.WaitOperation][google.longrunning.Operations.WaitOperation].
-type WaitOperationRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The name of the operation resource to wait on.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // The maximum duration to wait before timing out. If left blank, the wait
- // will be at most the time permitted by the underlying HTTP/RPC protocol.
- // If RPC context deadline is also specified, the shorter one will be used.
- Timeout *durationpb.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"`
-}
-
-func (x *WaitOperationRequest) Reset() {
- *x = WaitOperationRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_longrunning_operations_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *WaitOperationRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*WaitOperationRequest) ProtoMessage() {}
-
-func (x *WaitOperationRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_longrunning_operations_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use WaitOperationRequest.ProtoReflect.Descriptor instead.
-func (*WaitOperationRequest) Descriptor() ([]byte, []int) {
- return file_google_longrunning_operations_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *WaitOperationRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *WaitOperationRequest) GetTimeout() *durationpb.Duration {
- if x != nil {
- return x.Timeout
- }
- return nil
-}
-
-// A message representing the message types used by a long-running operation.
-//
-// Example:
-//
-// rpc LongRunningRecognize(LongRunningRecognizeRequest)
-// returns (google.longrunning.Operation) {
-// option (google.longrunning.operation_info) = {
-// response_type: "LongRunningRecognizeResponse"
-// metadata_type: "LongRunningRecognizeMetadata"
-// };
-// }
-type OperationInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The message name of the primary return type for this
- // long-running operation.
- // This type will be used to deserialize the LRO's response.
- //
- // If the response is in a different package from the rpc, a fully-qualified
- // message name must be used (e.g. `google.protobuf.Struct`).
- //
- // Note: Altering this value constitutes a breaking change.
- ResponseType string `protobuf:"bytes,1,opt,name=response_type,json=responseType,proto3" json:"response_type,omitempty"`
- // Required. The message name of the metadata type for this long-running
- // operation.
- //
- // If the response is in a different package from the rpc, a fully-qualified
- // message name must be used (e.g. `google.protobuf.Struct`).
- //
- // Note: Altering this value constitutes a breaking change.
- MetadataType string `protobuf:"bytes,2,opt,name=metadata_type,json=metadataType,proto3" json:"metadata_type,omitempty"`
-}
-
-func (x *OperationInfo) Reset() {
- *x = OperationInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_longrunning_operations_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *OperationInfo) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*OperationInfo) ProtoMessage() {}
-
-func (x *OperationInfo) ProtoReflect() protoreflect.Message {
- mi := &file_google_longrunning_operations_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use OperationInfo.ProtoReflect.Descriptor instead.
-func (*OperationInfo) Descriptor() ([]byte, []int) {
- return file_google_longrunning_operations_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *OperationInfo) GetResponseType() string {
- if x != nil {
- return x.ResponseType
- }
- return ""
-}
-
-func (x *OperationInfo) GetMetadataType() string {
- if x != nil {
- return x.MetadataType
- }
- return ""
-}
-
-var file_google_longrunning_operations_proto_extTypes = []protoimpl.ExtensionInfo{
- {
- ExtendedType: (*descriptorpb.MethodOptions)(nil),
- ExtensionType: (*OperationInfo)(nil),
- Field: 1049,
- Name: "google.longrunning.operation_info",
- Tag: "bytes,1049,opt,name=operation_info",
- Filename: "google/longrunning/operations.proto",
- },
-}
-
-// Extension fields to descriptorpb.MethodOptions.
-var (
- // Additional information regarding long-running operations.
- // In particular, this specifies the types that are returned from
- // long-running operations.
- //
- // Required for methods that return `google.longrunning.Operation`; invalid
- // otherwise.
- //
- // optional google.longrunning.OperationInfo operation_info = 1049;
- E_OperationInfo = &file_google_longrunning_operations_proto_extTypes[0]
-)
-
-var File_google_longrunning_operations_proto protoreflect.FileDescriptor
-
-var file_google_longrunning_operations_proto_rawDesc = []byte{
- 0x0a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e,
- 0x6e, 0x69, 0x6e, 0x67, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f,
- 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
- 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70,
- 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x22, 0xcf, 0x01, 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
- 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x6d,
- 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65,
- 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00,
- 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48,
- 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x08, 0x0a, 0x06, 0x72,
- 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x29, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x22, 0x7f, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a,
- 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66,
- 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69,
- 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69,
- 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
- 0x6e, 0x22, 0x7f, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x0a, 0x6f,
- 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e,
- 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a,
- 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65,
- 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b,
- 0x65, 0x6e, 0x22, 0x2c, 0x0a, 0x16, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x22, 0x2c, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x5f,
- 0x0a, 0x14, 0x57, 0x61, 0x69, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69,
- 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22,
- 0x59, 0x0a, 0x0d, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f,
- 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
- 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6d, 0x65,
- 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x32, 0xaa, 0x05, 0x0a, 0x0a, 0x4f,
- 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x94, 0x01, 0x0a, 0x0e, 0x4c, 0x69,
- 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x29, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e,
- 0x67, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x69, 0x73,
- 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x22, 0x2b, 0xda, 0x41, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x66, 0x69, 0x6c,
- 0x74, 0x65, 0x72, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x2f, 0x76, 0x31, 0x2f, 0x7b,
- 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x7d,
- 0x12, 0x7f, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75,
- 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f,
- 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x27, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x12, 0x18, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61,
- 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2a,
- 0x7d, 0x12, 0x7e, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f,
- 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
- 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x27, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x2a, 0x18, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61,
- 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x2a,
- 0x7d, 0x12, 0x88, 0x01, 0x0a, 0x0f, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c,
- 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65,
- 0x6c, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x31, 0xda, 0x41, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x3a, 0x01, 0x2a, 0x22, 0x1f, 0x2f, 0x76, 0x31,
- 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x3a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x12, 0x5a, 0x0a, 0x0d,
- 0x57, 0x61, 0x69, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69,
- 0x6e, 0x67, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x1a, 0x1d, 0xca, 0x41, 0x1a, 0x6c, 0x6f, 0x6e,
- 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
- 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x3a, 0x69, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68,
- 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75,
- 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49,
- 0x6e, 0x66, 0x6f, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e,
- 0x66, 0x6f, 0x42, 0x9d, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x42, 0x0f, 0x4f,
- 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
- 0x5a, 0x43, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e,
- 0x67, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x67, 0x65, 0x6e, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75,
- 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e,
- 0x69, 0x6e, 0x67, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x12, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0xca, 0x02, 0x12,
- 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69,
- 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_longrunning_operations_proto_rawDescOnce sync.Once
- file_google_longrunning_operations_proto_rawDescData = file_google_longrunning_operations_proto_rawDesc
-)
-
-func file_google_longrunning_operations_proto_rawDescGZIP() []byte {
- file_google_longrunning_operations_proto_rawDescOnce.Do(func() {
- file_google_longrunning_operations_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_longrunning_operations_proto_rawDescData)
- })
- return file_google_longrunning_operations_proto_rawDescData
-}
-
-var file_google_longrunning_operations_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
-var file_google_longrunning_operations_proto_goTypes = []interface{}{
- (*Operation)(nil), // 0: google.longrunning.Operation
- (*GetOperationRequest)(nil), // 1: google.longrunning.GetOperationRequest
- (*ListOperationsRequest)(nil), // 2: google.longrunning.ListOperationsRequest
- (*ListOperationsResponse)(nil), // 3: google.longrunning.ListOperationsResponse
- (*CancelOperationRequest)(nil), // 4: google.longrunning.CancelOperationRequest
- (*DeleteOperationRequest)(nil), // 5: google.longrunning.DeleteOperationRequest
- (*WaitOperationRequest)(nil), // 6: google.longrunning.WaitOperationRequest
- (*OperationInfo)(nil), // 7: google.longrunning.OperationInfo
- (*anypb.Any)(nil), // 8: google.protobuf.Any
- (*status.Status)(nil), // 9: google.rpc.Status
- (*durationpb.Duration)(nil), // 10: google.protobuf.Duration
- (*descriptorpb.MethodOptions)(nil), // 11: google.protobuf.MethodOptions
- (*emptypb.Empty)(nil), // 12: google.protobuf.Empty
-}
-var file_google_longrunning_operations_proto_depIdxs = []int32{
- 8, // 0: google.longrunning.Operation.metadata:type_name -> google.protobuf.Any
- 9, // 1: google.longrunning.Operation.error:type_name -> google.rpc.Status
- 8, // 2: google.longrunning.Operation.response:type_name -> google.protobuf.Any
- 0, // 3: google.longrunning.ListOperationsResponse.operations:type_name -> google.longrunning.Operation
- 10, // 4: google.longrunning.WaitOperationRequest.timeout:type_name -> google.protobuf.Duration
- 11, // 5: google.longrunning.operation_info:extendee -> google.protobuf.MethodOptions
- 7, // 6: google.longrunning.operation_info:type_name -> google.longrunning.OperationInfo
- 2, // 7: google.longrunning.Operations.ListOperations:input_type -> google.longrunning.ListOperationsRequest
- 1, // 8: google.longrunning.Operations.GetOperation:input_type -> google.longrunning.GetOperationRequest
- 5, // 9: google.longrunning.Operations.DeleteOperation:input_type -> google.longrunning.DeleteOperationRequest
- 4, // 10: google.longrunning.Operations.CancelOperation:input_type -> google.longrunning.CancelOperationRequest
- 6, // 11: google.longrunning.Operations.WaitOperation:input_type -> google.longrunning.WaitOperationRequest
- 3, // 12: google.longrunning.Operations.ListOperations:output_type -> google.longrunning.ListOperationsResponse
- 0, // 13: google.longrunning.Operations.GetOperation:output_type -> google.longrunning.Operation
- 12, // 14: google.longrunning.Operations.DeleteOperation:output_type -> google.protobuf.Empty
- 12, // 15: google.longrunning.Operations.CancelOperation:output_type -> google.protobuf.Empty
- 0, // 16: google.longrunning.Operations.WaitOperation:output_type -> google.longrunning.Operation
- 12, // [12:17] is the sub-list for method output_type
- 7, // [7:12] is the sub-list for method input_type
- 6, // [6:7] is the sub-list for extension type_name
- 5, // [5:6] is the sub-list for extension extendee
- 0, // [0:5] is the sub-list for field type_name
-}
-
-func init() { file_google_longrunning_operations_proto_init() }
-func file_google_longrunning_operations_proto_init() {
- if File_google_longrunning_operations_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_google_longrunning_operations_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Operation); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_longrunning_operations_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetOperationRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_longrunning_operations_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListOperationsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_longrunning_operations_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListOperationsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_longrunning_operations_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CancelOperationRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_longrunning_operations_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DeleteOperationRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_longrunning_operations_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*WaitOperationRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_longrunning_operations_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*OperationInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_google_longrunning_operations_proto_msgTypes[0].OneofWrappers = []interface{}{
- (*Operation_Error)(nil),
- (*Operation_Response)(nil),
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_longrunning_operations_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 8,
- NumExtensions: 1,
- NumServices: 1,
- },
- GoTypes: file_google_longrunning_operations_proto_goTypes,
- DependencyIndexes: file_google_longrunning_operations_proto_depIdxs,
- MessageInfos: file_google_longrunning_operations_proto_msgTypes,
- ExtensionInfos: file_google_longrunning_operations_proto_extTypes,
- }.Build()
- File_google_longrunning_operations_proto = out.File
- file_google_longrunning_operations_proto_rawDesc = nil
- file_google_longrunning_operations_proto_goTypes = nil
- file_google_longrunning_operations_proto_depIdxs = nil
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConnInterface
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion6
-
-// OperationsClient is the client API for Operations service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type OperationsClient interface {
- // Lists operations that match the specified filter in the request. If the
- // server doesn't support this method, it returns `UNIMPLEMENTED`.
- //
- // NOTE: the `name` binding allows API services to override the binding
- // to use different resource name schemes, such as `users/*/operations`. To
- // override the binding, API services can add a binding such as
- // `"/v1/{name=users/*}/operations"` to their service configuration.
- // For backwards compatibility, the default name includes the operations
- // collection id, however overriding users must ensure the name binding
- // is the parent resource, without the operations collection id.
- ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error)
- // Gets the latest state of a long-running operation. Clients can use this
- // method to poll the operation result at intervals as recommended by the API
- // service.
- GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error)
- // Deletes a long-running operation. This method indicates that the client is
- // no longer interested in the operation result. It does not cancel the
- // operation. If the server doesn't support this method, it returns
- // `google.rpc.Code.UNIMPLEMENTED`.
- DeleteOperation(ctx context.Context, in *DeleteOperationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- // Starts asynchronous cancellation on a long-running operation. The server
- // makes a best effort to cancel the operation, but success is not
- // guaranteed. If the server doesn't support this method, it returns
- // `google.rpc.Code.UNIMPLEMENTED`. Clients can use
- // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
- // other methods to check whether the cancellation succeeded or whether the
- // operation completed despite cancellation. On successful cancellation,
- // the operation is not deleted; instead, it becomes an operation with
- // an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
- // corresponding to `Code.CANCELLED`.
- CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- // Waits until the specified long-running operation is done or reaches at most
- // a specified timeout, returning the latest state. If the operation is
- // already done, the latest state is immediately returned. If the timeout
- // specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
- // timeout is used. If the server does not support this method, it returns
- // `google.rpc.Code.UNIMPLEMENTED`.
- // Note that this method is on a best-effort basis. It may return the latest
- // state before the specified timeout (including immediately), meaning even an
- // immediate response is no guarantee that the operation is done.
- WaitOperation(ctx context.Context, in *WaitOperationRequest, opts ...grpc.CallOption) (*Operation, error)
-}
-
-type operationsClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewOperationsClient(cc grpc.ClientConnInterface) OperationsClient {
- return &operationsClient{cc}
-}
-
-func (c *operationsClient) ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error) {
- out := new(ListOperationsResponse)
- err := c.cc.Invoke(ctx, "/google.longrunning.Operations/ListOperations", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *operationsClient) GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error) {
- out := new(Operation)
- err := c.cc.Invoke(ctx, "/google.longrunning.Operations/GetOperation", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *operationsClient) DeleteOperation(ctx context.Context, in *DeleteOperationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/google.longrunning.Operations/DeleteOperation", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *operationsClient) CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/google.longrunning.Operations/CancelOperation", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *operationsClient) WaitOperation(ctx context.Context, in *WaitOperationRequest, opts ...grpc.CallOption) (*Operation, error) {
- out := new(Operation)
- err := c.cc.Invoke(ctx, "/google.longrunning.Operations/WaitOperation", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// OperationsServer is the server API for Operations service.
-type OperationsServer interface {
- // Lists operations that match the specified filter in the request. If the
- // server doesn't support this method, it returns `UNIMPLEMENTED`.
- //
- // NOTE: the `name` binding allows API services to override the binding
- // to use different resource name schemes, such as `users/*/operations`. To
- // override the binding, API services can add a binding such as
- // `"/v1/{name=users/*}/operations"` to their service configuration.
- // For backwards compatibility, the default name includes the operations
- // collection id, however overriding users must ensure the name binding
- // is the parent resource, without the operations collection id.
- ListOperations(context.Context, *ListOperationsRequest) (*ListOperationsResponse, error)
- // Gets the latest state of a long-running operation. Clients can use this
- // method to poll the operation result at intervals as recommended by the API
- // service.
- GetOperation(context.Context, *GetOperationRequest) (*Operation, error)
- // Deletes a long-running operation. This method indicates that the client is
- // no longer interested in the operation result. It does not cancel the
- // operation. If the server doesn't support this method, it returns
- // `google.rpc.Code.UNIMPLEMENTED`.
- DeleteOperation(context.Context, *DeleteOperationRequest) (*emptypb.Empty, error)
- // Starts asynchronous cancellation on a long-running operation. The server
- // makes a best effort to cancel the operation, but success is not
- // guaranteed. If the server doesn't support this method, it returns
- // `google.rpc.Code.UNIMPLEMENTED`. Clients can use
- // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
- // other methods to check whether the cancellation succeeded or whether the
- // operation completed despite cancellation. On successful cancellation,
- // the operation is not deleted; instead, it becomes an operation with
- // an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
- // corresponding to `Code.CANCELLED`.
- CancelOperation(context.Context, *CancelOperationRequest) (*emptypb.Empty, error)
- // Waits until the specified long-running operation is done or reaches at most
- // a specified timeout, returning the latest state. If the operation is
- // already done, the latest state is immediately returned. If the timeout
- // specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
- // timeout is used. If the server does not support this method, it returns
- // `google.rpc.Code.UNIMPLEMENTED`.
- // Note that this method is on a best-effort basis. It may return the latest
- // state before the specified timeout (including immediately), meaning even an
- // immediate response is no guarantee that the operation is done.
- WaitOperation(context.Context, *WaitOperationRequest) (*Operation, error)
-}
-
-// UnimplementedOperationsServer can be embedded to have forward compatible implementations.
-type UnimplementedOperationsServer struct {
-}
-
-func (*UnimplementedOperationsServer) ListOperations(context.Context, *ListOperationsRequest) (*ListOperationsResponse, error) {
- return nil, status1.Errorf(codes.Unimplemented, "method ListOperations not implemented")
-}
-func (*UnimplementedOperationsServer) GetOperation(context.Context, *GetOperationRequest) (*Operation, error) {
- return nil, status1.Errorf(codes.Unimplemented, "method GetOperation not implemented")
-}
-func (*UnimplementedOperationsServer) DeleteOperation(context.Context, *DeleteOperationRequest) (*emptypb.Empty, error) {
- return nil, status1.Errorf(codes.Unimplemented, "method DeleteOperation not implemented")
-}
-func (*UnimplementedOperationsServer) CancelOperation(context.Context, *CancelOperationRequest) (*emptypb.Empty, error) {
- return nil, status1.Errorf(codes.Unimplemented, "method CancelOperation not implemented")
-}
-func (*UnimplementedOperationsServer) WaitOperation(context.Context, *WaitOperationRequest) (*Operation, error) {
- return nil, status1.Errorf(codes.Unimplemented, "method WaitOperation not implemented")
-}
-
-func RegisterOperationsServer(s *grpc.Server, srv OperationsServer) {
- s.RegisterService(&_Operations_serviceDesc, srv)
-}
-
-func _Operations_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListOperationsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(OperationsServer).ListOperations(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.longrunning.Operations/ListOperations",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(OperationsServer).ListOperations(ctx, req.(*ListOperationsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Operations_GetOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetOperationRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(OperationsServer).GetOperation(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.longrunning.Operations/GetOperation",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(OperationsServer).GetOperation(ctx, req.(*GetOperationRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Operations_DeleteOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteOperationRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(OperationsServer).DeleteOperation(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.longrunning.Operations/DeleteOperation",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(OperationsServer).DeleteOperation(ctx, req.(*DeleteOperationRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Operations_CancelOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CancelOperationRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(OperationsServer).CancelOperation(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.longrunning.Operations/CancelOperation",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(OperationsServer).CancelOperation(ctx, req.(*CancelOperationRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Operations_WaitOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(WaitOperationRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(OperationsServer).WaitOperation(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.longrunning.Operations/WaitOperation",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(OperationsServer).WaitOperation(ctx, req.(*WaitOperationRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _Operations_serviceDesc = grpc.ServiceDesc{
- ServiceName: "google.longrunning.Operations",
- HandlerType: (*OperationsServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "ListOperations",
- Handler: _Operations_ListOperations_Handler,
- },
- {
- MethodName: "GetOperation",
- Handler: _Operations_GetOperation_Handler,
- },
- {
- MethodName: "DeleteOperation",
- Handler: _Operations_DeleteOperation_Handler,
- },
- {
- MethodName: "CancelOperation",
- Handler: _Operations_CancelOperation_Handler,
- },
- {
- MethodName: "WaitOperation",
- Handler: _Operations_WaitOperation_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "google/longrunning/operations.proto",
-}
diff --git a/upstream/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go b/upstream/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go
deleted file mode 100644
index abdb2d6b638..00000000000
--- a/upstream/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go
+++ /dev/null
@@ -1,869 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
-
-package longrunning
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "math"
- "net/http"
- "net/url"
- "time"
-
- longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
- gax "github.com/googleapis/gax-go/v2"
- "google.golang.org/api/googleapi"
- "google.golang.org/api/iterator"
- "google.golang.org/api/option"
- "google.golang.org/api/option/internaloption"
- gtransport "google.golang.org/api/transport/grpc"
- httptransport "google.golang.org/api/transport/http"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/protobuf/encoding/protojson"
- "google.golang.org/protobuf/proto"
-)
-
-var newOperationsClientHook clientHook
-
-// OperationsCallOptions contains the retry settings for each method of OperationsClient.
-type OperationsCallOptions struct {
- ListOperations []gax.CallOption
- GetOperation []gax.CallOption
- DeleteOperation []gax.CallOption
- CancelOperation []gax.CallOption
- WaitOperation []gax.CallOption
-}
-
-func defaultOperationsGRPCClientOptions() []option.ClientOption {
- return []option.ClientOption{
- internaloption.WithDefaultEndpoint("longrunning.googleapis.com:443"),
- internaloption.WithDefaultEndpointTemplate("longrunning.UNIVERSE_DOMAIN:443"),
- internaloption.WithDefaultMTLSEndpoint("longrunning.mtls.googleapis.com:443"),
- internaloption.WithDefaultUniverseDomain("googleapis.com"),
- internaloption.WithDefaultAudience("https://longrunning.googleapis.com/"),
- internaloption.WithDefaultScopes(DefaultAuthScopes()...),
- internaloption.EnableJwtWithScope(),
- option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
- grpc.MaxCallRecvMsgSize(math.MaxInt32))),
- }
-}
-
-func defaultOperationsCallOptions() *OperationsCallOptions {
- return &OperationsCallOptions{
- ListOperations: []gax.CallOption{
- gax.WithTimeout(10000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 500 * time.Millisecond,
- Max: 10000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- GetOperation: []gax.CallOption{
- gax.WithTimeout(10000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 500 * time.Millisecond,
- Max: 10000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- DeleteOperation: []gax.CallOption{
- gax.WithTimeout(10000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 500 * time.Millisecond,
- Max: 10000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- CancelOperation: []gax.CallOption{
- gax.WithTimeout(10000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 500 * time.Millisecond,
- Max: 10000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- WaitOperation: []gax.CallOption{},
- }
-}
-
-func defaultOperationsRESTCallOptions() *OperationsCallOptions {
- return &OperationsCallOptions{
- ListOperations: []gax.CallOption{
- gax.WithTimeout(10000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 500 * time.Millisecond,
- Max: 10000 * time.Millisecond,
- Multiplier: 2.00,
- },
- http.StatusServiceUnavailable)
- }),
- },
- GetOperation: []gax.CallOption{
- gax.WithTimeout(10000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 500 * time.Millisecond,
- Max: 10000 * time.Millisecond,
- Multiplier: 2.00,
- },
- http.StatusServiceUnavailable)
- }),
- },
- DeleteOperation: []gax.CallOption{
- gax.WithTimeout(10000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 500 * time.Millisecond,
- Max: 10000 * time.Millisecond,
- Multiplier: 2.00,
- },
- http.StatusServiceUnavailable)
- }),
- },
- CancelOperation: []gax.CallOption{
- gax.WithTimeout(10000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 500 * time.Millisecond,
- Max: 10000 * time.Millisecond,
- Multiplier: 2.00,
- },
- http.StatusServiceUnavailable)
- }),
- },
- WaitOperation: []gax.CallOption{},
- }
-}
-
-// internalOperationsClient is an interface that defines the methods available from Long Running Operations API.
-type internalOperationsClient interface {
- Close() error
- setGoogleClientInfo(...string)
- Connection() *grpc.ClientConn
- ListOperations(context.Context, *longrunningpb.ListOperationsRequest, ...gax.CallOption) *OperationIterator
- GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error)
- DeleteOperation(context.Context, *longrunningpb.DeleteOperationRequest, ...gax.CallOption) error
- CancelOperation(context.Context, *longrunningpb.CancelOperationRequest, ...gax.CallOption) error
- WaitOperation(context.Context, *longrunningpb.WaitOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error)
-}
-
-// OperationsClient is a client for interacting with Long Running Operations API.
-// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
-//
-// Manages long-running operations with an API service.
-//
-// When an API method normally takes long time to complete, it can be designed
-// to return Operation to the client, and the client can use this
-// interface to receive the real response asynchronously by polling the
-// operation resource, or pass the operation resource to another API (such as
-// Google Cloud Pub/Sub API) to receive the response. Any API service that
-// returns long-running operations should implement the Operations interface
-// so developers can have a consistent client experience.
-type OperationsClient struct {
- // The internal transport-dependent client.
- internalClient internalOperationsClient
-
- // The call options for this service.
- CallOptions *OperationsCallOptions
-}
-
-// Wrapper methods routed to the internal client.
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *OperationsClient) Close() error {
- return c.internalClient.Close()
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *OperationsClient) setGoogleClientInfo(keyval ...string) {
- c.internalClient.setGoogleClientInfo(keyval...)
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: Connections are now pooled so this method does not always
-// return the same resource.
-func (c *OperationsClient) Connection() *grpc.ClientConn {
- return c.internalClient.Connection()
-}
-
-// ListOperations lists operations that match the specified filter in the request. If the
-// server doesn’t support this method, it returns UNIMPLEMENTED.
-//
-// NOTE: the name binding allows API services to override the binding
-// to use different resource name schemes, such as users/*/operations. To
-// override the binding, API services can add a binding such as
-// "/v1/{name=users/*}/operations" to their service configuration.
-// For backwards compatibility, the default name includes the operations
-// collection id, however overriding users must ensure the name binding
-// is the parent resource, without the operations collection id.
-func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {
- return c.internalClient.ListOperations(ctx, req, opts...)
-}
-
-// GetOperation gets the latest state of a long-running operation. Clients can use this
-// method to poll the operation result at intervals as recommended by the API
-// service.
-func (c *OperationsClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- return c.internalClient.GetOperation(ctx, req, opts...)
-}
-
-// DeleteOperation deletes a long-running operation. This method indicates that the client is
-// no longer interested in the operation result. It does not cancel the
-// operation. If the server doesn’t support this method, it returns
-// google.rpc.Code.UNIMPLEMENTED.
-func (c *OperationsClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error {
- return c.internalClient.DeleteOperation(ctx, req, opts...)
-}
-
-// CancelOperation starts asynchronous cancellation on a long-running operation. The server
-// makes a best effort to cancel the operation, but success is not
-// guaranteed. If the server doesn’t support this method, it returns
-// google.rpc.Code.UNIMPLEMENTED. Clients can use
-// Operations.GetOperation or
-// other methods to check whether the cancellation succeeded or whether the
-// operation completed despite cancellation. On successful cancellation,
-// the operation is not deleted; instead, it becomes an operation with
-// an Operation.error value with a google.rpc.Status.code of 1,
-// corresponding to Code.CANCELLED.
-func (c *OperationsClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error {
- return c.internalClient.CancelOperation(ctx, req, opts...)
-}
-
-// WaitOperation waits until the specified long-running operation is done or reaches at most
-// a specified timeout, returning the latest state. If the operation is
-// already done, the latest state is immediately returned. If the timeout
-// specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
-// timeout is used. If the server does not support this method, it returns
-// google.rpc.Code.UNIMPLEMENTED.
-// Note that this method is on a best-effort basis. It may return the latest
-// state before the specified timeout (including immediately), meaning even an
-// immediate response is no guarantee that the operation is done.
-func (c *OperationsClient) WaitOperation(ctx context.Context, req *longrunningpb.WaitOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- return c.internalClient.WaitOperation(ctx, req, opts...)
-}
-
-// operationsGRPCClient is a client for interacting with Long Running Operations API over gRPC transport.
-//
-// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
-type operationsGRPCClient struct {
- // Connection pool of gRPC connections to the service.
- connPool gtransport.ConnPool
-
- // Points back to the CallOptions field of the containing OperationsClient
- CallOptions **OperationsCallOptions
-
- // The gRPC API client.
- operationsClient longrunningpb.OperationsClient
-
- // The x-goog-* metadata to be sent with each request.
- xGoogHeaders []string
-}
-
-// NewOperationsClient creates a new operations client based on gRPC.
-// The returned client must be Closed when it is done being used to clean up its underlying connections.
-//
-// Manages long-running operations with an API service.
-//
-// When an API method normally takes long time to complete, it can be designed
-// to return Operation to the client, and the client can use this
-// interface to receive the real response asynchronously by polling the
-// operation resource, or pass the operation resource to another API (such as
-// Google Cloud Pub/Sub API) to receive the response. Any API service that
-// returns long-running operations should implement the Operations interface
-// so developers can have a consistent client experience.
-func NewOperationsClient(ctx context.Context, opts ...option.ClientOption) (*OperationsClient, error) {
- clientOpts := defaultOperationsGRPCClientOptions()
- if newOperationsClientHook != nil {
- hookOpts, err := newOperationsClientHook(ctx, clientHookParams{})
- if err != nil {
- return nil, err
- }
- clientOpts = append(clientOpts, hookOpts...)
- }
-
- connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
- if err != nil {
- return nil, err
- }
- client := OperationsClient{CallOptions: defaultOperationsCallOptions()}
-
- c := &operationsGRPCClient{
- connPool: connPool,
- operationsClient: longrunningpb.NewOperationsClient(connPool),
- CallOptions: &client.CallOptions,
- }
- c.setGoogleClientInfo()
-
- client.internalClient = c
-
- return &client, nil
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: Connections are now pooled so this method does not always
-// return the same resource.
-func (c *operationsGRPCClient) Connection() *grpc.ClientConn {
- return c.connPool.Conn()
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *operationsGRPCClient) setGoogleClientInfo(keyval ...string) {
- kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
- kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
- c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)}
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *operationsGRPCClient) Close() error {
- return c.connPool.Close()
-}
-
-// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
-type operationsRESTClient struct {
- // The http endpoint to connect to.
- endpoint string
-
- // The http client.
- httpClient *http.Client
-
- // The x-goog-* headers to be sent with each request.
- xGoogHeaders []string
-
- // Points back to the CallOptions field of the containing OperationsClient
- CallOptions **OperationsCallOptions
-}
-
-// NewOperationsRESTClient creates a new operations rest client.
-//
-// Manages long-running operations with an API service.
-//
-// When an API method normally takes long time to complete, it can be designed
-// to return Operation to the client, and the client can use this
-// interface to receive the real response asynchronously by polling the
-// operation resource, or pass the operation resource to another API (such as
-// Google Cloud Pub/Sub API) to receive the response. Any API service that
-// returns long-running operations should implement the Operations interface
-// so developers can have a consistent client experience.
-func NewOperationsRESTClient(ctx context.Context, opts ...option.ClientOption) (*OperationsClient, error) {
- clientOpts := append(defaultOperationsRESTClientOptions(), opts...)
- httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...)
- if err != nil {
- return nil, err
- }
-
- callOpts := defaultOperationsRESTCallOptions()
- c := &operationsRESTClient{
- endpoint: endpoint,
- httpClient: httpClient,
- CallOptions: &callOpts,
- }
- c.setGoogleClientInfo()
-
- return &OperationsClient{internalClient: c, CallOptions: callOpts}, nil
-}
-
-func defaultOperationsRESTClientOptions() []option.ClientOption {
- return []option.ClientOption{
- internaloption.WithDefaultEndpoint("https://longrunning.googleapis.com"),
- internaloption.WithDefaultEndpointTemplate("https://longrunning.UNIVERSE_DOMAIN"),
- internaloption.WithDefaultMTLSEndpoint("https://longrunning.mtls.googleapis.com"),
- internaloption.WithDefaultUniverseDomain("googleapis.com"),
- internaloption.WithDefaultAudience("https://longrunning.googleapis.com/"),
- internaloption.WithDefaultScopes(DefaultAuthScopes()...),
- }
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *operationsRESTClient) setGoogleClientInfo(keyval ...string) {
- kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
- kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN")
- c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)}
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *operationsRESTClient) Close() error {
- // Replace httpClient with nil to force cleanup.
- c.httpClient = nil
- return nil
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: This method always returns nil.
-func (c *operationsRESTClient) Connection() *grpc.ClientConn {
- return nil
-}
-func (c *operationsGRPCClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListOperations[0:len((*c.CallOptions).ListOperations):len((*c.CallOptions).ListOperations)], opts...)
- it := &OperationIterator{}
- req = proto.Clone(req).(*longrunningpb.ListOperationsRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
- resp := &longrunningpb.ListOperationsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.operationsClient.ListOperations(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetOperations(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-func (c *operationsGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *operationsGRPCClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).DeleteOperation[0:len((*c.CallOptions).DeleteOperation):len((*c.CallOptions).DeleteOperation)], opts...)
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- _, err = c.operationsClient.DeleteOperation(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- return err
-}
-
-func (c *operationsGRPCClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).CancelOperation[0:len((*c.CallOptions).CancelOperation):len((*c.CallOptions).CancelOperation)], opts...)
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- _, err = c.operationsClient.CancelOperation(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- return err
-}
-
-func (c *operationsGRPCClient) WaitOperation(ctx context.Context, req *longrunningpb.WaitOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...)
- opts = append((*c.CallOptions).WaitOperation[0:len((*c.CallOptions).WaitOperation):len((*c.CallOptions).WaitOperation)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.operationsClient.WaitOperation(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-// ListOperations lists operations that match the specified filter in the request. If the
-// server doesn’t support this method, it returns UNIMPLEMENTED.
-//
-// NOTE: the name binding allows API services to override the binding
-// to use different resource name schemes, such as users/*/operations. To
-// override the binding, API services can add a binding such as
-// "/v1/{name=users/*}/operations" to their service configuration.
-// For backwards compatibility, the default name includes the operations
-// collection id, however overriding users must ensure the name binding
-// is the parent resource, without the operations collection id.
-func (c *operationsRESTClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {
- it := &OperationIterator{}
- req = proto.Clone(req).(*longrunningpb.ListOperationsRequest)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
- resp := &longrunningpb.ListOperationsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, "", err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- if req.GetFilter() != "" {
- params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
- }
- if req.GetPageSize() != 0 {
- params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
- }
- if req.GetPageToken() != "" {
- params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := append(c.xGoogHeaders, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, "", e
- }
- it.Response = resp
- return resp.GetOperations(), resp.GetNextPageToken(), nil
- }
-
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-// GetOperation gets the latest state of a long-running operation. Clients can use this
-// method to poll the operation result at intervals as recommended by the API
-// service.
-func (c *operationsRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// DeleteOperation deletes a long-running operation. This method indicates that the client is
-// no longer interested in the operation result. It does not cancel the
-// operation. If the server doesn’t support this method, it returns
-// google.rpc.Code.UNIMPLEMENTED.
-func (c *operationsRESTClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- // Returns nil if there is no error, otherwise wraps
- // the response code and body into a non-nil error
- return googleapi.CheckResponse(httpRsp)
- }, opts...)
-}
-
-// CancelOperation starts asynchronous cancellation on a long-running operation. The server
-// makes a best effort to cancel the operation, but success is not
-// guaranteed. If the server doesn’t support this method, it returns
-// google.rpc.Code.UNIMPLEMENTED. Clients can use
-// Operations.GetOperation or
-// other methods to check whether the cancellation succeeded or whether the
-// operation completed despite cancellation. On successful cancellation,
-// the operation is not deleted; instead, it becomes an operation with
-// an Operation.error value with a google.rpc.Status.code of 1,
-// corresponding to Code.CANCELLED.
-func (c *operationsRESTClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:cancel", req.GetName())
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- // Returns nil if there is no error, otherwise wraps
- // the response code and body into a non-nil error
- return googleapi.CheckResponse(httpRsp)
- }, opts...)
-}
-
-// WaitOperation waits until the specified long-running operation is done or reaches at most
-// a specified timeout, returning the latest state. If the operation is
-// already done, the latest state is immediately returned. If the timeout
-// specified is greater than the default HTTP/RPC timeout, the HTTP/RPC
-// timeout is used. If the server does not support this method, it returns
-// google.rpc.Code.UNIMPLEMENTED.
-// Note that this method is on a best-effort basis. It may return the latest
-// state before the specified timeout (including immediately), meaning even an
-// immediate response is no guarantee that the operation is done.
-func (c *operationsRESTClient) WaitOperation(ctx context.Context, req *longrunningpb.WaitOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("")
-
- params := url.Values{}
- if req.GetName() != "" {
- params.Add("name", fmt.Sprintf("%v", req.GetName()))
- }
- if req.GetTimeout() != nil {
- timeout, err := protojson.Marshal(req.GetTimeout())
- if err != nil {
- return nil, err
- }
- params.Add("timeout", string(timeout[1:len(timeout)-1]))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := append(c.xGoogHeaders, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).WaitOperation[0:len((*c.CallOptions).WaitOperation):len((*c.CallOptions).WaitOperation)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
diff --git a/upstream/vendor/cloud.google.com/go/longrunning/longrunning.go b/upstream/vendor/cloud.google.com/go/longrunning/longrunning.go
deleted file mode 100644
index 3c75b761e46..00000000000
--- a/upstream/vendor/cloud.google.com/go/longrunning/longrunning.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2016 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package longrunning supports Long Running Operations for the Google Cloud Libraries.
-// See google.golang.org/genproto/googleapis/longrunning for its service definition.
-//
-// Users of the Google Cloud Libraries will typically not use this package directly.
-// Instead they will call functions returning Operations and call their methods.
-//
-// This package is still experimental and subject to change.
-package longrunning // import "cloud.google.com/go/longrunning"
-
-import (
- "context"
- "errors"
- "fmt"
- "time"
-
- autogen "cloud.google.com/go/longrunning/autogen"
- pb "cloud.google.com/go/longrunning/autogen/longrunningpb"
- gax "github.com/googleapis/gax-go/v2"
- "github.com/googleapis/gax-go/v2/apierror"
- "google.golang.org/grpc/status"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/protoadapt"
- "google.golang.org/protobuf/types/known/anypb"
-)
-
-// ErrNoMetadata is the error returned by Metadata if the operation contains no metadata.
-var ErrNoMetadata = errors.New("operation contains no metadata")
-
-// Operation represents the result of an API call that may not be ready yet.
-type Operation struct {
- c operationsClient
- proto *pb.Operation
-}
-
-type operationsClient interface {
- GetOperation(context.Context, *pb.GetOperationRequest, ...gax.CallOption) (*pb.Operation, error)
- CancelOperation(context.Context, *pb.CancelOperationRequest, ...gax.CallOption) error
- DeleteOperation(context.Context, *pb.DeleteOperationRequest, ...gax.CallOption) error
-}
-
-// InternalNewOperation is for use by the google Cloud Libraries only.
-//
-// InternalNewOperation returns an long-running operation, abstracting the raw pb.Operation.
-// The conn parameter refers to a server that proto was received from.
-func InternalNewOperation(inner *autogen.OperationsClient, proto *pb.Operation) *Operation {
- return &Operation{
- c: inner,
- proto: proto,
- }
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service
-// from which the operation is created.
-func (op *Operation) Name() string {
- return op.proto.Name
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *Operation) Done() bool {
- return op.proto.Done
-}
-
-// Metadata unmarshals op's metadata into meta.
-// If op does not contain any metadata, Metadata returns ErrNoMetadata and meta is unmodified.
-func (op *Operation) Metadata(meta protoadapt.MessageV1) error {
- if m := op.proto.Metadata; m != nil {
- metav2 := protoadapt.MessageV2Of(meta)
- return anypb.UnmarshalTo(m, metav2, proto.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true})
- }
- return ErrNoMetadata
-}
-
-// Poll fetches the latest state of a long-running operation.
-//
-// If Poll fails, the error is returned and op is unmodified.
-// If Poll succeeds and the operation has completed with failure,
-// the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true; if resp != nil, the response of the operation
-// is stored in resp.
-func (op *Operation) Poll(ctx context.Context, resp protoadapt.MessageV1, opts ...gax.CallOption) error {
- if !op.Done() {
- p, err := op.c.GetOperation(ctx, &pb.GetOperationRequest{Name: op.Name()}, opts...)
- if err != nil {
- return err
- }
- op.proto = p
- }
- if !op.Done() {
- return nil
- }
-
- switch r := op.proto.Result.(type) {
- case *pb.Operation_Error:
- err, _ := apierror.FromError(status.ErrorProto(r.Error))
- return err
- case *pb.Operation_Response:
- if resp == nil {
- return nil
- }
- respv2 := protoadapt.MessageV2Of(resp)
- return anypb.UnmarshalTo(r.Response, respv2, proto.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true})
- default:
- return fmt.Errorf("unsupported result type %[1]T: %[1]v", r)
- }
-}
-
-// DefaultWaitInterval is the polling interval used by Operation.Wait.
-const DefaultWaitInterval = 60 * time.Second
-
-// Wait is equivalent to WaitWithInterval using DefaultWaitInterval.
-func (op *Operation) Wait(ctx context.Context, resp protoadapt.MessageV1, opts ...gax.CallOption) error {
- return op.WaitWithInterval(ctx, resp, DefaultWaitInterval, opts...)
-}
-
-// WaitWithInterval blocks until the operation is completed.
-// If resp != nil, Wait stores the response in resp.
-// WaitWithInterval polls every interval, except initially
-// when it polls using exponential backoff.
-//
-// See documentation of Poll for error-handling information.
-func (op *Operation) WaitWithInterval(ctx context.Context, resp protoadapt.MessageV1, interval time.Duration, opts ...gax.CallOption) error {
- bo := gax.Backoff{
- Initial: 1 * time.Second,
- Max: interval,
- }
- if bo.Max < bo.Initial {
- bo.Max = bo.Initial
- }
- return op.wait(ctx, resp, &bo, gax.Sleep, opts...)
-}
-
-type sleeper func(context.Context, time.Duration) error
-
-// wait implements Wait, taking exponentialBackoff and sleeper arguments for testing.
-func (op *Operation) wait(ctx context.Context, resp protoadapt.MessageV1, bo *gax.Backoff, sl sleeper, opts ...gax.CallOption) error {
- for {
- if err := op.Poll(ctx, resp, opts...); err != nil {
- return err
- }
- if op.Done() {
- return nil
- }
- if err := sl(ctx, bo.Pause()); err != nil {
- return err
- }
- }
-}
-
-// Cancel starts asynchronous cancellation on a long-running operation. The server
-// makes a best effort to cancel the operation, but success is not
-// guaranteed. If the server doesn't support this method, it returns
-// status.Code(err) == codes.Unimplemented. Clients can use
-// Poll or other methods to check whether the cancellation succeeded or whether the
-// operation completed despite cancellation. On successful cancellation,
-// the operation is not deleted; instead, op.Poll returns an error
-// with code Canceled.
-func (op *Operation) Cancel(ctx context.Context, opts ...gax.CallOption) error {
- return op.c.CancelOperation(ctx, &pb.CancelOperationRequest{Name: op.Name()}, opts...)
-}
-
-// Delete deletes a long-running operation. This method indicates that the client is
-// no longer interested in the operation result. It does not cancel the
-// operation. If the server doesn't support this method, status.Code(err) == codes.Unimplemented.
-func (op *Operation) Delete(ctx context.Context, opts ...gax.CallOption) error {
- return op.c.DeleteOperation(ctx, &pb.DeleteOperationRequest{Name: op.Name()}, opts...)
-}
diff --git a/upstream/vendor/cloud.google.com/go/migration.md b/upstream/vendor/cloud.google.com/go/migration.md
deleted file mode 100644
index 224dcfa1397..00000000000
--- a/upstream/vendor/cloud.google.com/go/migration.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# go-genproto to google-cloud-go message type migration
-
-The message types for all of our client libraries are being migrated from the
-`google.golang.org/genproto` [module](https://pkg.go.dev/google.golang.org/genproto)
-to their respective product specific module in this repository. For example
-this asset request type that was once found in [genproto](https://pkg.go.dev/google.golang.org/genproto@v0.0.0-20220908141613-51c1cc9bc6d0/googleapis/cloud/asset/v1p5beta1#ListAssetsRequest)
-can now be found in directly in the [asset module](https://pkg.go.dev/cloud.google.com/go/asset/apiv1p5beta1/assetpb#ListAssetsRequest).
-
-Although the type definitions have moved, aliases have been left in the old
-genproto packages to ensure a smooth non-breaking transition.
-
-## How do I migrate to the new packages?
-
-The easiest option is to run a migration tool at the root of our project. It is
-like `go fix`, but specifically for this migration. Before running the tool it
-is best to make sure any modules that have the prefix of `cloud.google.com/go`
-are up to date. To run the tool, do the following:
-
-```bash
-go run cloud.google.com/go/internal/aliasfix/cmd/aliasfix@latest .
-go mod tidy
-```
-
-The tool should only change up to one line in the import statement per file.
-This can also be done by hand if you prefer.
-
-## Do I have to migrate?
-
-Yes if you wish to keep using the newest versions of our client libraries with
-the newest features -- You should migrate by the start of 2023. Until then we
-will keep updating the aliases in go-genproto weekly. If you have an existing
-workload that uses these client libraries and does not need to update its
-dependencies there is no action to take. All existing written code will continue
-to work.
-
-## Why are these types being moved
-
-1. This change will help simplify dependency trees over time.
-2. The types will now be in product specific modules that are versioned
- independently with semver. This is especially a benefit for users that rely
- on multiple clients in a single application. Because message types are no
- longer mono-packaged users are less likely to run into intermediate
- dependency conflicts when updating dependencies.
-3. Having all these types in one repository will help us ensure that unintended
- changes are caught before they would be released.
-
-## Have questions?
-
-Please reach out to us on our [issue tracker](https://github.com/googleapis/google-cloud-go/issues/new?assignees=&labels=genproto-migration&template=migration-issue.md&title=package%3A+migration+help)
-if you have any questions or concerns.
diff --git a/upstream/vendor/cloud.google.com/go/release-please-config-individual.json b/upstream/vendor/cloud.google.com/go/release-please-config-individual.json
deleted file mode 100644
index 93054b7de60..00000000000
--- a/upstream/vendor/cloud.google.com/go/release-please-config-individual.json
+++ /dev/null
@@ -1,57 +0,0 @@
-{
- "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json",
- "release-type": "go-yoshi",
- "include-component-in-tag": true,
- "separate-pull-requests": true,
- "tag-separator": "/",
- "packages": {
- "aiplatform": {
- "component": "aiplatform"
- },
- "auth": {
- "component": "auth"
- },
- "auth/oauth2adapt": {
- "component": "auth/oauth2adapt"
- },
- "bigquery": {
- "component": "bigquery"
- },
- "bigtable": {
- "component": "bigtable"
- },
- "datastore": {
- "component": "datastore"
- },
- "errorreporting": {
- "component": "errorreporting"
- },
- "firestore": {
- "component": "firestore"
- },
- "logging": {
- "component": "logging"
- },
- "profiler": {
- "component": "profiler"
- },
- "pubsub": {
- "component": "pubsub"
- },
- "pubsublite": {
- "component": "pubsublite"
- },
- "spanner": {
- "component": "spanner"
- },
- "storage": {
- "component": "storage"
- },
- "vertexai": {
- "component": "vertexai"
- }
- },
- "plugins": [
- "sentence-case"
- ]
-}
diff --git a/upstream/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/upstream/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
deleted file mode 100644
index 3615b1952ec..00000000000
--- a/upstream/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
+++ /dev/null
@@ -1,439 +0,0 @@
-{
- "release-type": "go-yoshi",
- "include-component-in-tag": true,
- "tag-separator": "/",
- "packages": {
- "accessapproval": {
- "component": "accessapproval"
- },
- "accesscontextmanager": {
- "component": "accesscontextmanager"
- },
- "advisorynotifications": {
- "component": "advisorynotifications"
- },
- "alloydb": {
- "component": "alloydb"
- },
- "analytics": {
- "component": "analytics"
- },
- "apigateway": {
- "component": "apigateway"
- },
- "apigeeconnect": {
- "component": "apigeeconnect"
- },
- "apigeeregistry": {
- "component": "apigeeregistry"
- },
- "apikeys": {
- "component": "apikeys"
- },
- "appengine": {
- "component": "appengine"
- },
- "apphub": {
- "component": "apphub"
- },
- "apps": {
- "component": "apps"
- },
- "area120": {
- "component": "area120"
- },
- "artifactregistry": {
- "component": "artifactregistry"
- },
- "asset": {
- "component": "asset"
- },
- "assuredworkloads": {
- "component": "assuredworkloads"
- },
- "automl": {
- "component": "automl"
- },
- "backupdr": {
- "component": "backupdr"
- },
- "baremetalsolution": {
- "component": "baremetalsolution"
- },
- "batch": {
- "component": "batch"
- },
- "beyondcorp": {
- "component": "beyondcorp"
- },
- "billing": {
- "component": "billing"
- },
- "binaryauthorization": {
- "component": "binaryauthorization"
- },
- "certificatemanager": {
- "component": "certificatemanager"
- },
- "channel": {
- "component": "channel"
- },
- "chat": {
- "component": "chat"
- },
- "cloudbuild": {
- "component": "cloudbuild"
- },
- "cloudcontrolspartner": {
- "component": "cloudcontrolspartner"
- },
- "clouddms": {
- "component": "clouddms"
- },
- "cloudprofiler": {
- "component": "cloudprofiler"
- },
- "cloudquotas": {
- "component": "cloudquotas"
- },
- "cloudtasks": {
- "component": "cloudtasks"
- },
- "commerce": {
- "component": "commerce"
- },
- "compute": {
- "component": "compute"
- },
- "compute/metadata": {
- "component": "compute/metadata"
- },
- "confidentialcomputing": {
- "component": "confidentialcomputing"
- },
- "config": {
- "component": "config"
- },
- "contactcenterinsights": {
- "component": "contactcenterinsights"
- },
- "container": {
- "component": "container"
- },
- "containeranalysis": {
- "component": "containeranalysis"
- },
- "datacatalog": {
- "component": "datacatalog"
- },
- "dataflow": {
- "component": "dataflow"
- },
- "dataform": {
- "component": "dataform"
- },
- "datafusion": {
- "component": "datafusion"
- },
- "datalabeling": {
- "component": "datalabeling"
- },
- "dataplex": {
- "component": "dataplex"
- },
- "dataproc": {
- "component": "dataproc"
- },
- "dataqna": {
- "component": "dataqna"
- },
- "datastream": {
- "component": "datastream"
- },
- "deploy": {
- "component": "deploy"
- },
- "dialogflow": {
- "component": "dialogflow"
- },
- "discoveryengine": {
- "component": "discoveryengine"
- },
- "dlp": {
- "component": "dlp"
- },
- "documentai": {
- "component": "documentai"
- },
- "domains": {
- "component": "domains"
- },
- "edgecontainer": {
- "component": "edgecontainer"
- },
- "edgenetwork": {
- "component": "edgenetwork"
- },
- "essentialcontacts": {
- "component": "essentialcontacts"
- },
- "eventarc": {
- "component": "eventarc"
- },
- "filestore": {
- "component": "filestore"
- },
- "functions": {
- "component": "functions"
- },
- "gkebackup": {
- "component": "gkebackup"
- },
- "gkeconnect": {
- "component": "gkeconnect"
- },
- "gkehub": {
- "component": "gkehub"
- },
- "gkemulticloud": {
- "component": "gkemulticloud"
- },
- "grafeas": {
- "component": "grafeas"
- },
- "gsuiteaddons": {
- "component": "gsuiteaddons"
- },
- "iam": {
- "component": "iam"
- },
- "iap": {
- "component": "iap"
- },
- "identitytoolkit": {
- "component": "identitytoolkit"
- },
- "ids": {
- "component": "ids"
- },
- "iot": {
- "component": "iot"
- },
- "kms": {
- "component": "kms"
- },
- "language": {
- "component": "language"
- },
- "lifesciences": {
- "component": "lifesciences"
- },
- "longrunning": {
- "component": "longrunning"
- },
- "managedidentities": {
- "component": "managedidentities"
- },
- "maps": {
- "component": "maps"
- },
- "mediatranslation": {
- "component": "mediatranslation"
- },
- "memcache": {
- "component": "memcache"
- },
- "metastore": {
- "component": "metastore"
- },
- "migrationcenter": {
- "component": "migrationcenter"
- },
- "monitoring": {
- "component": "monitoring"
- },
- "netapp": {
- "component": "netapp"
- },
- "networkconnectivity": {
- "component": "networkconnectivity"
- },
- "networkmanagement": {
- "component": "networkmanagement"
- },
- "networksecurity": {
- "component": "networksecurity"
- },
- "notebooks": {
- "component": "notebooks"
- },
- "optimization": {
- "component": "optimization"
- },
- "orchestration": {
- "component": "orchestration"
- },
- "orgpolicy": {
- "component": "orgpolicy"
- },
- "osconfig": {
- "component": "osconfig"
- },
- "oslogin": {
- "component": "oslogin"
- },
- "parallelstore": {
- "component": "parallelstore"
- },
- "phishingprotection": {
- "component": "phishingprotection"
- },
- "policysimulator": {
- "component": "policysimulator"
- },
- "policytroubleshooter": {
- "component": "policytroubleshooter"
- },
- "privatecatalog": {
- "component": "privatecatalog"
- },
- "rapidmigrationassessment": {
- "component": "rapidmigrationassessment"
- },
- "recaptchaenterprise": {
- "component": "recaptchaenterprise"
- },
- "recommendationengine": {
- "component": "recommendationengine"
- },
- "recommender": {
- "component": "recommender"
- },
- "redis": {
- "component": "redis"
- },
- "resourcemanager": {
- "component": "resourcemanager"
- },
- "resourcesettings": {
- "component": "resourcesettings"
- },
- "retail": {
- "component": "retail"
- },
- "run": {
- "component": "run"
- },
- "scheduler": {
- "component": "scheduler"
- },
- "secretmanager": {
- "component": "secretmanager"
- },
- "securesourcemanager": {
- "component": "securesourcemanager"
- },
- "security": {
- "component": "security"
- },
- "securitycenter": {
- "component": "securitycenter"
- },
- "securitycentermanagement": {
- "component": "securitycentermanagement"
- },
- "securityposture": {
- "component": "securityposture"
- },
- "servicecontrol": {
- "component": "servicecontrol"
- },
- "servicedirectory": {
- "component": "servicedirectory"
- },
- "servicehealth": {
- "component": "servicehealth"
- },
- "servicemanagement": {
- "component": "servicemanagement"
- },
- "serviceusage": {
- "component": "serviceusage"
- },
- "shell": {
- "component": "shell"
- },
- "shopping": {
- "component": "shopping"
- },
- "speech": {
- "component": "speech"
- },
- "storageinsights": {
- "component": "storageinsights"
- },
- "storagetransfer": {
- "component": "storagetransfer"
- },
- "streetview": {
- "component": "streetview"
- },
- "support": {
- "component": "support"
- },
- "talent": {
- "component": "talent"
- },
- "telcoautomation": {
- "component": "telcoautomation"
- },
- "texttospeech": {
- "component": "texttospeech"
- },
- "tpu": {
- "component": "tpu"
- },
- "trace": {
- "component": "trace"
- },
- "translate": {
- "component": "translate"
- },
- "video": {
- "component": "video"
- },
- "videointelligence": {
- "component": "videointelligence"
- },
- "vision": {
- "component": "vision"
- },
- "visionai": {
- "component": "visionai"
- },
- "vmmigration": {
- "component": "vmmigration"
- },
- "vmwareengine": {
- "component": "vmwareengine"
- },
- "vpcaccess": {
- "component": "vpcaccess"
- },
- "webrisk": {
- "component": "webrisk"
- },
- "websecurityscanner": {
- "component": "websecurityscanner"
- },
- "workflows": {
- "component": "workflows"
- },
- "workstations": {
- "component": "workstations"
- }
- },
- "plugins": [
- "sentence-case"
- ]
-}
diff --git a/upstream/vendor/cloud.google.com/go/release-please-config.json b/upstream/vendor/cloud.google.com/go/release-please-config.json
deleted file mode 100644
index 1400245b8a3..00000000000
--- a/upstream/vendor/cloud.google.com/go/release-please-config.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "release-type": "go-yoshi",
- "separate-pull-requests": true,
- "include-component-in-tag": false,
- "packages": {
- ".": {
- "component": "main"
- }
- },
- "plugins": ["sentence-case"]
-}
diff --git a/upstream/vendor/cloud.google.com/go/testing.md b/upstream/vendor/cloud.google.com/go/testing.md
deleted file mode 100644
index 78bb35b3b6e..00000000000
--- a/upstream/vendor/cloud.google.com/go/testing.md
+++ /dev/null
@@ -1,237 +0,0 @@
-# Testing Code that depends on Go Client Libraries
-
-The Go client libraries generated as a part of `cloud.google.com/go` all take
-the approach of returning concrete types instead of interfaces. That way, new
-fields and methods can be added to the libraries without breaking users. This
-document will go over some patterns that can be used to test code that depends
-on the Go client libraries.
-
-## Testing gRPC services using fakes
-
-*Note*: You can see the full
-[example code using a fake here](https://github.com/googleapis/google-cloud-go/tree/main/internal/examples/fake).
-
-The clients found in `cloud.google.com/go` are gRPC based, with a couple of
-notable exceptions being the [`storage`](https://pkg.go.dev/cloud.google.com/go/storage)
-and [`bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery) clients.
-Interactions with gRPC services can be faked by serving up your own in-memory
-server within your test. One benefit of using this approach is that you don’t
-need to define an interface in your runtime code; you can keep using
-concrete struct types. You instead define a fake server in your test code. For
-example, take a look at the following function:
-
-```go
-import (
- "context"
- "fmt"
- "log"
- "os"
-
- translate "cloud.google.com/go/translate/apiv3"
- "github.com/googleapis/gax-go/v2"
- translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
-)
-
-func TranslateTextWithConcreteClient(client *translate.TranslationClient, text string, targetLang string) (string, error) {
- ctx := context.Background()
- log.Printf("Translating %q to %q", text, targetLang)
- req := &translatepb.TranslateTextRequest{
- Parent: fmt.Sprintf("projects/%s/locations/global", os.Getenv("GOOGLE_CLOUD_PROJECT")),
- TargetLanguageCode: "en-US",
- Contents: []string{text},
- }
- resp, err := client.TranslateText(ctx, req)
- if err != nil {
- return "", fmt.Errorf("unable to translate text: %v", err)
- }
- translations := resp.GetTranslations()
- if len(translations) != 1 {
- return "", fmt.Errorf("expected only one result, got %d", len(translations))
- }
- return translations[0].TranslatedText, nil
-}
-```
-
-Here is an example of what a fake server implementation would look like for
-faking the interactions above:
-
-```go
-import (
- "context"
-
- translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
-)
-
-type fakeTranslationServer struct {
- translatepb.UnimplementedTranslationServiceServer
-}
-
-func (f *fakeTranslationServer) TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest) (*translatepb.TranslateTextResponse, error) {
- resp := &translatepb.TranslateTextResponse{
- Translations: []*translatepb.Translation{
- &translatepb.Translation{
- TranslatedText: "Hello World",
- },
- },
- }
- return resp, nil
-}
-```
-
-All of the generated protobuf code found in [google.golang.org/genproto](https://pkg.go.dev/google.golang.org/genproto)
-contains a similar `package.UnimplementedFooServer` type that is useful for
-creating fakes. By embedding the unimplemented server in the
-`fakeTranslationServer`, the fake will “inherit” all of the RPCs the server
-exposes. Then, by providing our own `fakeTranslationServer.TranslateText`
-method you can “override” the default unimplemented behavior of the one RPC that
-you would like to be faked.
-
-The test itself does require a little bit of setup: start up a `net.Listener`,
-register the server, and tell the client library to call the server:
-
-```go
-import (
- "context"
- "net"
- "testing"
-
- translate "cloud.google.com/go/translate/apiv3"
- "google.golang.org/api/option"
- translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials/insecure"
-)
-
-func TestTranslateTextWithConcreteClient(t *testing.T) {
- ctx := context.Background()
-
- // Setup the fake server.
- fakeTranslationServer := &fakeTranslationServer{}
- l, err := net.Listen("tcp", "localhost:0")
- if err != nil {
- t.Fatal(err)
- }
- gsrv := grpc.NewServer()
- translatepb.RegisterTranslationServiceServer(gsrv, fakeTranslationServer)
- fakeServerAddr := l.Addr().String()
- go func() {
- if err := gsrv.Serve(l); err != nil {
- panic(err)
- }
- }()
-
- // Create a client.
- client, err := translate.NewTranslationClient(ctx,
- option.WithEndpoint(fakeServerAddr),
- option.WithoutAuthentication(),
- option.WithGRPCDialOption(grpc.WithTransportCredentials(insecure.NewCredentials())),
- )
- if err != nil {
- t.Fatal(err)
- }
-
- // Run the test.
- text, err := TranslateTextWithConcreteClient(client, "Hola Mundo", "en-US")
- if err != nil {
- t.Fatal(err)
- }
- if text != "Hello World" {
- t.Fatalf("got %q, want Hello World", text)
- }
-}
-```
-
-## Testing using mocks
-
-*Note*: You can see the full
-[example code using a mock here](https://github.com/googleapis/google-cloud-go/tree/main/internal/examples/mock).
-
-When mocking code you need to work with interfaces. Let’s create an interface
-for the `cloud.google.com/go/translate/apiv3` client used in the
-`TranslateTextWithConcreteClient` function mentioned in the previous section.
-The `translate.Client` has over a dozen methods but this code only uses one of
-them. Here is an interface that satisfies the interactions of the
-`translate.Client` in this function.
-
-```go
-type TranslationClient interface {
- TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error)
-}
-```
-
-Now that we have an interface that satisfies the method being used we can
-rewrite the function signature to take the interface instead of the concrete
-type.
-
-```go
-func TranslateTextWithInterfaceClient(client TranslationClient, text string, targetLang string) (string, error) {
-// ...
-}
-```
-
-This allows a real `translate.Client` to be passed to the method in production
-and for a mock implementation to be passed in during testing. This pattern can
-be applied to any Go code, not just `cloud.google.com/go`. This is because
-interfaces in Go are implicitly satisfied. Structs in the client libraries can
-implicitly implement interfaces defined in your codebase. Let’s take a look at
-what it might look like to define a lightweight mock for the `TranslationClient`
-interface.
-
-```go
-import (
- "context"
- "testing"
-
- "github.com/googleapis/gax-go/v2"
- translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
-)
-
-type mockClient struct{}
-
-func (*mockClient) TranslateText(_ context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error) {
- resp := &translatepb.TranslateTextResponse{
- Translations: []*translatepb.Translation{
- &translatepb.Translation{
- TranslatedText: "Hello World",
- },
- },
- }
- return resp, nil
-}
-
-func TestTranslateTextWithAbstractClient(t *testing.T) {
- client := &mockClient{}
- text, err := TranslateTextWithInterfaceClient(client, "Hola Mundo", "en-US")
- if err != nil {
- t.Fatal(err)
- }
- if text != "Hello World" {
- t.Fatalf("got %q, want Hello World", text)
- }
-}
-```
-
-If you prefer to not write your own mocks there are mocking frameworks such as
-[golang/mock](https://github.com/golang/mock) which can generate mocks for you
-from an interface. As a word of caution though, try to not
-[overuse mocks](https://testing.googleblog.com/2013/05/testing-on-toilet-dont-overuse-mocks.html).
-
-## Testing using emulators
-
-Some of the client libraries provided in `cloud.google.com/go` support running
-against a service emulator. The concept is similar to that of using fakes,
-mentioned above, but the server is managed for you. You just need to start it up
-and instruct the client library to talk to the emulator by setting a service
-specific emulator environment variable. Current services/environment-variables
-are:
-
-- bigtable: `BIGTABLE_EMULATOR_HOST`
-- datastore: `DATASTORE_EMULATOR_HOST`
-- firestore: `FIRESTORE_EMULATOR_HOST`
-- pubsub: `PUBSUB_EMULATOR_HOST`
-- spanner: `SPANNER_EMULATOR_HOST`
-- storage: `STORAGE_EMULATOR_HOST`
- - Although the storage client supports an emulator environment variable there is no official emulator provided by gcloud.
-
-For more information on emulators please refer to the
-[gcloud documentation](https://cloud.google.com/sdk/gcloud/reference/beta/emulators).
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
index a6675492b1a..bf0c3e1aacc 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
@@ -1,29 +1,5 @@
# Release History
-## 1.11.1 (2024-04-02)
-
-### Bugs Fixed
-
-* Pollers that use the `Location` header won't consider `http.StatusRequestTimeout` a terminal failure.
-* `runtime.Poller[T].Result` won't consider non-terminal error responses as terminal.
-
-## 1.11.0 (2024-04-01)
-
-### Features Added
-
-* Added `StatusCodes` to `arm/policy.RegistrationOptions` to allow supporting non-standard HTTP status codes during registration.
-* Added field `InsecureAllowCredentialWithHTTP` to `azcore.ClientOptions` and dependent authentication pipeline policies.
-* Added type `MultipartContent` to the `streaming` package to support multipart/form payloads with custom Content-Type and file name.
-
-### Bugs Fixed
-
-* `runtime.SetMultipartFormData` won't try to stringify `[]byte` values.
-* Pollers that use the `Location` header won't consider `http.StatusTooManyRequests` a terminal failure.
-
-### Other Changes
-
-* Update dependencies.
-
## 1.10.0 (2024-02-29)
### Features Added
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go
index f18caf84893..83cf91e3ecb 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go
@@ -20,11 +20,6 @@ type BearerTokenOptions struct {
// policy's credential must support multitenant authentication.
AuxiliaryTenants []string
- // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP.
- // By default, authenticated requests to an HTTP endpoint are rejected by the client.
- // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution.
- InsecureAllowCredentialWithHTTP bool
-
// Scopes contains the list of permission scopes required for the token.
Scopes []string
}
@@ -49,11 +44,6 @@ type RegistrationOptions struct {
// The default valule is 5 minutes.
// NOTE: Setting this to a small value might cause the policy to prematurely fail.
PollingDuration time.Duration
-
- // StatusCodes contains the slice of custom HTTP status codes to use instead
- // of the default http.StatusConflict. This should only be set if a service
- // returns a non-standard HTTP status code when unregistered.
- StatusCodes []int
}
// ClientOptions contains configuration settings for a client's pipeline.
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go
index 039b758bf98..302c19cd426 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go
@@ -30,9 +30,8 @@ func NewPipeline(module, version string, cred azcore.TokenCredential, plOpts azr
return azruntime.Pipeline{}, err
}
authPolicy := NewBearerTokenPolicy(cred, &armpolicy.BearerTokenOptions{
- AuxiliaryTenants: options.AuxiliaryTenants,
- InsecureAllowCredentialWithHTTP: options.InsecureAllowCredentialWithHTTP,
- Scopes: []string{conf.Audience + "/.default"},
+ AuxiliaryTenants: options.AuxiliaryTenants,
+ Scopes: []string{conf.Audience + "/.default"},
})
perRetry := make([]azpolicy.Policy, len(plOpts.PerRetry), len(plOpts.PerRetry)+1)
copy(perRetry, plOpts.PerRetry)
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go
index 765fbc6843d..54b3bb78d85 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go
@@ -64,7 +64,6 @@ func NewBearerTokenPolicy(cred azcore.TokenCredential, opts *armpolicy.BearerTok
p.scopes = make([]string, len(opts.Scopes))
copy(p.scopes, opts.Scopes)
p.btp = azruntime.NewBearerTokenPolicy(cred, opts.Scopes, &azpolicy.BearerTokenOptions{
- InsecureAllowCredentialWithHTTP: opts.InsecureAllowCredentialWithHTTP,
AuthorizationHandler: azpolicy.AuthorizationHandler{
OnChallenge: p.onChallenge,
OnRequest: p.onRequest,
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go
index 810ac9d9fab..83e15949aa3 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go
@@ -8,6 +8,7 @@ package runtime
import (
"context"
+ "errors"
"fmt"
"net/http"
"net/url"
@@ -15,7 +16,6 @@ import (
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource"
armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
@@ -45,9 +45,6 @@ func setDefaults(r *armpolicy.RegistrationOptions) {
if r.PollingDuration == 0 {
r.PollingDuration = 5 * time.Minute
}
- if len(r.StatusCodes) == 0 {
- r.StatusCodes = []int{http.StatusConflict}
- }
}
// NewRPRegistrationPolicy creates a policy object configured using the specified options.
@@ -91,7 +88,7 @@ func (r *rpRegistrationPolicy) Do(req *azpolicy.Request) (*http.Response, error)
// make the original request
resp, err = req.Next()
// getting a 409 is the first indication that the RP might need to be registered, check error response
- if err != nil || !runtime.HasStatusCode(resp, r.options.StatusCodes...) {
+ if err != nil || resp.StatusCode != http.StatusConflict {
return resp, err
}
var reqErr requestError
@@ -108,12 +105,17 @@ func (r *rpRegistrationPolicy) Do(req *azpolicy.Request) (*http.Response, error)
// to the caller so its error unmarshalling will kick in
return resp, err
}
- res, err := resource.ParseResourceID(req.Raw().URL.Path)
+ // RP needs to be registered. start by getting the subscription ID from the original request
+ subID, err := getSubscription(req.Raw().URL.Path)
+ if err != nil {
+ return resp, err
+ }
+ // now get the RP from the error
+ rp, err = getProvider(reqErr)
if err != nil {
return resp, err
}
- rp = res.ResourceType.Namespace
- logRegistrationExit := func(v any) {
+ logRegistrationExit := func(v interface{}) {
log.Writef(LogRPRegistration, "END registration for %s: %v", rp, v)
}
log.Writef(LogRPRegistration, "BEGIN registration for %s", rp)
@@ -122,7 +124,7 @@ func (r *rpRegistrationPolicy) Do(req *azpolicy.Request) (*http.Response, error)
rpOps := &providersOperations{
p: r.pipeline,
u: r.endpoint,
- subID: res.SubscriptionID,
+ subID: subID,
}
if _, err = rpOps.Register(&shared.ContextWithDeniedValues{Context: req.Raw().Context()}, rp); err != nil {
logRegistrationExit(err)
@@ -187,13 +189,36 @@ func isUnregisteredRPCode(errorCode string) bool {
return false
}
+func getSubscription(path string) (string, error) {
+ parts := strings.Split(path, "/")
+ for i, v := range parts {
+ if v == "subscriptions" && (i+1) < len(parts) {
+ return parts[i+1], nil
+ }
+ }
+ return "", fmt.Errorf("failed to obtain subscription ID from %s", path)
+}
+
+func getProvider(re requestError) (string, error) {
+ if len(re.ServiceError.Details) > 0 {
+ return re.ServiceError.Details[0].Target, nil
+ }
+ return "", errors.New("unexpected empty Details")
+}
+
// minimal error definitions to simplify detection
type requestError struct {
ServiceError *serviceError `json:"error"`
}
type serviceError struct {
- Code string `json:"code"`
+ Code string `json:"code"`
+ Details []serviceErrorDetails `json:"details"`
+}
+
+type serviceErrorDetails struct {
+ Code string `json:"code"`
+ Target string `json:"target"`
}
///////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml
index 99348527b54..aab9218538d 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml
@@ -23,7 +23,7 @@ pr:
- sdk/azcore/
- eng/
-extends:
- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
+stages:
+- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
parameters:
ServiceDirectory: azcore
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
index 3041984d9b1..8d1ae213c95 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
@@ -51,15 +51,15 @@ type Request struct {
values opValues
}
-type opValues map[reflect.Type]any
+type opValues map[reflect.Type]interface{}
// Set adds/changes a value
-func (ov opValues) set(value any) {
+func (ov opValues) set(value interface{}) {
ov[reflect.TypeOf(value)] = value
}
// Get looks for a value set by SetValue first
-func (ov opValues) get(value any) bool {
+func (ov opValues) get(value interface{}) bool {
v, ok := ov[reflect.ValueOf(value).Elem().Type()]
if ok {
reflect.ValueOf(value).Elem().Set(reflect.ValueOf(v))
@@ -108,7 +108,7 @@ func (req *Request) Next() (*http.Response, error) {
}
// SetOperationValue adds/changes a mutable key/value associated with a single operation.
-func (req *Request) SetOperationValue(value any) {
+func (req *Request) SetOperationValue(value interface{}) {
if req.values == nil {
req.values = opValues{}
}
@@ -116,7 +116,7 @@ func (req *Request) SetOperationValue(value any) {
}
// OperationValue looks for a value set by SetOperationValue().
-func (req *Request) OperationValue(value any) bool {
+func (req *Request) OperationValue(value interface{}) bool {
if req.values == nil {
return false
}
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
index 08a95458730..bd348b868bf 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
@@ -59,7 +59,7 @@ func NewResponseErrorWithErrorCode(resp *http.Response, errorCode string) error
}
func extractErrorCodeJSON(body []byte) string {
- var rawObj map[string]any
+ var rawObj map[string]interface{}
if err := json.Unmarshal(body, &rawObj); err != nil {
// not a JSON object
return ""
@@ -68,7 +68,7 @@ func extractErrorCodeJSON(body []byte) string {
// check if this is a wrapped error, i.e. { "error": { ... } }
// if so then unwrap it
if wrapped, ok := rawObj["error"]; ok {
- unwrapped, ok := wrapped.(map[string]any)
+ unwrapped, ok := wrapped.(map[string]interface{})
if !ok {
return ""
}
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go
index 6fc6d1400e7..5cb87de2cb5 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go
@@ -30,7 +30,7 @@ func Write(cls log.Event, msg string) {
// Writef invokes the underlying listener with the specified event and formatted message.
// If the event shouldn't be logged or there is no listener then Writef does nothing.
-func Writef(cls log.Event, format string, a ...any) {
+func Writef(cls log.Event, format string, a ...interface{}) {
log.Writef(cls, format, a...)
}
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go
index ccd4794e9e9..b05bd8b38d2 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go
@@ -27,7 +27,7 @@ func Applicable(resp *http.Response) bool {
}
// CanResume returns true if the token can rehydrate this poller type.
-func CanResume(token map[string]any) bool {
+func CanResume(token map[string]interface{}) bool {
_, ok := token["asyncURL"]
return ok
}
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
index 0d781b31d0c..2bb9e105b66 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
@@ -29,7 +29,7 @@ func Applicable(resp *http.Response) bool {
}
// CanResume returns true if the token can rehydrate this poller type.
-func CanResume(token map[string]any) bool {
+func CanResume(token map[string]interface{}) bool {
t, ok := token["type"]
if !ok {
return false
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go
index 51aede8a2b8..25983471867 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go
@@ -26,7 +26,7 @@ func Applicable(resp *http.Response) bool {
}
// CanResume returns true if the token can rehydrate this poller type.
-func CanResume(token map[string]any) bool {
+func CanResume(token map[string]interface{}) bool {
_, ok := token["fakeURL"]
return ok
}
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go
index 7a56c5211b7..d6be89876ab 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go
@@ -28,7 +28,7 @@ func Applicable(resp *http.Response) bool {
}
// CanResume returns true if the token can rehydrate this poller type.
-func CanResume(token map[string]any) bool {
+func CanResume(token map[string]interface{}) bool {
t, ok := token["type"]
if !ok {
return false
@@ -103,10 +103,6 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
} else if resp.StatusCode > 199 && resp.StatusCode < 300 {
// any 2xx other than a 202 indicates success
p.CurState = poller.StatusSucceeded
- } else if pollers.IsNonTerminalHTTPStatusCode(resp) {
- // the request timed out or is being throttled.
- // DO NOT include this as a terminal failure. preserve
- // the existing state and return the response.
} else {
p.CurState = poller.StatusFailed
}
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go
index ac1c0efb5ac..1bc7ad0aced 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go
@@ -25,7 +25,7 @@ func Applicable(resp *http.Response) bool {
}
// CanResume returns true if the token can rehydrate this poller type.
-func CanResume(token map[string]any) bool {
+func CanResume(token map[string]interface{}) bool {
_, ok := token["oplocURL"]
return ok
}
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go
index eb3cf651db0..d8d86a46c2d 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go
@@ -74,7 +74,7 @@ func ExtractToken(token string) ([]byte, error) {
// IsTokenValid returns an error if the specified token isn't applicable for generic type T.
func IsTokenValid[T any](token string) error {
- raw := map[string]any{}
+ raw := map[string]interface{}{}
if err := json.Unmarshal([]byte(token), &raw); err != nil {
return err
}
@@ -185,16 +185,3 @@ func ResultHelper[T any](resp *http.Response, failed bool, out *T) error {
}
return nil
}
-
-// IsNonTerminalHTTPStatusCode returns true if the HTTP status code should be
-// considered non-terminal thus eligible for retry.
-func IsNonTerminalHTTPStatusCode(resp *http.Response) bool {
- return exported.HasStatusCode(resp,
- http.StatusRequestTimeout, // 408
- http.StatusTooManyRequests, // 429
- http.StatusInternalServerError, // 500
- http.StatusBadGateway, // 502
- http.StatusServiceUnavailable, // 503
- http.StatusGatewayTimeout, // 504
- )
-}
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
index 03691cbf024..330bf9a60b7 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
@@ -40,5 +40,5 @@ const (
Module = "azcore"
// Version is the semantic version (see http://semver.org) of this module.
- Version = "v1.11.1"
+ Version = "v1.10.0"
)
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go
index 8d984535887..d934f1dc5fa 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go
@@ -39,11 +39,6 @@ type ClientOptions struct {
// Cloud specifies a cloud for the client. The default is Azure Public Cloud.
Cloud cloud.Configuration
- // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP.
- // By default, authenticated requests to an HTTP endpoint are rejected by the client.
- // WARNING: setting this to true will allow sending the credential in clear text. Use with caution.
- InsecureAllowCredentialWithHTTP bool
-
// Logging configures the built-in logging policy.
Logging LogOptions
@@ -152,11 +147,6 @@ type BearerTokenOptions struct {
// When this field isn't set, the policy follows its default behavior of authorizing every request with a bearer token from
// its given credential.
AuthorizationHandler AuthorizationHandler
-
- // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP.
- // By default, authenticated requests to an HTTP endpoint are rejected by the client.
- // WARNING: setting this to true will allow sending the bearer token in clear text. Use with caution.
- InsecureAllowCredentialWithHTTP bool
}
// AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request.
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
index cb2a6952805..f0f28035595 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
@@ -24,7 +24,6 @@ type BearerTokenPolicy struct {
authzHandler policy.AuthorizationHandler
cred exported.TokenCredential
scopes []string
- allowHTTP bool
}
type acquiringResourceState struct {
@@ -56,7 +55,6 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *
cred: cred,
scopes: scopes,
mainResource: temporal.NewResource(acquire),
- allowHTTP: opts.InsecureAllowCredentialWithHTTP,
}
}
@@ -82,7 +80,7 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) {
return req.Next()
}
- if err := checkHTTPSForAuth(req, b.allowHTTP); err != nil {
+ if err := checkHTTPSForAuth(req); err != nil {
return nil, err
}
@@ -115,8 +113,8 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) {
return res, err
}
-func checkHTTPSForAuth(req *policy.Request, allowHTTP bool) error {
- if strings.ToLower(req.Raw().URL.Scheme) != "https" && !allowHTTP {
+func checkHTTPSForAuth(req *policy.Request) error {
+ if strings.ToLower(req.Raw().URL.Scheme) != "https" {
return errorinfo.NonRetriableError(errors.New("authenticated requests are not permitted for non TLS protected (https) endpoints"))
}
return nil
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go
index eeb1c09cc12..6f577fa7a9e 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go
@@ -12,19 +12,13 @@ import (
// KeyCredentialPolicy authorizes requests with a [azcore.KeyCredential].
type KeyCredentialPolicy struct {
- cred *exported.KeyCredential
- header string
- prefix string
- allowHTTP bool
+ cred *exported.KeyCredential
+ header string
+ prefix string
}
// KeyCredentialPolicyOptions contains the optional values configuring [KeyCredentialPolicy].
type KeyCredentialPolicyOptions struct {
- // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP.
- // By default, authenticated requests to an HTTP endpoint are rejected by the client.
- // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution.
- InsecureAllowCredentialWithHTTP bool
-
// Prefix is used if the key requires a prefix before it's inserted into the HTTP request.
Prefix string
}
@@ -38,10 +32,9 @@ func NewKeyCredentialPolicy(cred *exported.KeyCredential, header string, options
options = &KeyCredentialPolicyOptions{}
}
return &KeyCredentialPolicy{
- cred: cred,
- header: header,
- prefix: options.Prefix,
- allowHTTP: options.InsecureAllowCredentialWithHTTP,
+ cred: cred,
+ header: header,
+ prefix: options.Prefix,
}
}
@@ -51,7 +44,7 @@ func (k *KeyCredentialPolicy) Do(req *policy.Request) (*http.Response, error) {
// this prevents a panic that might be hard to diagnose and allows testing
// against http endpoints that don't require authentication.
if k.cred != nil {
- if err := checkHTTPSForAuth(req, k.allowHTTP); err != nil {
+ if err := checkHTTPSForAuth(req); err != nil {
return nil, err
}
val := exported.KeyCredentialGet(k.cred)
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go
index 3964beea862..ebe2b7772ba 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go
@@ -12,17 +12,13 @@ import (
// SASCredentialPolicy authorizes requests with a [azcore.SASCredential].
type SASCredentialPolicy struct {
- cred *exported.SASCredential
- header string
- allowHTTP bool
+ cred *exported.SASCredential
+ header string
}
// SASCredentialPolicyOptions contains the optional values configuring [SASCredentialPolicy].
type SASCredentialPolicyOptions struct {
- // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP.
- // By default, authenticated requests to an HTTP endpoint are rejected by the client.
- // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution.
- InsecureAllowCredentialWithHTTP bool
+ // placeholder for future optional values
}
// NewSASCredentialPolicy creates a new instance of [SASCredentialPolicy].
@@ -30,13 +26,9 @@ type SASCredentialPolicyOptions struct {
// - header is the name of the HTTP request header in which the shared access signature is placed
// - options contains optional configuration, pass nil to accept the default values
func NewSASCredentialPolicy(cred *exported.SASCredential, header string, options *SASCredentialPolicyOptions) *SASCredentialPolicy {
- if options == nil {
- options = &SASCredentialPolicyOptions{}
- }
return &SASCredentialPolicy{
- cred: cred,
- header: header,
- allowHTTP: options.InsecureAllowCredentialWithHTTP,
+ cred: cred,
+ header: header,
}
}
@@ -46,7 +38,7 @@ func (k *SASCredentialPolicy) Do(req *policy.Request) (*http.Response, error) {
// this prevents a panic that might be hard to diagnose and allows testing
// against http endpoints that don't require authentication.
if k.cred != nil {
- if err := checkHTTPSForAuth(req, k.allowHTTP); err != nil {
+ if err := checkHTTPSForAuth(req); err != nil {
return nil, err
}
req.Raw().Header.Add(k.header, exported.SASCredentialGet(k.cred))
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
index 03f76c9aa8e..c373f68962e 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
@@ -154,7 +154,7 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options
if err != nil {
return nil, err
}
- var asJSON map[string]any
+ var asJSON map[string]interface{}
if err := json.Unmarshal(raw, &asJSON); err != nil {
return nil, err
}
@@ -240,7 +240,7 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt
}
start := time.Now()
- logPollUntilDoneExit := func(v any) {
+ logPollUntilDoneExit := func(v interface{}) {
log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", p.op, v, time.Since(start))
}
log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", p.op)
@@ -334,11 +334,6 @@ func (p *Poller[T]) Result(ctx context.Context) (res T, err error) {
err = p.op.Result(ctx, p.result)
var respErr *exported.ResponseError
if errors.As(err, &respErr) {
- if pollers.IsNonTerminalHTTPStatusCode(respErr.RawResponse) {
- // the request failed in a non-terminal way.
- // don't cache the error or mark the Poller as done
- return
- }
// the LRO failed. record the error
p.err = err
} else if err != nil {
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
index 06ac95b1b71..bef05f2a3d9 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
@@ -11,11 +11,9 @@ import (
"context"
"encoding/json"
"encoding/xml"
- "errors"
"fmt"
"io"
"mime/multipart"
- "net/textproto"
"net/url"
"path"
"strings"
@@ -23,7 +21,6 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
)
// Base64Encoding is usesd to specify which base-64 encoder/decoder to use when
@@ -112,7 +109,7 @@ func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) er
}
// MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody.
-func MarshalAsJSON(req *policy.Request, v any) error {
+func MarshalAsJSON(req *policy.Request, v interface{}) error {
b, err := json.Marshal(v)
if err != nil {
return fmt.Errorf("error marshalling type %T: %s", v, err)
@@ -122,7 +119,7 @@ func MarshalAsJSON(req *policy.Request, v any) error {
}
// MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody.
-func MarshalAsXML(req *policy.Request, v any) error {
+func MarshalAsXML(req *policy.Request, v interface{}) error {
b, err := xml.Marshal(v)
if err != nil {
return fmt.Errorf("error marshalling type %T: %s", v, err)
@@ -132,10 +129,10 @@ func MarshalAsXML(req *policy.Request, v any) error {
return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML)
}
-// SetMultipartFormData writes the specified keys/values as multi-part form fields with the specified value.
-// File content must be specified as an [io.ReadSeekCloser] or [streaming.MultipartContent].
-// Byte slices will be treated as JSON. All other values are treated as string values.
-func SetMultipartFormData(req *policy.Request, formData map[string]any) error {
+// SetMultipartFormData writes the specified keys/values as multi-part form
+// fields with the specified value. File content must be specified as a ReadSeekCloser.
+// All other values are treated as string values.
+func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) error {
body := bytes.Buffer{}
writer := multipart.NewWriter(&body)
@@ -151,60 +148,6 @@ func SetMultipartFormData(req *policy.Request, formData map[string]any) error {
return nil
}
- quoteEscaper := strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
-
- writeMultipartContent := func(fieldname string, mpc streaming.MultipartContent) error {
- if mpc.Body == nil {
- return errors.New("streaming.MultipartContent.Body cannot be nil")
- }
-
- // use fieldname for the file name when unspecified
- filename := fieldname
-
- if mpc.ContentType == "" && mpc.Filename == "" {
- return writeContent(fieldname, filename, mpc.Body)
- }
- if mpc.Filename != "" {
- filename = mpc.Filename
- }
- // this is pretty much copied from multipart.Writer.CreateFormFile
- // but lets us set the caller provided Content-Type and filename
- h := make(textproto.MIMEHeader)
- h.Set("Content-Disposition",
- fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
- quoteEscaper.Replace(fieldname), quoteEscaper.Replace(filename)))
- contentType := "application/octet-stream"
- if mpc.ContentType != "" {
- contentType = mpc.ContentType
- }
- h.Set("Content-Type", contentType)
- fd, err := writer.CreatePart(h)
- if err != nil {
- return err
- }
- // copy the data to the form file
- if _, err = io.Copy(fd, mpc.Body); err != nil {
- return err
- }
- return nil
- }
-
- // the same as multipart.Writer.WriteField but lets us specify the Content-Type
- writeField := func(fieldname, contentType string, value string) error {
- h := make(textproto.MIMEHeader)
- h.Set("Content-Disposition",
- fmt.Sprintf(`form-data; name="%s"`, quoteEscaper.Replace(fieldname)))
- h.Set("Content-Type", contentType)
- fd, err := writer.CreatePart(h)
- if err != nil {
- return err
- }
- if _, err = fd.Write([]byte(value)); err != nil {
- return err
- }
- return nil
- }
-
for k, v := range formData {
if rsc, ok := v.(io.ReadSeekCloser); ok {
if err := writeContent(k, k, rsc); err != nil {
@@ -218,35 +161,13 @@ func SetMultipartFormData(req *policy.Request, formData map[string]any) error {
}
}
continue
- } else if mpc, ok := v.(streaming.MultipartContent); ok {
- if err := writeMultipartContent(k, mpc); err != nil {
- return err
- }
- continue
- } else if mpcs, ok := v.([]streaming.MultipartContent); ok {
- for _, mpc := range mpcs {
- if err := writeMultipartContent(k, mpc); err != nil {
- return err
- }
- }
- continue
}
-
- var content string
- contentType := shared.ContentTypeTextPlain
- switch tt := v.(type) {
- case []byte:
- // JSON, don't quote it
- content = string(tt)
- contentType = shared.ContentTypeAppJSON
- case string:
- content = tt
- default:
- // ensure the value is in string format
- content = fmt.Sprintf("%v", v)
+ // ensure the value is in string format
+ s, ok := v.(string)
+ if !ok {
+ s = fmt.Sprintf("%v", v)
}
-
- if err := writeField(k, contentType, content); err != nil {
+ if err := writer.WriteField(k, s); err != nil {
return err
}
}
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go
index 048566e02c0..003c875b1f5 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go
@@ -40,7 +40,7 @@ func UnmarshalAsByteArray(resp *http.Response, v *[]byte, format Base64Encoding)
}
// UnmarshalAsJSON calls json.Unmarshal() to unmarshal the received payload into the value pointed to by v.
-func UnmarshalAsJSON(resp *http.Response, v any) error {
+func UnmarshalAsJSON(resp *http.Response, v interface{}) error {
payload, err := Payload(resp)
if err != nil {
return err
@@ -61,7 +61,7 @@ func UnmarshalAsJSON(resp *http.Response, v any) error {
}
// UnmarshalAsXML calls xml.Unmarshal() to unmarshal the received payload into the value pointed to by v.
-func UnmarshalAsXML(resp *http.Response, v any) error {
+func UnmarshalAsXML(resp *http.Response, v interface{}) error {
payload, err := Payload(resp)
if err != nil {
return err
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go
index 2468540bd75..fbcd48311b8 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go
@@ -73,17 +73,3 @@ func (p *progress) Seek(offset int64, whence int) (int64, error) {
func (p *progress) Close() error {
return p.rc.Close()
}
-
-// MultipartContent contains streaming content used in multipart/form payloads.
-type MultipartContent struct {
- // Body contains the required content body.
- Body io.ReadSeekCloser
-
- // ContentType optionally specifies the HTTP Content-Type for this Body.
- // The default value is application/octet-stream.
- ContentType string
-
- // Filename optionally specifies the filename for this Body.
- // The default value is the field name for the multipart/form section.
- Filename string
-}
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore
deleted file mode 100644
index 8cdb9103650..00000000000
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-# live test artifacts
-Dockerfile
-k8s.yaml
-sshkey*
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
index 6d4b6feb86e..71dcb5f3e95 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
@@ -1,66 +1,5 @@
# Release History
-## 1.6.0 (2024-06-10)
-
-### Features Added
-* `NewOnBehalfOfCredentialWithClientAssertions` creates an on-behalf-of credential
- that authenticates with client assertions such as federated credentials
-
-### Breaking Changes
-> These changes affect only code written against a beta version such as v1.6.0-beta.4
-* Removed `AzurePipelinesCredential` and the persistent token caching API.
- They will return in v1.7.0-beta.1
-
-### Bugs Fixed
-* Managed identity bug fixes
-
-## 1.6.0-beta.4 (2024-05-14)
-
-### Features Added
-* `AzurePipelinesCredential` authenticates an Azure Pipeline service connection with
- workload identity federation
-
-## 1.6.0-beta.3 (2024-04-09)
-
-### Breaking Changes
-* `DefaultAzureCredential` now sends a probe request with no retries for IMDS managed identity
- environments to avoid excessive retry delays when the IMDS endpoint is not available. This
- should improve credential chain resolution for local development scenarios.
-
-### Bugs Fixed
-* `ManagedIdentityCredential` now specifies resource IDs correctly for Azure Container Instances
-
-## 1.5.2 (2024-04-09)
-
-### Bugs Fixed
-* `ManagedIdentityCredential` now specifies resource IDs correctly for Azure Container Instances
-
-### Other Changes
-* Restored v1.4.0 error behavior for empty tenant IDs
-* Upgraded dependencies
-
-## 1.6.0-beta.2 (2024-02-06)
-
-### Breaking Changes
-> These changes affect only code written against a beta version such as v1.6.0-beta.1
-* Replaced `ErrAuthenticationRequired` with `AuthenticationRequiredError`, a struct
- type that carries the `TokenRequestOptions` passed to the `GetToken` call which
- returned the error.
-
-### Bugs Fixed
-* Fixed more cases in which credential chains like `DefaultAzureCredential`
- should try their next credential after attempting managed identity
- authentication in a Docker Desktop container
-
-### Other Changes
-* `AzureCLICredential` uses the CLI's `expires_on` value for token expiration
-
-## 1.6.0-beta.1 (2024-01-17)
-
-### Features Added
-* Restored persistent token caching API first added in v1.5.0-beta.1
-* Added `AzureCLICredentialOptions.Subscription`
-
## 1.5.1 (2024-01-17)
### Bugs Fixed
@@ -187,7 +126,7 @@
### Features Added
* By default, credentials set client capability "CP1" to enable support for
- [Continuous Access Evaluation (CAE)](https://learn.microsoft.com/entra/identity-platform/app-resilience-continuous-access-evaluation).
+ [Continuous Access Evaluation (CAE)](https://docs.microsoft.com/azure/active-directory/develop/app-resilience-continuous-access-evaluation).
This indicates to Microsoft Entra ID that your application can handle CAE claims challenges.
You can disable this behavior by setting the environment variable "AZURE_IDENTITY_DISABLE_CP1" to "true".
* `InteractiveBrowserCredentialOptions.LoginHint` enables pre-populating the login
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
index 4404be82449..1a649202303 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
@@ -1,6 +1,6 @@
# Migrating from autorest/adal to azidentity
-`azidentity` provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/entra/fundamentals/new-name)) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead.
+`azidentity` provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead.
This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`.
@@ -284,7 +284,7 @@ if err == nil {
}
```
-Note that `azidentity` credentials use the Microsoft Entra endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/permissions-consent-overview).
+Note that `azidentity` credentials use the Microsoft Entra endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/permissions-consent-overview).
## Use azidentity credentials with older packages
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
index b5acff0e632..b6ad2d39f84 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
@@ -1,9 +1,9 @@
# Azure Identity Client Module for Go
-The Azure Identity module provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/entra/fundamentals/new-name)) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication.
+The Azure Identity module provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication.
[](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity)
-| [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity/)
+| [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/)
| [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity)
# Getting started
@@ -30,7 +30,7 @@ When debugging and executing code locally, developers typically use their own ac
#### Authenticating via the Azure CLI
`DefaultAzureCredential` and `AzureCLICredential` can authenticate as the user
-signed in to the [Azure CLI](https://learn.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user.
+signed in to the [Azure CLI](https://docs.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user.
When no default browser is available, `az login` will use the device code
authentication flow. This can also be selected manually by running `az login --use-device-code`.
@@ -69,14 +69,14 @@ The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID.
## Managed Identity
`DefaultAzureCredential` and `ManagedIdentityCredential` support
-[managed identity authentication](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview)
+[managed identity authentication](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview)
in any hosting environment which supports managed identities, such as (this list is not exhaustive):
-* [Azure App Service](https://learn.microsoft.com/azure/app-service/overview-managed-identity)
-* [Azure Arc](https://learn.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)
-* [Azure Cloud Shell](https://learn.microsoft.com/azure/cloud-shell/msi-authorization)
-* [Azure Kubernetes Service](https://learn.microsoft.com/azure/aks/use-managed-identity)
-* [Azure Service Fabric](https://learn.microsoft.com/azure/service-fabric/concepts-managed-identity)
-* [Azure Virtual Machines](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/how-to-use-vm-token)
+* [Azure App Service](https://docs.microsoft.com/azure/app-service/overview-managed-identity)
+* [Azure Arc](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)
+* [Azure Cloud Shell](https://docs.microsoft.com/azure/cloud-shell/msi-authorization)
+* [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/use-managed-identity)
+* [Azure Service Fabric](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity)
+* [Azure Virtual Machines](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token)
## Examples
@@ -207,7 +207,7 @@ For more details, see the [token caching documentation](https://aka.ms/azsdk/go/
Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot).
-For more details on handling specific Microsoft Entra errors, see the Microsoft Entra [error code documentation](https://learn.microsoft.com/entra/identity-platform/reference-error-codes).
+For more details on handling specific Microsoft Entra errors, see the Microsoft Entra [error code documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes).
### Logging
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
index f9cc4894339..c0d6601469c 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
@@ -45,7 +45,7 @@ With persistent disk token caching enabled, the library first determines if a va
#### Example code
-See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.6.0-beta.2#pkg-overview) for example code demonstrating how to configure persistent caching and access cached data.
+See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#pkg-overview) for code examples demonstrating how to configure persistent caching and access cached data.
### Credentials supporting token caching
diff --git a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
index 3564e685e18..832c599eb90 100644
--- a/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
+++ b/upstream/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
@@ -58,7 +58,7 @@ This error contains several pieces of information:
- __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`.
-- __Microsoft Entra ID Error Code and Message__: The error code and message returned by Microsoft Entra ID. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/reference-error-codes#aadsts-error-codes) has more information on AADSTS error codes.
+- __Microsoft Entra ID Error Code and Message__: The error code and message returned by Microsoft Entra ID. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes#aadsts-error-codes) has more information on AADSTS error codes.
- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Microsoft Entra failures.
@@ -97,17 +97,17 @@ azlog.SetEvents(azidentity.EventAuthentication)
| Error Code | Issue | Mitigation |
|---|---|---|
-|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
-|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
-|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal).|
+|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
+|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
+|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).|
## Troubleshoot ClientCertificateCredential authentication issues
| Error Code | Description | Mitigation |
|---|---|---|
-|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-1-upload-a-certificate).|
-|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal).|
+|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).|
+|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).|
## Troubleshoot UsernamePasswordCredential authentication issues
@@ -123,20 +123,20 @@ azlog.SetEvents(azidentity.EventAuthentication)
|Host Environment| | |
|---|---|---|
-|Azure Virtual Machines and Scale Sets|[Configuration](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)|
-|Azure App Service and Azure Functions|[Configuration](https://learn.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)|
+|Azure Virtual Machines and Scale Sets|[Configuration](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)|
+|Azure App Service and Azure Functions|[Configuration](https://docs.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)|
|Azure Kubernetes Service|[Configuration](https://azure.github.io/aad-pod-identity/docs/)|[Troubleshooting](#azure-kubernetes-service-managed-identity)|
-|Azure Arc|[Configuration](https://learn.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)||
-|Azure Service Fabric|[Configuration](https://learn.microsoft.com/azure/service-fabric/concepts-managed-identity)||
+|Azure Arc|[Configuration](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)||
+|Azure Service Fabric|[Configuration](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity)||
### Azure Virtual Machine managed identity
| Error Message |Description| Mitigation |
|---|---|---|
-|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct.If using a system assigned identity, make sure it has been enabled as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).|
+|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct.If using a system assigned identity, make sure it has been enabled as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).|
|The request failed due to a gateway error.|The request to the IMDS endpoint failed due to a gateway error, 502 or 504 status code.|IMDS doesn't support requests via proxy or gateway. Disable proxies or gateways running on the VM for requests to the IMDS endpoint `http://169.254.169.254`|
-|No response received from the managed identity endpoint.|No response was received for the request to IMDS or the request timed out.|
Ensure the VM is configured for managed identity as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm).
Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.
|
-|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|
Refer to the error message for more details on specific failures.
Ensure the VM is configured for managed identity as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm).
Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.
|
+|No response received from the managed identity endpoint.|No response was received for the request to IMDS or the request timed out.|
Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).
Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.
|
+|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|
Refer to the error message for more details on specific failures.
Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).
Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.
|
#### Verify IMDS is available on the VM
@@ -152,7 +152,7 @@ curl 'http://169.254.169.254/metadata/identity/oauth2/token?resource=https://man
| Error Message |Description| Mitigation |
|---|---|---|
-|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|
Ensure the App Service is configured for managed identity as described in [App Service documentation](https://learn.microsoft.com/azure/app-service/overview-managed-identity).
Verify the App Service environment is properly configured and the managed identity endpoint is available. See [below](#verify-the-app-service-managed-identity-endpoint-is-available) for instructions.
|
+|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|
Ensure the App Service is configured for managed identity as described in [App Service documentation](https://docs.microsoft.com/azure/app-service/overview-managed-identity).
Verify the App Service environment is properly configured and the managed identity endpoint is available. See [below](#verify-the-app-service-managed-identity-endpoint-is-available) for instructions.
|
#### Verify the App Service managed identity endpoint is available
@@ -177,8 +177,8 @@ curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-versio
| Error Message |Description| Mitigation |
|---|---|---|
-|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|
Ensure the Azure CLI is installed as described in [Azure CLI documentation](https://learn.microsoft.com/cli/azure/install-azure-cli).
Validate the installation location is in the application's `PATH` environment variable.
|
-|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|
Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://learn.microsoft.com/cli/azure/authenticate-azure-cli).
Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.
|
+|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|
Ensure the Azure CLI is installed as described in [Azure CLI documentation](https://docs.microsoft.com/cli/azure/install-azure-cli).
Validate the installation location is in the application's `PATH` environment variable.
|
+|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|
Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://docs.microsoft.com/cli/azure/authenticate-azure-cli).
Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.
" as text instead of an element.
- // rawTag's contents are lower-cased.
- rawTag string
- // textIsRaw is whether the current text token's data is not escaped.
- textIsRaw bool
- // convertNUL is whether NUL bytes in the current token's data should
- // be converted into \ufffd replacement characters.
- convertNUL bool
- // allowCDATA is whether CDATA sections are allowed in the current context.
- allowCDATA bool
-}
-
-// AllowCDATA sets whether or not the tokenizer recognizes as
-// the text "foo". The default value is false, which means to recognize it as
-// a bogus comment "" instead.
-//
-// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and
-// only if tokenizing foreign content, such as MathML and SVG. However,
-// tracking foreign-contentness is difficult to do purely in the tokenizer,
-// as opposed to the parser, due to HTML integration points: an