From f577be94bba6011f88ff742aedc9f58fc9923e8d Mon Sep 17 00:00:00 2001 From: Rafael Fonseca Date: Fri, 7 Mar 2025 16:38:27 +0100 Subject: [PATCH] =?UTF-8?q?=F0=9F=8C=B1=20Bump=20golangci-lint=20to=20v1.6?= =?UTF-8?q?2.2=20and=20fix=20all=20lint=20errors?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Manual cherry-pick of https://github.com/kubernetes-sigs/cluster-api-provider-aws/pull/5255 --- .github/workflows/pr-golangci-lint.yaml | 2 +- .../eks/controllers/eksconfig_controller.go | 2 +- .../eksconfig_controller_reconciler_test.go | 58 +++++++++---------- cmd/clusterawsadm/ami/helper.go | 2 +- .../cloudformation/bootstrap/template_test.go | 3 +- controllers/awscluster_controller.go | 8 +-- controllers/awsmachine_controller.go | 20 +++---- .../awsmanagedcontrolplane_controller.go | 9 ++- .../rosacontrolplane_controller.go | 9 ++- .../v1beta2/awsmanagedmachinepool_webhook.go | 18 +++--- exp/controllers/awsmachinepool_controller.go | 6 +- .../awsmachinepool_controller_test.go | 50 +++++++++------- exp/controllers/rosamachinepool_controller.go | 2 +- pkg/cloud/scope/fargate.go | 1 + pkg/cloud/scope/managednodegroup.go | 2 + pkg/cloud/scope/rosamachinepool.go | 7 ++- pkg/cloud/scope/session.go | 20 ++++--- .../services/autoscaling/autoscalinggroup.go | 8 +-- pkg/cloud/services/ec2/bastion.go | 2 +- pkg/cloud/services/ec2/launchtemplate.go | 12 ++-- pkg/cloud/services/eks/eks.go | 10 ++-- pkg/cloud/services/eks/fargate.go | 4 ++ pkg/cloud/services/eks/nodegroup.go | 6 +- pkg/cloud/services/elb/loadbalancer.go | 6 +- pkg/cloud/services/network/network.go | 36 ++++++------ .../services/securitygroup/securitygroups.go | 4 +- pkg/internal/cidr/cidr.go | 8 +-- pkg/rosa/helpers.go | 2 +- test/e2e/shared/aws.go | 4 +- test/e2e/shared/exec.go | 2 +- test/e2e/suites/unmanaged/helpers_test.go | 2 +- 31 files changed, 173 insertions(+), 152 deletions(-) diff --git a/.github/workflows/pr-golangci-lint.yaml b/.github/workflows/pr-golangci-lint.yaml index 86309ebeda..cca9d5b257 100644 --- a/.github/workflows/pr-golangci-lint.yaml +++ b/.github/workflows/pr-golangci-lint.yaml @@ -28,6 +28,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # tag=v6.0.1 with: - version: v1.56.1 + version: v1.62.2 args: --out-format=colored-line-number working-directory: ${{matrix.working-directory}} diff --git a/bootstrap/eks/controllers/eksconfig_controller.go b/bootstrap/eks/controllers/eksconfig_controller.go index 5aa9425dd5..b128daac2c 100644 --- a/bootstrap/eks/controllers/eksconfig_controller.go +++ b/bootstrap/eks/controllers/eksconfig_controller.go @@ -231,7 +231,7 @@ func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1 files, err := r.resolveFiles(ctx, config) if err != nil { log.Info("Failed to resolve files for user data") - conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "%s", err.Error()) return err } diff --git a/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go b/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go index cdad8ed84b..163b94a338 100644 --- a/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go +++ 
b/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go @@ -54,16 +54,16 @@ func TestEKSConfigReconciler(t *testing.T) { reconciler := EKSConfigReconciler{ Client: testEnv.Client, } - t.Logf(fmt.Sprintf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name)) + t.Logf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name) g.Eventually(func(gomega Gomega) { err := reconciler.joinWorker(ctx, cluster, config, configOwner("Machine")) gomega.Expect(err).NotTo(HaveOccurred()) }).Should(Succeed()) - t.Logf(fmt.Sprintf("Secret '%s' should exist and be correct", config.Name)) + t.Logf("Secret '%s' should exist and be correct", config.Name) secretList := &corev1.SecretList{} testEnv.Client.List(ctx, secretList) - t.Logf(dump("secrets", secretList)) + t.Log(dump("secrets", secretList)) secret := &corev1.Secret{} g.Eventually(func(gomega Gomega) { gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{ @@ -91,10 +91,10 @@ func TestEKSConfigReconciler(t *testing.T) { }, } config.Status.DataSecretName = &mp.Name - t.Logf(dump("amcp", amcp)) - t.Logf(dump("config", config)) - t.Logf(dump("machinepool", mp)) - t.Logf(dump("cluster", cluster)) + t.Log(dump("amcp", amcp)) + t.Log(dump("config", config)) + t.Log(dump("machinepool", mp)) + t.Log(dump("cluster", cluster)) oldUserData, err := newUserData(cluster.Name, map[string]string{"test-arg": "test-value"}) g.Expect(err).To(BeNil()) expectedUserData, err := newUserData(cluster.Name, map[string]string{"test-arg": "updated-test-value"}) @@ -103,21 +103,21 @@ func TestEKSConfigReconciler(t *testing.T) { amcpList := &ekscontrolplanev1.AWSManagedControlPlaneList{} testEnv.Client.List(ctx, amcpList) - t.Logf(dump("stored-amcps", amcpList)) + t.Log(dump("stored-amcps", amcpList)) reconciler := EKSConfigReconciler{ Client: testEnv.Client, } - t.Logf(fmt.Sprintf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name)) + t.Logf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name) g.Eventually(func(gomega Gomega) { err := reconciler.joinWorker(ctx, cluster, config, configOwner("MachinePool")) gomega.Expect(err).NotTo(HaveOccurred()) }).Should(Succeed()) - t.Logf(fmt.Sprintf("Secret '%s' should exist and be correct", config.Name)) + t.Logf("Secret '%s' should exist and be correct", config.Name) secretList := &corev1.SecretList{} testEnv.Client.List(ctx, secretList) - t.Logf(dump("secrets", secretList)) + t.Log(dump("secrets", secretList)) secret := &corev1.Secret{} g.Eventually(func(gomega Gomega) { @@ -132,15 +132,15 @@ func TestEKSConfigReconciler(t *testing.T) { config.Spec.KubeletExtraArgs = map[string]string{ "test-arg": "updated-test-value", } - t.Logf(dump("config", config)) + t.Log(dump("config", config)) g.Eventually(func(gomega Gomega) { err := reconciler.joinWorker(ctx, cluster, config, configOwner("MachinePool")) gomega.Expect(err).NotTo(HaveOccurred()) }).Should(Succeed()) - t.Logf(fmt.Sprintf("Secret '%s' should exist and be up to date", config.Name)) + t.Logf("Secret '%s' should exist and be up to date", config.Name) testEnv.Client.List(ctx, secretList) - t.Logf(dump("secrets", secretList)) + t.Log(dump("secrets", secretList)) g.Eventually(func(gomega Gomega) { gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{ Name: config.Name, @@ -156,10 +156,10 @@ func TestEKSConfigReconciler(t *testing.T) { cluster := newCluster(amcp.Name) machine := newMachine(cluster, "test-machine") 
config := newEKSConfig(machine) - t.Logf(dump("amcp", amcp)) - t.Logf(dump("config", config)) - t.Logf(dump("machine", machine)) - t.Logf(dump("cluster", cluster)) + t.Log(dump("amcp", amcp)) + t.Log(dump("config", config)) + t.Log(dump("machine", machine)) + t.Log(dump("cluster", cluster)) expectedUserData, err := newUserData(cluster.Name, map[string]string{"test-arg": "test-value"}) g.Expect(err).To(BeNil()) g.Expect(testEnv.Client.Create(ctx, amcp)).To(Succeed()) @@ -174,21 +174,21 @@ func TestEKSConfigReconciler(t *testing.T) { amcpList := &ekscontrolplanev1.AWSManagedControlPlaneList{} testEnv.Client.List(ctx, amcpList) - t.Logf(dump("stored-amcps", amcpList)) + t.Log(dump("stored-amcps", amcpList)) reconciler := EKSConfigReconciler{ Client: testEnv.Client, } - t.Logf(fmt.Sprintf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name)) + t.Logf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name) g.Eventually(func(gomega Gomega) { err := reconciler.joinWorker(ctx, cluster, config, configOwner("Machine")) gomega.Expect(err).NotTo(HaveOccurred()) }).Should(Succeed()) - t.Logf(fmt.Sprintf("Secret '%s' should exist and be out of date", config.Name)) + t.Logf("Secret '%s' should exist and be out of date", config.Name) secretList := &corev1.SecretList{} testEnv.Client.List(ctx, secretList) - t.Logf(dump("secrets", secretList)) + t.Log(dump("secrets", secretList)) secret = &corev1.Secret{} g.Eventually(func(gomega Gomega) { @@ -226,11 +226,11 @@ func TestEKSConfigReconciler(t *testing.T) { "secretKey": []byte(secretContent), }, } - t.Logf(dump("amcp", amcp)) - t.Logf(dump("config", config)) - t.Logf(dump("machine", machine)) - t.Logf(dump("cluster", cluster)) - t.Logf(dump("secret", secret)) + t.Log(dump("amcp", amcp)) + t.Log(dump("config", config)) + t.Log(dump("machine", machine)) + t.Log(dump("cluster", cluster)) + t.Log(dump("secret", secret)) g.Expect(testEnv.Client.Create(ctx, secret)).To(Succeed()) g.Expect(testEnv.Client.Create(ctx, amcp)).To(Succeed()) @@ -252,7 +252,7 @@ func TestEKSConfigReconciler(t *testing.T) { reconciler := EKSConfigReconciler{ Client: testEnv.Client, } - t.Logf(fmt.Sprintf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name)) + t.Logf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name) g.Eventually(func(gomega Gomega) { err := reconciler.joinWorker(ctx, cluster, config, configOwner("Machine")) gomega.Expect(err).NotTo(HaveOccurred()) @@ -260,7 +260,7 @@ func TestEKSConfigReconciler(t *testing.T) { secretList := &corev1.SecretList{} testEnv.Client.List(ctx, secretList) - t.Logf(dump("secrets", secretList)) + t.Log(dump("secrets", secretList)) gotSecret := &corev1.Secret{} g.Eventually(func(gomega Gomega) { gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{ diff --git a/cmd/clusterawsadm/ami/helper.go b/cmd/clusterawsadm/ami/helper.go index ebc393084c..81145b19b2 100644 --- a/cmd/clusterawsadm/ami/helper.go +++ b/cmd/clusterawsadm/ami/helper.go @@ -67,7 +67,7 @@ func LatestPatchRelease(searchVersion string) (string, error) { if err != nil { return "", err } - resp, err := http.Get(fmt.Sprintf(latestStableReleaseURL, "-"+strconv.Itoa(int(searchSemVer.Major))+"."+strconv.Itoa(int(searchSemVer.Minor)))) + resp, err := http.Get(fmt.Sprintf(latestStableReleaseURL, "-"+strconv.Itoa(int(searchSemVer.Major))+"."+strconv.Itoa(int(searchSemVer.Minor)))) //#nosec G115 if err != nil { return "", err } 
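
Note on the most frequent fix in the hunks that follow: cluster-api's conditions.MarkFalse/MarkUnknown take a printf-style message format, so passing err.Error() directly as that parameter means any '%' in the error text gets parsed as a formatting directive, which golangci-lint's printf checker now flags. Threading the value through a constant "%s" format fixes it. A minimal standalone sketch of the failure mode (hypothetical code, not from this repository):

    package main

    import (
        "errors"
        "fmt"
    )

    func main() {
        err := errors.New(`unexpected token "%q" in policy`)

        // Before: the error text is used as the format string, so the
        // "%q" inside it is misparsed as a verb and the output is mangled.
        fmt.Printf(err.Error() + "\n")

        // After: a constant format string; the error text is plain data.
        fmt.Printf("%s\n", err.Error())
    }

The same reasoning applies to every MarkFalse/MarkUnknown call below that gains a "%s" argument.
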
diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/template_test.go b/cmd/clusterawsadm/cloudformation/bootstrap/template_test.go index e47fbbd047..81552bf2d5 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/template_test.go +++ b/cmd/clusterawsadm/cloudformation/bootstrap/template_test.go @@ -18,7 +18,6 @@ package bootstrap import ( "bytes" - "fmt" "os" "path" "testing" @@ -206,7 +205,7 @@ func TestRenderCloudformation(t *testing.T) { dmp := diffmatchpatch.New() diffs := dmp.DiffMain(string(tData), string(data), false) out := dmp.DiffPrettyText(diffs) - t.Fatalf(fmt.Sprintf("Differing output (%s):\n%s", c.fixture, out)) + t.Fatalf("Differing output (%s):\n%s", c.fixture, out) } }) } diff --git a/controllers/awscluster_controller.go b/controllers/awscluster_controller.go index 13db38000a..01393e5184 100644 --- a/controllers/awscluster_controller.go +++ b/controllers/awscluster_controller.go @@ -278,7 +278,7 @@ func (r *AWSClusterReconciler) reconcileLoadBalancer(clusterScope *scope.Cluster if err := elbService.ReconcileLoadbalancers(); err != nil { clusterScope.Error(err, "failed to reconcile load balancer") - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.LoadBalancerFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), err.Error()) + conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.LoadBalancerFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error()) return nil, err } @@ -330,12 +330,12 @@ func (r *AWSClusterReconciler) reconcileNormal(clusterScope *scope.ClusterScope) if err := sgService.ReconcileSecurityGroups(); err != nil { clusterScope.Error(err, "failed to reconcile security groups") - conditions.MarkFalse(awsCluster, infrav1.ClusterSecurityGroupsReadyCondition, infrav1.ClusterSecurityGroupReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), err.Error()) + conditions.MarkFalse(awsCluster, infrav1.ClusterSecurityGroupsReadyCondition, infrav1.ClusterSecurityGroupReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error()) return reconcile.Result{}, err } if err := ec2Service.ReconcileBastion(); err != nil { - conditions.MarkFalse(awsCluster, infrav1.BastionHostReadyCondition, infrav1.BastionHostFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), err.Error()) + conditions.MarkFalse(awsCluster, infrav1.BastionHostReadyCondition, infrav1.BastionHostFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error()) clusterScope.Error(err, "failed to reconcile bastion host") return reconcile.Result{}, err } @@ -355,7 +355,7 @@ func (r *AWSClusterReconciler) reconcileNormal(clusterScope *scope.ClusterScope) } if err := s3Service.ReconcileBucket(); err != nil { - conditions.MarkFalse(awsCluster, infrav1.S3BucketReadyCondition, infrav1.S3BucketFailedReason, clusterv1.ConditionSeverityError, err.Error()) + conditions.MarkFalse(awsCluster, infrav1.S3BucketReadyCondition, infrav1.S3BucketFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile S3 Bucket for AWSCluster %s/%s", awsCluster.Namespace, awsCluster.Name) } diff --git a/controllers/awsmachine_controller.go b/controllers/awsmachine_controller.go index 8f5773d0f1..dedc53187d 100644 --- a/controllers/awsmachine_controller.go +++ 
b/controllers/awsmachine_controller.go @@ -335,7 +335,7 @@ func (r *AWSMachineReconciler) reconcileDelete(machineScope *scope.MachineScope, // all the other errors are blocking. // Because we are reconciling all load balancers, attempt to treat the error as a list of errors. if err = kerrors.FilterOut(err, elb.IsAccessDenied, elb.IsNotFound); err != nil { - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error()) + conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) return ctrl.Result{}, errors.Errorf("failed to reconcile LB attachment: %+v", err) } } @@ -374,7 +374,7 @@ func (r *AWSMachineReconciler) reconcileDelete(machineScope *scope.MachineScope, if err := ec2Service.TerminateInstance(instance.ID); err != nil { machineScope.Error(err, "failed to terminate instance") - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error()) + conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedTerminate", "Failed to terminate instance %q: %v", instance.ID, err) return ctrl.Result{}, err } @@ -402,7 +402,7 @@ func (r *AWSMachineReconciler) reconcileDelete(machineScope *scope.MachineScope, for _, id := range machineScope.AWSMachine.Spec.NetworkInterfaces { if err := ec2Service.DetachSecurityGroupsFromNetworkInterface(core, id); err != nil { machineScope.Error(err, "failed to detach security groups from instance's network interfaces") - conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error()) + conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) return ctrl.Result{}, err } } @@ -494,7 +494,7 @@ func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope * instance, err := r.findInstance(machineScope, ec2svc) if err != nil { machineScope.Error(err, "unable to find instance") - conditions.MarkUnknown(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, err.Error()) + conditions.MarkUnknown(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, "%s", err.Error()) return ctrl.Result{}, err } @@ -527,7 +527,7 @@ func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope * instance, err = r.createInstance(ec2svc, machineScope, clusterScope, objectStoreSvc) if err != nil { machineScope.Error(err, "unable to create instance") - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceProvisionFailedReason, clusterv1.ConditionSeverityError, err.Error()) + conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return ctrl.Result{}, err } } @@ -658,7 +658,7 @@ func (r *AWSMachineReconciler) reconcileOperationalState(ec2svc services.EC2Inte // Ensure that the security groups are correct. 
_, err = r.ensureSecurityGroups(ec2svc, machineScope, machineScope.AWSMachine.Spec.AdditionalSecurityGroups, existingSecurityGroups) if err != nil { - conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, infrav1.SecurityGroupsFailedReason, clusterv1.ConditionSeverityError, err.Error()) + conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, infrav1.SecurityGroupsFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) machineScope.Error(err, "unable to ensure security groups") return err } @@ -987,7 +987,7 @@ func (r *AWSMachineReconciler) registerInstanceToClassicLB(machineScope *scope.M if err := elbsvc.RegisterInstanceWithAPIServerELB(i); err != nil { r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB", "Failed to register control plane instance %q with classic load balancer: %v", i.ID, err) - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityError, err.Error()) + conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return errors.Wrapf(err, "could not register control plane instance %q with classic load balancer", i.ID) } r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulAttachControlPlaneELB", @@ -1019,7 +1019,7 @@ func (r *AWSMachineReconciler) registerInstanceToV2LB(machineScope *scope.Machin if err := elbsvc.RegisterInstanceWithAPIServerLB(instance, lb); err != nil { r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB", "Failed to register control plane instance %q with load balancer: %v", instance.ID, err) - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityError, err.Error()) + conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return errors.Wrapf(err, "could not register control plane instance %q with load balancer", instance.ID) } r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulAttachControlPlaneELB", @@ -1043,7 +1043,7 @@ func (r *AWSMachineReconciler) deregisterInstanceFromClassicLB(machineScope *sco if err := elbsvc.DeregisterInstanceFromAPIServerELB(instance); err != nil { r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB", "Failed to deregister control plane instance %q from load balancer: %v", instance.ID, err) - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1.ConditionSeverityError, err.Error()) + conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer", instance.ID) } @@ -1068,7 +1068,7 @@ func (r *AWSMachineReconciler) deregisterInstanceFromV2LB(machineScope *scope.Ma if err := elbsvc.DeregisterInstanceFromAPIServerLB(targetGroupArn, i); err != nil { r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB", "Failed to deregister control plane instance %q from load balancer: %v", i.ID, err) - 
conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1.ConditionSeverityError, err.Error()) + conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer", i.ID) } } diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go index 1c4d29ed86..c32d512c50 100644 --- a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go +++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go @@ -171,7 +171,6 @@ func (r *AWSManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, WithOptions(options). WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log.GetLogger(), r.WatchFilterValue)). Build(r) - if err != nil { return fmt.Errorf("failed setting up the AWSManagedControlPlane controller manager: %w", err) } @@ -340,12 +339,12 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, } if err := sgService.ReconcileSecurityGroups(); err != nil { - conditions.MarkFalse(awsManagedControlPlane, infrav1.ClusterSecurityGroupsReadyCondition, infrav1.ClusterSecurityGroupReconciliationFailedReason, clusterv1.ConditionSeverityError, err.Error()) + conditions.MarkFalse(awsManagedControlPlane, infrav1.ClusterSecurityGroupsReadyCondition, infrav1.ClusterSecurityGroupReconciliationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile general security groups for AWSManagedControlPlane %s/%s", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name) } if err := ec2Service.ReconcileBastion(); err != nil { - conditions.MarkFalse(awsManagedControlPlane, infrav1.BastionHostReadyCondition, infrav1.BastionHostFailedReason, clusterv1.ConditionSeverityError, err.Error()) + conditions.MarkFalse(awsManagedControlPlane, infrav1.BastionHostReadyCondition, infrav1.BastionHostFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return reconcile.Result{}, fmt.Errorf("failed to reconcile bastion host for AWSManagedControlPlane %s/%s: %w", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name, err) } @@ -354,7 +353,7 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, } if err := awsnodeService.ReconcileCNI(ctx); err != nil { - conditions.MarkFalse(managedScope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, infrav1.SecondaryCidrReconciliationFailedReason, clusterv1.ConditionSeverityError, err.Error()) + conditions.MarkFalse(managedScope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, infrav1.SecondaryCidrReconciliationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return reconcile.Result{}, fmt.Errorf("failed to reconcile control plane for AWSManagedControlPlane %s/%s: %w", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name, err) } @@ -370,7 +369,7 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, } } if err := authService.ReconcileIAMAuthenticator(ctx); err != nil { - conditions.MarkFalse(awsManagedControlPlane, ekscontrolplanev1.IAMAuthenticatorConfiguredCondition, ekscontrolplanev1.IAMAuthenticatorConfigurationFailedReason, clusterv1.ConditionSeverityError, err.Error()) + 
conditions.MarkFalse(awsManagedControlPlane, ekscontrolplanev1.IAMAuthenticatorConfiguredCondition, ekscontrolplanev1.IAMAuthenticatorConfigurationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile aws-iam-authenticator config for AWSManagedControlPlane %s/%s", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name) } conditions.MarkTrue(awsManagedControlPlane, ekscontrolplanev1.IAMAuthenticatorConfiguredCondition) diff --git a/controlplane/rosa/controllers/rosacontrolplane_controller.go b/controlplane/rosa/controllers/rosacontrolplane_controller.go index e846a7a718..629ed4eea2 100644 --- a/controlplane/rosa/controllers/rosacontrolplane_controller.go +++ b/controlplane/rosa/controllers/rosacontrolplane_controller.go @@ -98,7 +98,6 @@ func (r *ROSAControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr c WithOptions(options). WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(log.GetLogger(), r.WatchFilterValue)). Build(r) - if err != nil { return fmt.Errorf("failed setting up the AWSManagedControlPlane controller manager: %w", err) } @@ -222,6 +221,7 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc rosacontrolplanev1.ROSAControlPlaneValidCondition, rosacontrolplanev1.ROSAControlPlaneInvalidConfigurationReason, clusterv1.ConditionSeverityError, + "%s", validationMessage) // dont' requeue because input is invalid and manual intervention is needed. return ctrl.Result{}, nil @@ -278,6 +278,7 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc rosacontrolplanev1.ROSAControlPlaneReadyCondition, string(cluster.Status().State()), clusterv1.ConditionSeverityError, + "%s", cluster.Status().ProvisionErrorCode()) // Cluster is in an unrecoverable state, returning nil error so that the request doesn't get requeued. 
return ctrl.Result{}, nil @@ -287,6 +288,7 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc rosacontrolplanev1.ROSAControlPlaneReadyCondition, string(cluster.Status().State()), clusterv1.ConditionSeverityInfo, + "%s", cluster.Status().Description()) rosaScope.Info("waiting for cluster to become ready", "state", cluster.Status().State()) @@ -305,6 +307,7 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc rosacontrolplanev1.ROSAControlPlaneReadyCondition, rosacontrolplanev1.ReconciliationFailedReason, clusterv1.ConditionSeverityError, + "%s", err.Error()) return ctrl.Result{}, fmt.Errorf("failed to create OCM cluster: %w", err) } @@ -454,6 +457,7 @@ func (r *ROSAControlPlaneReconciler) updateOCMCluster(rosaScope *scope.ROSAContr rosacontrolplanev1.ROSAControlPlaneValidCondition, rosacontrolplanev1.ROSAControlPlaneInvalidConfigurationReason, clusterv1.ConditionSeverityError, + "%s", err.Error()) return err } @@ -475,6 +479,7 @@ func (r *ROSAControlPlaneReconciler) reconcileExternalAuth(ctx context.Context, rosacontrolplanev1.ExternalAuthConfiguredCondition, rosacontrolplanev1.ReconciliationFailedReason, clusterv1.ConditionSeverityError, + "%s", err.Error()) } else { conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ExternalAuthConfiguredCondition) @@ -993,6 +998,6 @@ func buildAPIEndpoint(cluster *cmv1.Cluster) (*clusterv1.APIEndpoint, error) { return &clusterv1.APIEndpoint{ Host: host, - Port: int32(port), // #nosec G109 + Port: int32(port), //#nosec G109 G115 }, nil } diff --git a/exp/api/v1beta2/awsmanagedmachinepool_webhook.go b/exp/api/v1beta2/awsmanagedmachinepool_webhook.go index effd87a2d1..6c7e901465 100644 --- a/exp/api/v1beta2/awsmanagedmachinepool_webhook.go +++ b/exp/api/v1beta2/awsmanagedmachinepool_webhook.go @@ -59,18 +59,18 @@ func (r *AWSManagedMachinePool) validateScaling() field.ErrorList { if r.Spec.Scaling != nil { //nolint:nestif minField := field.NewPath("spec", "scaling", "minSize") maxField := field.NewPath("spec", "scaling", "maxSize") - min := r.Spec.Scaling.MinSize - max := r.Spec.Scaling.MaxSize - if min != nil { - if *min < 0 { - allErrs = append(allErrs, field.Invalid(minField, *min, "must be greater or equal zero")) + minSize := r.Spec.Scaling.MinSize + maxSize := r.Spec.Scaling.MaxSize + if minSize != nil { + if *minSize < 0 { + allErrs = append(allErrs, field.Invalid(minField, *minSize, "must be greater or equal zero")) } - if max != nil && *max < *min { - allErrs = append(allErrs, field.Invalid(maxField, *max, fmt.Sprintf("must be greater than field %s", minField.String()))) + if maxSize != nil && *maxSize < *minSize { + allErrs = append(allErrs, field.Invalid(maxField, *maxSize, fmt.Sprintf("must be greater than field %s", minField.String()))) } } - if max != nil && *max < 0 { - allErrs = append(allErrs, field.Invalid(maxField, *max, "must be greater than zero")) + if maxSize != nil && *maxSize < 0 { + allErrs = append(allErrs, field.Invalid(maxField, *maxSize, "must be greater than zero")) } } if len(allErrs) == 0 { diff --git a/exp/controllers/awsmachinepool_controller.go b/exp/controllers/awsmachinepool_controller.go index 741cdcdb10..65ab252a95 100644 --- a/exp/controllers/awsmachinepool_controller.go +++ b/exp/controllers/awsmachinepool_controller.go @@ -242,7 +242,7 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP // Find existing ASG asg, err := r.findASG(machinePoolScope, asgsvc) if err != nil { - 
conditions.MarkUnknown(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGNotFoundReason, err.Error()) + conditions.MarkUnknown(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGNotFoundReason, "%s", err.Error()) return err } @@ -292,7 +292,7 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP if asg == nil { // Create new ASG if err := r.createPool(machinePoolScope, clusterScope); err != nil { - conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGProvisionFailedReason, clusterv1.ConditionSeverityError, err.Error()) + conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return err } return nil @@ -344,7 +344,7 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP machinePoolScope.SetAnnotation("cluster-api-provider-aws", "true") machinePoolScope.AWSMachinePool.Spec.ProviderIDList = providerIDList - machinePoolScope.AWSMachinePool.Status.Replicas = int32(len(providerIDList)) + machinePoolScope.AWSMachinePool.Status.Replicas = int32(len(providerIDList)) //#nosec G115 machinePoolScope.AWSMachinePool.Status.Ready = true conditions.MarkTrue(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition) diff --git a/exp/controllers/awsmachinepool_controller_test.go b/exp/controllers/awsmachinepool_controller_test.go index 4902dbb7e7..932d64e0a4 100644 --- a/exp/controllers/awsmachinepool_controller_test.go +++ b/exp/controllers/awsmachinepool_controller_test.go @@ -424,7 +424,8 @@ func TestAWSMachinePoolReconciler(t *testing.T) { }, }, }, - Subnets: []string{"subnet1", "subnet2"}} + Subnets: []string{"subnet1", "subnet2"}, + } reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) reconSvc.EXPECT().ReconcileTags(gomock.Any(), gomock.Any()).Return(nil) asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(&asg, nil).AnyTimes() @@ -442,7 +443,8 @@ func TestAWSMachinePoolReconciler(t *testing.T) { asg := expinfrav1.AutoScalingGroup{ MinSize: int32(0), MaxSize: int32(100), - Subnets: []string{"subnet1", "subnet2"}} + Subnets: []string{"subnet1", "subnet2"}, + } reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) reconSvc.EXPECT().ReconcileTags(gomock.Any(), gomock.Any()).Return(nil) asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(&asg, nil).AnyTimes() @@ -460,7 +462,8 @@ func TestAWSMachinePoolReconciler(t *testing.T) { asg := expinfrav1.AutoScalingGroup{ MinSize: int32(0), MaxSize: int32(2), - Subnets: []string{}} + Subnets: []string{}, + } reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) reconSvc.EXPECT().ReconcileTags(gomock.Any(), gomock.Any()).Return(nil) asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(&asg, nil).AnyTimes() @@ -800,8 +803,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { }) } -//TODO: This was taken from awsmachine_controller_test, i think it should be moved to elsewhere in both locations like test/helpers. - +// TODO: This was taken from awsmachine_controller_test, i think it should be moved to elsewhere in both locations like test/helpers. 
type conditionAssertion struct { conditionType clusterv1.ConditionType status corev1.ConditionStatus @@ -844,9 +846,9 @@ func TestDiffASG(t *testing.T) { existingASG *expinfrav1.AutoScalingGroup } tests := []struct { - name string - args args - want bool + name string + args args + wantDifference bool }{ { name: "replicas != asg.desiredCapacity", @@ -862,7 +864,7 @@ func TestDiffASG(t *testing.T) { DesiredCapacity: ptr.To[int32](1), }, }, - want: true, + wantDifference: true, }, { name: "replicas (nil) != asg.desiredCapacity", @@ -878,7 +880,7 @@ func TestDiffASG(t *testing.T) { DesiredCapacity: ptr.To[int32](1), }, }, - want: true, + wantDifference: true, }, { name: "replicas != asg.desiredCapacity (nil)", @@ -894,7 +896,7 @@ func TestDiffASG(t *testing.T) { DesiredCapacity: nil, }, }, - want: true, + wantDifference: true, }, { name: "maxSize != asg.maxSize", @@ -916,7 +918,7 @@ func TestDiffASG(t *testing.T) { MaxSize: 2, }, }, - want: true, + wantDifference: true, }, { name: "minSize != asg.minSize", @@ -940,7 +942,7 @@ func TestDiffASG(t *testing.T) { MinSize: 1, }, }, - want: true, + wantDifference: true, }, { name: "capacityRebalance != asg.capacityRebalance", @@ -966,7 +968,7 @@ func TestDiffASG(t *testing.T) { CapacityRebalance: false, }, }, - want: true, + wantDifference: true, }, { name: "MixedInstancesPolicy != asg.MixedInstancesPolicy", @@ -1000,7 +1002,7 @@ func TestDiffASG(t *testing.T) { MixedInstancesPolicy: &expinfrav1.MixedInstancesPolicy{}, }, }, - want: true, + wantDifference: true, }, { name: "MixedInstancesPolicy.InstancesDistribution != asg.MixedInstancesPolicy.InstancesDistribution", @@ -1053,7 +1055,7 @@ func TestDiffASG(t *testing.T) { }, }, }, - want: true, + wantDifference: true, }, { name: "MixedInstancesPolicy.InstancesDistribution unset", @@ -1100,7 +1102,7 @@ func TestDiffASG(t *testing.T) { }, }, }, - want: false, + wantDifference: false, }, { name: "SuspendProcesses != asg.SuspendProcesses", @@ -1141,7 +1143,7 @@ func TestDiffASG(t *testing.T) { CurrentlySuspendProcesses: []string{"Launch", "Terminate"}, }, }, - want: true, + wantDifference: true, }, { name: "all matches", @@ -1179,7 +1181,7 @@ func TestDiffASG(t *testing.T) { }, }, }, - want: false, + wantDifference: false, }, { name: "externally managed annotation ignores difference between desiredCapacity and replicas", @@ -1203,7 +1205,7 @@ func TestDiffASG(t *testing.T) { DesiredCapacity: ptr.To[int32](1), }, }, - want: false, + wantDifference: false, }, { name: "without externally managed annotation ignores difference between desiredCapacity and replicas", @@ -1222,13 +1224,17 @@ func TestDiffASG(t *testing.T) { DesiredCapacity: ptr.To[int32](1), }, }, - want: true, + wantDifference: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - g.Expect(diffASG(tt.args.machinePoolScope, tt.args.existingASG) != "").To(Equal(tt.want)) + if tt.wantDifference { + g.Expect(diffASG(tt.args.machinePoolScope, tt.args.existingASG)).ToNot(BeEmpty()) + } else { + g.Expect(diffASG(tt.args.machinePoolScope, tt.args.existingASG)).To(BeEmpty()) + } }) } } diff --git a/exp/controllers/rosamachinepool_controller.go b/exp/controllers/rosamachinepool_controller.go index 659793a857..2cc553ec2b 100644 --- a/exp/controllers/rosamachinepool_controller.go +++ b/exp/controllers/rosamachinepool_controller.go @@ -232,7 +232,7 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context, return ctrl.Result{}, fmt.Errorf("failed to ensure rosaMachinePool: %w", err) } - currentReplicas 
:= int32(nodePool.Status().CurrentReplicas()) + currentReplicas := int32(nodePool.Status().CurrentReplicas()) //#nosec G115 if annotations.ReplicasManagedByExternalAutoscaler(machinePool) { // Set MachinePool replicas to rosa autoscaling replicas if *machinePool.Spec.Replicas != currentReplicas { diff --git a/pkg/cloud/scope/fargate.go b/pkg/cloud/scope/fargate.go index 7a58137f6d..a5addbe134 100644 --- a/pkg/cloud/scope/fargate.go +++ b/pkg/cloud/scope/fargate.go @@ -178,6 +178,7 @@ func (s *FargateProfileScope) IAMReadyFalse(reason string, err string) error { expinfrav1.IAMFargateRolesReadyCondition, reason, severity, + "%s", err, ) if err := s.PatchObject(); err != nil { diff --git a/pkg/cloud/scope/managednodegroup.go b/pkg/cloud/scope/managednodegroup.go index e9421d7282..7411cc6df3 100644 --- a/pkg/cloud/scope/managednodegroup.go +++ b/pkg/cloud/scope/managednodegroup.go @@ -232,6 +232,7 @@ func (s *ManagedMachinePoolScope) NodegroupReadyFalse(reason string, err string) expinfrav1.EKSNodegroupReadyCondition, reason, severity, + "%s", err, ) if err := s.PatchObject(); err != nil { @@ -252,6 +253,7 @@ func (s *ManagedMachinePoolScope) IAMReadyFalse(reason string, err string) error expinfrav1.IAMNodegroupRolesReadyCondition, reason, severity, + "%s", err, ) if err := s.PatchObject(); err != nil { diff --git a/pkg/cloud/scope/rosamachinepool.go b/pkg/cloud/scope/rosamachinepool.go index 00d480ca3e..d0e643670a 100644 --- a/pkg/cloud/scope/rosamachinepool.go +++ b/pkg/cloud/scope/rosamachinepool.go @@ -99,8 +99,10 @@ func NewRosaMachinePoolScope(params RosaMachinePoolScopeParams) (*RosaMachinePoo return scope, nil } -var _ cloud.Session = &RosaMachinePoolScope{} -var _ cloud.SessionMetadata = &RosaMachinePoolScope{} +var ( + _ cloud.Session = &RosaMachinePoolScope{} + _ cloud.SessionMetadata = &RosaMachinePoolScope{} +) // RosaMachinePoolScope defines the basic context for an actuator to operate upon. type RosaMachinePoolScope struct { @@ -201,6 +203,7 @@ func (s *RosaMachinePoolScope) RosaMchinePoolReadyFalse(reason string, err strin expinfrav1.RosaMachinePoolReadyCondition, reason, severity, + "%s", err, ) if err := s.PatchObject(); err != nil { diff --git a/pkg/cloud/scope/session.go b/pkg/cloud/scope/session.go index 546e11089b..9819937ab7 100644 --- a/pkg/cloud/scope/session.go +++ b/pkg/cloud/scope/session.go @@ -59,8 +59,10 @@ type ServiceEndpoint struct { SigningRegion string } -var sessionCache sync.Map -var providerCache sync.Map +var ( + sessionCache sync.Map + providerCache sync.Map +) type sessionCacheEntry struct { session *session.Session @@ -68,8 +70,7 @@ type sessionCacheEntry struct { } // SessionInterface is the interface for AWSCluster and ManagedCluster to be used to get session using identityRef. 
-var SessionInterface interface { -} +var SessionInterface interface{} func sessionForRegion(region string, endpoint []ServiceEndpoint) (*session.Session, throttle.ServiceLimiters, error) { if s, ok := sessionCache.Load(region); ok { @@ -123,7 +124,7 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Se providers, err := getProvidersForCluster(context.Background(), k8sClient, clusterScoper, region, log) if err != nil { // could not get providers and retrieve the credentials - conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition, infrav1.PrincipalCredentialRetrievalFailedReason, clusterv1.ConditionSeverityError, err.Error()) + conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition, infrav1.PrincipalCredentialRetrievalFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return nil, nil, errors.Wrap(err, "Failed to get providers for cluster") } @@ -161,7 +162,7 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Se // Check if identity credentials can be retrieved. One reason this will fail is that source identity is not authorized for assume role. _, err := providers[0].Retrieve() if err != nil { - conditions.MarkUnknown(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition, infrav1.CredentialProviderBuildFailedReason, err.Error()) + conditions.MarkUnknown(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition, infrav1.CredentialProviderBuildFailedReason, "%s", err.Error()) // delete the existing session from cache. Otherwise, we give back a defective session on next method invocation with same cluster scope sessionCache.Delete(getSessionName(region, clusterScoper)) @@ -257,7 +258,8 @@ func buildProvidersForRef( clusterScoper cloud.SessionMetadata, ref *infrav1.AWSIdentityReference, region string, - log logger.Wrapper) ([]identity.AWSPrincipalTypeProvider, error) { + log logger.Wrapper, +) ([]identity.AWSPrincipalTypeProvider, error) { if ref == nil { log.Trace("AWSCluster does not have a IdentityRef specified") return providers, nil @@ -331,9 +333,9 @@ func setPrincipalUsageNotAllowedCondition(kind infrav1.AWSIdentityKind, identity errMsg := fmt.Sprintf(notPermittedError, kind, identityObjectKey.Name) if clusterScoper.IdentityRef().Name == identityObjectKey.Name { - conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition, infrav1.PrincipalUsageUnauthorizedReason, clusterv1.ConditionSeverityError, errMsg) + conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition, infrav1.PrincipalUsageUnauthorizedReason, clusterv1.ConditionSeverityError, "%s", errMsg) } else { - conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition, infrav1.SourcePrincipalUsageUnauthorizedReason, clusterv1.ConditionSeverityError, errMsg) + conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition, infrav1.SourcePrincipalUsageUnauthorizedReason, clusterv1.ConditionSeverityError, "%s", errMsg) } } diff --git a/pkg/cloud/services/autoscaling/autoscalinggroup.go b/pkg/cloud/services/autoscaling/autoscalinggroup.go index 9ddd4c086d..142ea011b1 100644 --- a/pkg/cloud/services/autoscaling/autoscalinggroup.go +++ b/pkg/cloud/services/autoscaling/autoscalinggroup.go @@ -45,9 +45,9 @@ func (s *Service) SDKToAutoScalingGroup(v *autoscaling.Group) (*expinfrav1.AutoS ID: 
aws.StringValue(v.AutoScalingGroupARN), Name: aws.StringValue(v.AutoScalingGroupName), // TODO(rudoi): this is just terrible - DesiredCapacity: aws.Int32(int32(aws.Int64Value(v.DesiredCapacity))), - MaxSize: int32(aws.Int64Value(v.MaxSize)), - MinSize: int32(aws.Int64Value(v.MinSize)), + DesiredCapacity: aws.Int32(int32(aws.Int64Value(v.DesiredCapacity))), //#nosec G115 + MaxSize: int32(aws.Int64Value(v.MaxSize)), //#nosec G115 + MinSize: int32(aws.Int64Value(v.MinSize)), //#nosec G115 CapacityRebalance: aws.BoolValue(v.CapacityRebalance), // TODO: determine what additional values go here and what else should be in the struct } @@ -517,7 +517,7 @@ func mapToTags(input map[string]string, resourceID *string) []*autoscaling.Tag { // SubnetIDs return subnet IDs of a AWSMachinePool based on given subnetIDs and filters. func (s *Service) SubnetIDs(scope *scope.MachinePoolScope) ([]string, error) { subnetIDs := make([]string, 0) - var inputFilters = make([]*ec2.Filter, 0) + inputFilters := make([]*ec2.Filter, 0) for _, subnet := range scope.AWSMachinePool.Spec.Subnets { switch { diff --git a/pkg/cloud/services/ec2/bastion.go b/pkg/cloud/services/ec2/bastion.go index 826d03c6ef..77349edb07 100644 --- a/pkg/cloud/services/ec2/bastion.go +++ b/pkg/cloud/services/ec2/bastion.go @@ -120,7 +120,7 @@ func (s *Service) DeleteBastion() error { } if err := s.TerminateInstanceAndWait(instance.ID); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) record.Warnf(s.scope.InfraCluster(), "FailedTerminateBastion", "Failed to terminate bastion instance %q: %v", instance.ID, err) return errors.Wrap(err, "unable to delete bastion instance") } diff --git a/pkg/cloud/services/ec2/launchtemplate.go b/pkg/cloud/services/ec2/launchtemplate.go index 245248d7d2..e3e9b219fa 100644 --- a/pkg/cloud/services/ec2/launchtemplate.go +++ b/pkg/cloud/services/ec2/launchtemplate.go @@ -73,13 +73,13 @@ func (s *Service) ReconcileLaunchTemplate( scope.Info("checking for existing launch template") launchTemplate, launchTemplateUserDataHash, launchTemplateUserDataSecretKey, err := ec2svc.GetLaunchTemplate(scope.LaunchTemplateName()) if err != nil { - conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, err.Error()) + conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, "%s", err.Error()) return err } imageID, err := ec2svc.DiscoverLaunchTemplateAMI(scope) if err != nil { - conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateCreateFailedReason, clusterv1.ConditionSeverityError, err.Error()) + conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateCreateFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return err } @@ -87,7 +87,7 @@ func (s *Service) ReconcileLaunchTemplate( scope.Info("no existing launch template found, creating") launchTemplateID, err := ec2svc.CreateLaunchTemplate(scope, imageID, *bootstrapDataSecretKey, bootstrapData) if err != nil { - conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateCreateFailedReason, clusterv1.ConditionSeverityError, 
err.Error()) + conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateCreateFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return err } @@ -100,7 +100,7 @@ func (s *Service) ReconcileLaunchTemplate( if scope.GetLaunchTemplateIDStatus() == "" { launchTemplateID, err := ec2svc.GetLaunchTemplateID(scope.LaunchTemplateName()) if err != nil { - conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, err.Error()) + conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, "%s", err.Error()) return err } scope.SetLaunchTemplateIDStatus(launchTemplateID) @@ -110,7 +110,7 @@ func (s *Service) ReconcileLaunchTemplate( if scope.GetLaunchTemplateLatestVersionStatus() == "" { launchTemplateVersion, err := ec2svc.GetLaunchTemplateLatestVersion(scope.GetLaunchTemplateIDStatus()) if err != nil { - conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, err.Error()) + conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, "%s", err.Error()) return err } scope.SetLaunchTemplateLatestVersionStatus(launchTemplateVersion) @@ -180,7 +180,7 @@ func (s *Service) ReconcileLaunchTemplate( if needsUpdate || tagsChanged || amiChanged || userDataSecretKeyChanged { if err := runPostLaunchTemplateUpdateOperation(); err != nil { - conditions.MarkFalse(scope.GetSetter(), expinfrav1.PostLaunchTemplateUpdateOperationCondition, expinfrav1.PostLaunchTemplateUpdateOperationFailedReason, clusterv1.ConditionSeverityError, err.Error()) + conditions.MarkFalse(scope.GetSetter(), expinfrav1.PostLaunchTemplateUpdateOperationCondition, expinfrav1.PostLaunchTemplateUpdateOperationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return err } conditions.MarkTrue(scope.GetSetter(), expinfrav1.PostLaunchTemplateUpdateOperationCondition) diff --git a/pkg/cloud/services/eks/eks.go b/pkg/cloud/services/eks/eks.go index 958230bccd..f47d4f9876 100644 --- a/pkg/cloud/services/eks/eks.go +++ b/pkg/cloud/services/eks/eks.go @@ -37,28 +37,28 @@ func (s *Service) ReconcileControlPlane(ctx context.Context) error { // Control Plane IAM Role if err := s.reconcileControlPlaneIAMRole(); err != nil { - conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.IAMControlPlaneRolesReadyCondition, ekscontrolplanev1.IAMControlPlaneRolesReconciliationFailedReason, clusterv1.ConditionSeverityError, err.Error()) + conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.IAMControlPlaneRolesReadyCondition, ekscontrolplanev1.IAMControlPlaneRolesReconciliationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return err } conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.IAMControlPlaneRolesReadyCondition) // EKS Cluster if err := s.reconcileCluster(ctx); err != nil { - conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneReadyCondition, ekscontrolplanev1.EKSControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, err.Error()) + conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneReadyCondition, ekscontrolplanev1.EKSControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return err } conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneReadyCondition) // EKS 
Addons if err := s.reconcileAddons(ctx); err != nil { - conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSAddonsConfiguredCondition, ekscontrolplanev1.EKSAddonsConfiguredFailedReason, clusterv1.ConditionSeverityError, err.Error()) + conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSAddonsConfiguredCondition, ekscontrolplanev1.EKSAddonsConfiguredFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return errors.Wrap(err, "failed reconciling eks addons") } conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSAddonsConfiguredCondition) // EKS Identity Provider if err := s.reconcileIdentityProvider(ctx); err != nil { - conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSIdentityProviderConfiguredCondition, ekscontrolplanev1.EKSIdentityProviderConfiguredFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSIdentityProviderConfiguredCondition, ekscontrolplanev1.EKSIdentityProviderConfiguredFailedReason, clusterv1.ConditionSeverityWarning, "%s", err.Error()) return errors.Wrap(err, "failed reconciling eks identity provider") } conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSIdentityProviderConfiguredCondition) @@ -100,6 +100,7 @@ func (s *NodegroupService) ReconcilePool(ctx context.Context) error { expinfrav1.IAMNodegroupRolesReadyCondition, expinfrav1.IAMNodegroupRolesReconciliationFailedReason, clusterv1.ConditionSeverityError, + "%s", err.Error(), ) return err @@ -112,6 +113,7 @@ func (s *NodegroupService) ReconcilePool(ctx context.Context) error { expinfrav1.EKSNodegroupReadyCondition, expinfrav1.EKSNodegroupReconciliationFailedReason, clusterv1.ConditionSeverityError, + "%s", err.Error(), ) return err diff --git a/pkg/cloud/services/eks/fargate.go b/pkg/cloud/services/eks/fargate.go index 30e51adf29..b6ef89682f 100644 --- a/pkg/cloud/services/eks/fargate.go +++ b/pkg/cloud/services/eks/fargate.go @@ -54,6 +54,7 @@ func (s *FargateService) Reconcile() (reconcile.Result, error) { expinfrav1.IAMFargateRolesReadyCondition, expinfrav1.IAMFargateRolesReconciliationFailedReason, clusterv1.ConditionSeverityError, + "%s", err.Error(), ) return reconcile.Result{}, err @@ -73,6 +74,7 @@ func (s *FargateService) Reconcile() (reconcile.Result, error) { clusterv1.ReadyCondition, expinfrav1.EKSFargateReconciliationFailedReason, clusterv1.ConditionSeverityError, + "%s", err.Error(), ) return reconcile.Result{}, err @@ -169,6 +171,7 @@ func (s *FargateService) ReconcileDelete() (reconcile.Result, error) { clusterv1.ReadyCondition, expinfrav1.EKSFargateReconciliationFailedReason, clusterv1.ConditionSeverityError, + "%s", err.Error(), ) return reconcile.Result{}, err @@ -185,6 +188,7 @@ func (s *FargateService) ReconcileDelete() (reconcile.Result, error) { expinfrav1.IAMFargateRolesReadyCondition, expinfrav1.IAMFargateRolesReconciliationFailedReason, clusterv1.ConditionSeverityError, + "%s", err.Error(), ) } diff --git a/pkg/cloud/services/eks/nodegroup.go b/pkg/cloud/services/eks/nodegroup.go index 763d14b494..10fd8e1fed 100644 --- a/pkg/cloud/services/eks/nodegroup.go +++ b/pkg/cloud/services/eks/nodegroup.go @@ -150,7 +150,7 @@ func (s *NodegroupService) remoteAccess() (*eks.RemoteAccessConfig, error) { // SourceSecurityGroups is validated to be empty if PublicAccess is true // but just in case we use an empty list to take advantage of the documented // API behavior - var sSGs = []string{} + sSGs := []string{} if !pool.RemoteAccess.Public { sSGs = 
pool.RemoteAccess.SourceSecurityGroups @@ -539,7 +539,7 @@ func (s *NodegroupService) reconcileNodegroup(ctx context.Context) error { if annotations.ReplicasManagedByExternalAutoscaler(s.scope.MachinePool) { // Set MachinePool replicas to the node group DesiredCapacity - ngDesiredCapacity := int32(aws.Int64Value(ng.ScalingConfig.DesiredSize)) + ngDesiredCapacity := int32(aws.Int64Value(ng.ScalingConfig.DesiredSize)) //#nosec G115 if *s.scope.MachinePool.Spec.Replicas != ngDesiredCapacity { s.scope.Info("Setting MachinePool replicas to node group DesiredCapacity", "local", *s.scope.MachinePool.Spec.Replicas, @@ -608,7 +608,7 @@ func (s *NodegroupService) setStatus(ng *eks.Nodegroup) error { var replicas int32 var providerIDList []string for _, group := range groups.AutoScalingGroups { - replicas += int32(len(group.Instances)) + replicas += int32(len(group.Instances)) //#nosec G115 for _, instance := range group.Instances { providerIDList = append(providerIDList, fmt.Sprintf("aws:///%s/%s", *instance.AvailabilityZone, *instance.InstanceId)) } diff --git a/pkg/cloud/services/elb/loadbalancer.go b/pkg/cloud/services/elb/loadbalancer.go index c7beb578d4..9442284555 100644 --- a/pkg/cloud/services/elb/loadbalancer.go +++ b/pkg/cloud/services/elb/loadbalancer.go @@ -599,7 +599,7 @@ func (s *Service) deleteAPIServerELB() error { s.scope.Debug("deleting load balancer", "name", elbName) if err := s.deleteClassicELB(elbName); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) return err } @@ -705,7 +705,7 @@ func (s *Service) deleteExistingNLB(lbSpec *infrav1.AWSLoadBalancerSpec) error { } s.scope.Debug("deleting load balancer", "name", name) if err := s.deleteLB(lb.ARN); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) return err } @@ -1615,7 +1615,7 @@ func (s *Service) reconcileTargetGroupsAndListeners(lbARN string, spec *infrav1. var listener *elbv2.Listener for _, l := range existingListeners.Listeners { - if l.DefaultActions != nil && len(l.DefaultActions) > 0 && *l.DefaultActions[0].TargetGroupArn == *group.TargetGroupArn { + if len(l.DefaultActions) > 0 && *l.DefaultActions[0].TargetGroupArn == *group.TargetGroupArn { listener = l break } diff --git a/pkg/cloud/services/network/network.go b/pkg/cloud/services/network/network.go index e97024fad7..aacfe97b44 100644 --- a/pkg/cloud/services/network/network.go +++ b/pkg/cloud/services/network/network.go @@ -32,56 +32,56 @@ func (s *Service) ReconcileNetwork() (err error) { // VPC. 
diff --git a/pkg/cloud/services/network/network.go b/pkg/cloud/services/network/network.go
index e97024fad7..aacfe97b44 100644
--- a/pkg/cloud/services/network/network.go
+++ b/pkg/cloud/services/network/network.go
@@ -32,56 +32,56 @@ func (s *Service) ReconcileNetwork() (err error) {
 	// VPC.
 	if err := s.reconcileVPC(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, infrav1.VpcReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, infrav1.VpcReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
 		return err
 	}
 	conditions.MarkTrue(s.scope.InfraCluster(), infrav1.VpcReadyCondition)
 
 	// Secondary CIDR
 	if err := s.associateSecondaryCidr(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, infrav1.SecondaryCidrReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, infrav1.SecondaryCidrReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
 		return err
 	}
 
 	// Subnets.
 	if err := s.reconcileSubnets(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, infrav1.SubnetsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, infrav1.SubnetsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
 		return err
 	}
 
 	// Internet Gateways.
 	if err := s.reconcileInternetGateways(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, infrav1.InternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, infrav1.InternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
 		return err
 	}
 
 	// Carrier Gateway.
 	if err := s.reconcileCarrierGateway(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, infrav1.CarrierGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, infrav1.CarrierGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
 		return err
 	}
 
 	// Egress Only Internet Gateways.
 	if err := s.reconcileEgressOnlyInternetGateways(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, infrav1.EgressOnlyInternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, infrav1.EgressOnlyInternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
 		return err
 	}
 
 	// NAT Gateways.
 	if err := s.reconcileNatGateways(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, infrav1.NatGatewaysReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, infrav1.NatGatewaysReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
 		return err
 	}
 
 	// Routing tables.
 	if err := s.reconcileRouteTables(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, infrav1.RouteTableReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, infrav1.RouteTableReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
 		return err
 	}
 
 	// VPC Endpoints.
 	if err := s.reconcileVPCEndpoints(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, infrav1.VpcEndpointsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, infrav1.VpcEndpointsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
 		return err
 	}
@@ -118,7 +118,7 @@ func (s *Service) DeleteNetwork() (err error) {
 	}
 
 	if err := s.deleteVPCEndpoints(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
 	conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
@@ -130,7 +130,7 @@ func (s *Service) DeleteNetwork() (err error) {
 	}
 
 	if err := s.deleteRouteTables(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
 	conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
@@ -142,7 +142,7 @@ func (s *Service) DeleteNetwork() (err error) {
 	}
 
 	if err := s.deleteNatGateways(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
 	conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
@@ -159,7 +159,7 @@ func (s *Service) DeleteNetwork() (err error) {
 	}
 
 	if err := s.deleteInternetGateways(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
 	conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
@@ -167,7 +167,7 @@ func (s *Service) DeleteNetwork() (err error) {
 	// Carrier Gateway.
 	if s.scope.VPC().CarrierGatewayID != nil {
 		if err := s.deleteCarrierGateway(); err != nil {
-			conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
+			conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error())
 			return err
 		}
 		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
@@ -180,7 +180,7 @@ func (s *Service) DeleteNetwork() (err error) {
 	}
 
 	if err := s.deleteEgressOnlyInternetGateways(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
 	conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
@@ -192,7 +192,7 @@ func (s *Service) DeleteNetwork() (err error) {
 	}
 
 	if err := s.deleteSubnets(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
 	conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
@@ -200,7 +200,7 @@ func (s *Service) DeleteNetwork() (err error) {
 	// Secondary CIDR.
 	conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "")
 	if err := s.disassociateSecondaryCidr(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, "DisassociateFailed", clusterv1.ConditionSeverityWarning, err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, "DisassociateFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
@@ -211,7 +211,7 @@ func (s *Service) DeleteNetwork() (err error) {
 	}
 
 	if err := s.deleteVPC(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
 	conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
diff --git a/pkg/cloud/services/securitygroup/securitygroups.go b/pkg/cloud/services/securitygroup/securitygroups.go
index f1f82193cc..26116163c7 100644
--- a/pkg/cloud/services/securitygroup/securitygroups.go
+++ b/pkg/cloud/services/securitygroup/securitygroups.go
@@ -329,7 +329,7 @@ func (s *Service) DeleteSecurityGroups() error {
 		sg := clusterGroups[i]
 		current := sg.IngressRules
 		if err := s.revokeAllSecurityGroupIngressRules(sg.ID); awserrors.IsIgnorableSecurityGroupError(err) != nil { //nolint:gocritic
-			conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
+			conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error())
 			return err
 		}
@@ -341,7 +341,7 @@ func (s *Service) DeleteSecurityGroups() error {
 	}
 
 	if err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
 	conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
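The cidr.go hunks below also pick up the prealloc linter: when the number of appends is known in advance, the slice is created with make and an explicit capacity, so the backing array is allocated once instead of being regrown on each append. A minimal sketch of the pattern (illustrative values, not the CAPA implementation):

    package main

    import "fmt"

    func main() {
        numSubnets := 4
        // Allocate capacity up front; append then never reallocates.
        subnets := make([]string, 0, numSubnets)
        for i := 0; i < numSubnets; i++ {
            subnets = append(subnets, fmt.Sprintf("10.0.%d.0/24", i))
        }
        fmt.Println(subnets)
    }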
diff --git a/pkg/internal/cidr/cidr.go b/pkg/internal/cidr/cidr.go
index 30f0ee4596..e9039f0546 100644
--- a/pkg/internal/cidr/cidr.go
+++ b/pkg/internal/cidr/cidr.go
@@ -45,7 +45,7 @@ func SplitIntoSubnetsIPv4(cidrBlock string, numSubnets int) ([]*net.IPNet, error
 		return nil, errors.Errorf("cidr %s cannot accommodate %d subnets", cidrBlock, numSubnets)
 	}
 
-	var subnets []*net.IPNet
+	subnets := make([]*net.IPNet, 0, numSubnets)
 	for i := 0; i < numSubnets; i++ {
 		ip4 := parent.IP.To4()
 		if ip4 == nil {
@@ -53,7 +53,7 @@ func SplitIntoSubnetsIPv4(cidrBlock string, numSubnets int) ([]*net.IPNet, error
 		}
 
 		n := binary.BigEndian.Uint32(ip4)
-		n += uint32(i) << uint(32-modifiedNetworkLen)
+		n += uint32(i) << uint(32-modifiedNetworkLen) //#nosec G115
 		subnetIP := make(net.IP, len(ip4))
 		binary.BigEndian.PutUint32(subnetIP, n)
@@ -89,9 +89,7 @@ func SplitIntoSubnetsIPv6(cidrBlock string, numSubnets int) ([]*net.IPNet, error
 	}
 	// update the prefix to 64.
 	ipv6CidrBlock.Mask = net.CIDRMask(64, 128)
-	var (
-		subnets []*net.IPNet
-	)
+	var subnets []*net.IPNet
 	for i := 0; i < numSubnets; i++ {
 		ipv6CidrBlock.IP[subnetIDLocation]++
 		newIP := net.ParseIP(ipv6CidrBlock.IP.String())
diff --git a/pkg/rosa/helpers.go b/pkg/rosa/helpers.go
index f5f8cd1817..e73493a703 100644
--- a/pkg/rosa/helpers.go
+++ b/pkg/rosa/helpers.go
@@ -35,6 +35,6 @@ func handleErr(res *ocmerrors.Error, err error) error {
 			"Go to https://www.redhat.com/wapps/tnc/ackrequired?site=ocm&event=register\n" +
 			"Once you accept the terms, you will need to retry the action that was blocked."
 	}
-	errType := errors.ErrorType(res.Status())
+	errType := errors.ErrorType(res.Status()) //#nosec G115
 	return errType.Set(errors.Errorf("%s", msg))
 }
diff --git a/test/e2e/shared/aws.go b/test/e2e/shared/aws.go
index 31a6ac283a..7a22146e84 100644
--- a/test/e2e/shared/aws.go
+++ b/test/e2e/shared/aws.go
@@ -1207,7 +1207,7 @@ func GetVPCByName(e2eCtx *E2EContext, vpcName string) (*ec2.Vpc, error) {
 	if err != nil {
 		return nil, err
 	}
-	if result.Vpcs == nil || len(result.Vpcs) == 0 {
+	if len(result.Vpcs) == 0 {
 		return nil, awserrors.NewNotFound("Vpc not found")
 	}
 	return result.Vpcs[0], nil
@@ -2291,7 +2291,7 @@ func GetMountTarget(e2eCtx *E2EContext, mountTargetID string) (*efs.MountTargetD
 	if err != nil {
 		return nil, err
 	}
-	if result.MountTargets == nil || len(result.MountTargets) == 0 {
+	if len(result.MountTargets) == 0 {
 		return nil, &efs.MountTargetNotFound{
 			ErrorCode: aws.String(efs.ErrCodeMountTargetNotFound),
 		}
diff --git a/test/e2e/shared/exec.go b/test/e2e/shared/exec.go
index c57abaafe2..16425a658d 100644
--- a/test/e2e/shared/exec.go
+++ b/test/e2e/shared/exec.go
@@ -119,7 +119,7 @@ func commandsForMachine(ctx context.Context, e2eCtx *E2EContext, f *os.File, ins
 			return
 		}
 		result, _, err := e.Expect(shellStart, 20*time.Second)
-		if err := os.WriteFile(logFile, []byte(result), os.ModePerm); err != nil {
+		if err := os.WriteFile(logFile, []byte(result), 0o600); err != nil {
 			fmt.Fprintf(f, "error writing log file: err=%s", err)
 			return
 		}
diff --git a/test/e2e/suites/unmanaged/helpers_test.go b/test/e2e/suites/unmanaged/helpers_test.go
index 042411c611..c8a69837c4 100644
--- a/test/e2e/suites/unmanaged/helpers_test.go
+++ b/test/e2e/suites/unmanaged/helpers_test.go
@@ -468,7 +468,7 @@ func deleteMountTarget(mountTarget *efs.MountTargetDescription) {
 	Expect(err).NotTo(HaveOccurred())
 	Eventually(func(g Gomega) {
 		_, err = shared.GetMountTarget(e2eCtx, *mountTarget.MountTargetId)
-		g.Expect(err).ShouldNot(Equal(nil))
+		g.Expect(err).ShouldNot(BeNil())
 		aerr, ok := err.(awserr.Error)
 		g.Expect(ok).To(BeTrue())
 		g.Expect(aerr.Code()).To(Equal(efs.ErrCodeMountTargetNotFound))