
Commit 85d2880

Extend log conventions, fix log keys, use upper case for logs
Signed-off-by: Stefan Büringer [email protected]
1 parent a066d37 commit 85d2880

56 files changed (+212 -188 lines)


bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go (+7 -7)

@@ -991,7 +991,7 @@ func (r *KubeadmConfigReconciler) reconcileDiscovery(ctx context.Context, cluste
 
         apiServerEndpoint = cluster.Spec.ControlPlaneEndpoint.String()
         config.Spec.JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint = apiServerEndpoint
-        log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint", "APIServerEndpoint", apiServerEndpoint)
+        log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint", "apiServerEndpoint", apiServerEndpoint)
     }
 
     // if BootstrapToken already contains a token, respect it; otherwise create a new bootstrap token for the node to join

@@ -1029,39 +1029,39 @@ func (r *KubeadmConfigReconciler) reconcileTopLevelObjectSettings(ctx context.Co
     // then use Cluster's ControlPlaneEndpoint as a control plane endpoint for the Kubernetes cluster.
     if config.Spec.ClusterConfiguration.ControlPlaneEndpoint == "" && cluster.Spec.ControlPlaneEndpoint.IsValid() {
         config.Spec.ClusterConfiguration.ControlPlaneEndpoint = cluster.Spec.ControlPlaneEndpoint.String()
-        log.V(3).Info("Altering ClusterConfiguration.ControlPlaneEndpoint", "ControlPlaneEndpoint", config.Spec.ClusterConfiguration.ControlPlaneEndpoint)
+        log.V(3).Info("Altering ClusterConfiguration.ControlPlaneEndpoint", "controlPlaneEndpoint", config.Spec.ClusterConfiguration.ControlPlaneEndpoint)
     }
 
     // If there are no ClusterName defined in ClusterConfiguration, use Cluster.Name
     if config.Spec.ClusterConfiguration.ClusterName == "" {
         config.Spec.ClusterConfiguration.ClusterName = cluster.Name
-        log.V(3).Info("Altering ClusterConfiguration.ClusterName", "ClusterName", config.Spec.ClusterConfiguration.ClusterName)
+        log.V(3).Info("Altering ClusterConfiguration.ClusterName", "clusterName", config.Spec.ClusterConfiguration.ClusterName)
     }
 
     // If there are no Network settings defined in ClusterConfiguration, use ClusterNetwork settings, if defined
     if cluster.Spec.ClusterNetwork != nil {
         if config.Spec.ClusterConfiguration.Networking.DNSDomain == "" && cluster.Spec.ClusterNetwork.ServiceDomain != "" {
             config.Spec.ClusterConfiguration.Networking.DNSDomain = cluster.Spec.ClusterNetwork.ServiceDomain
-            log.V(3).Info("Altering ClusterConfiguration.Networking.DNSDomain", "DNSDomain", config.Spec.ClusterConfiguration.Networking.DNSDomain)
+            log.V(3).Info("Altering ClusterConfiguration.Networking.DNSDomain", "dnsDomain", config.Spec.ClusterConfiguration.Networking.DNSDomain)
         }
         if config.Spec.ClusterConfiguration.Networking.ServiceSubnet == "" &&
             cluster.Spec.ClusterNetwork.Services != nil &&
             len(cluster.Spec.ClusterNetwork.Services.CIDRBlocks) > 0 {
             config.Spec.ClusterConfiguration.Networking.ServiceSubnet = cluster.Spec.ClusterNetwork.Services.String()
-            log.V(3).Info("Altering ClusterConfiguration.Networking.ServiceSubnet", "ServiceSubnet", config.Spec.ClusterConfiguration.Networking.ServiceSubnet)
+            log.V(3).Info("Altering ClusterConfiguration.Networking.ServiceSubnet", "serviceSubnet", config.Spec.ClusterConfiguration.Networking.ServiceSubnet)
         }
         if config.Spec.ClusterConfiguration.Networking.PodSubnet == "" &&
             cluster.Spec.ClusterNetwork.Pods != nil &&
             len(cluster.Spec.ClusterNetwork.Pods.CIDRBlocks) > 0 {
             config.Spec.ClusterConfiguration.Networking.PodSubnet = cluster.Spec.ClusterNetwork.Pods.String()
-            log.V(3).Info("Altering ClusterConfiguration.Networking.PodSubnet", "PodSubnet", config.Spec.ClusterConfiguration.Networking.PodSubnet)
+            log.V(3).Info("Altering ClusterConfiguration.Networking.PodSubnet", "podSubnet", config.Spec.ClusterConfiguration.Networking.PodSubnet)
         }
     }
 
     // If there are no KubernetesVersion settings defined in ClusterConfiguration, use Version from machine, if defined
     if config.Spec.ClusterConfiguration.KubernetesVersion == "" && machine.Spec.Version != nil {
         config.Spec.ClusterConfiguration.KubernetesVersion = *machine.Spec.Version
-        log.V(3).Info("Altering ClusterConfiguration.KubernetesVersion", "KubernetesVersion", config.Spec.ClusterConfiguration.KubernetesVersion)
+        log.V(3).Info("Altering ClusterConfiguration.KubernetesVersion", "kubernetesVersion", config.Spec.ClusterConfiguration.KubernetesVersion)
     }
 }
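
Note: the pattern in this file repeats throughout the commit: the message text is unchanged, while the key in each key/value pair switches from UpperCamelCase to lowerCamelCase. A minimal, hypothetical sketch of that convention using klog's logr.Logger (the kubernetesVersion value is invented, and the flag wiring is only there so the V(3) line can actually print when run with -v=3):

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	// Wire up klog's flags so running with -v=3 enables the V(3) line below.
	klog.InitFlags(nil)
	flag.Parse()

	log := klog.Background()

	kubernetesVersion := "v1.28.0"
	// Convention applied by this commit: the message starts with a capital
	// letter, and key/value keys use lowerCamelCase.
	log.V(3).Info("Altering ClusterConfiguration.KubernetesVersion", "kubernetesVersion", kubernetesVersion)
}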

bootstrap/kubeadm/main.go (+1 -1)

@@ -273,7 +273,7 @@ func main() {
     setupWebhooks(mgr)
     setupReconcilers(ctx, mgr)
 
-    setupLog.Info("starting manager", "version", version.Get().String())
+    setupLog.Info("Starting manager", "version", version.Get().String())
     if err := mgr.Start(ctx); err != nil {
         setupLog.Error(err, "problem running manager")
         os.Exit(1)

cmd/clusterctl/client/alpha/machinedeployment.go (+6 -4)

@@ -28,6 +28,7 @@ import (
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/types"
+    "k8s.io/klog/v2"
     "sigs.k8s.io/controller-runtime/pkg/client"
 
     clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"

@@ -134,26 +135,27 @@ func getMachineSetsForDeployment(ctx context.Context, proxy cluster.Proxy, md *c
     filtered := make([]*clusterv1.MachineSet, 0, len(machineSets.Items))
     for idx := range machineSets.Items {
         ms := &machineSets.Items[idx]
+        log := log.WithValues("MachineSet", klog.KObj(ms))
 
         // Skip this MachineSet if its controller ref is not pointing to this MachineDeployment
         if !metav1.IsControlledBy(ms, md) {
-            log.V(5).Info("Skipping MachineSet, controller ref does not match MachineDeployment", "machineset", ms.Name)
+            log.V(5).Info("Skipping MachineSet, controller ref does not match MachineDeployment")
             continue
         }
 
         selector, err := metav1.LabelSelectorAsSelector(&md.Spec.Selector)
         if err != nil {
-            log.V(5).Info("Skipping MachineSet, failed to get label selector from spec selector", "machineset", ms.Name)
+            log.V(5).Info("Skipping MachineSet, failed to get label selector from spec selector")
             continue
         }
         // If a MachineDeployment with a nil or empty selector creeps in, it should match nothing, not everything.
         if selector.Empty() {
-            log.V(5).Info("Skipping MachineSet as the selector is empty", "machineset", ms.Name)
+            log.V(5).Info("Skipping MachineSet as the selector is empty")
             continue
         }
         // Skip this MachineSet if selector does not match
         if !selector.Matches(labels.Set(ms.Labels)) {
-            log.V(5).Info("Skipping MachineSet, label mismatch", "machineset", ms.Name)
+            log.V(5).Info("Skipping MachineSet, label mismatch")
             continue
         }
         filtered = append(filtered, ms)
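
Note: this change also shows the second convention in the commit: instead of repeating a "machineset" key on every message, the MachineSet is attached once to the logger via WithValues and klog.KObj, and the individual messages drop the key. A rough, self-contained sketch of the pattern (the MachineSet namespace and name are made up; the V(5) line only prints when the program runs with -v=5 or higher):

package main

import (
	"flag"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse()

	log := klog.Background()

	ms := &clusterv1.MachineSet{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "md-1-5b7c9"}}

	// Attach the object once; klog.KObj renders it as "default/md-1-5b7c9".
	log = log.WithValues("MachineSet", klog.KObj(ms))

	// Every message logged through this logger now carries the MachineSet,
	// so the per-call "machineset" key/value pair is no longer needed.
	log.V(5).Info("Skipping MachineSet, label mismatch")
}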

cmd/clusterctl/client/cluster/cert_manager.go (+2 -2)

@@ -177,7 +177,7 @@ func (cm *certManagerClient) EnsureInstalled(ctx context.Context) error {
 func (cm *certManagerClient) install(ctx context.Context, version string, objs []unstructured.Unstructured) error {
     log := logf.Log
 
-    log.Info("Installing cert-manager", "Version", version)
+    log.Info("Installing cert-manager", "version", version)
 
     // Install all cert-manager manifests
     createCertManagerBackoff := newWriteBackoff()

@@ -282,7 +282,7 @@ func (cm *certManagerClient) EnsureLatestVersion(ctx context.Context) error {
     // delete the cert-manager version currently installed (because it should be upgraded);
     // NOTE: CRDs, and namespace are preserved in order to avoid deletion of user objects;
     // web-hooks are preserved to avoid a user attempting to CREATE a cert-manager resource while the upgrade is in progress.
-    log.Info("Deleting cert-manager", "Version", currentVersion)
+    log.Info("Deleting cert-manager", "version", currentVersion)
     if err := cm.deleteObjs(ctx, objs); err != nil {
         return err
     }

cmd/clusterctl/client/cluster/client.go (+1 -1)

@@ -227,7 +227,7 @@ func retryWithExponentialBackoff(ctx context.Context, opts wait.Backoff, operati
         i++
         if err := operation(ctx); err != nil {
             if i < opts.Steps {
-                log.V(5).Info("Retrying with backoff", "Cause", err.Error())
+                log.V(5).Info("Retrying with backoff", "cause", err.Error())
                 return false, nil
             }
             return false, err

cmd/clusterctl/client/cluster/components.go (+3 -2)

@@ -30,6 +30,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime/schema"
     kerrors "k8s.io/apimachinery/pkg/util/errors"
     "k8s.io/apimachinery/pkg/util/sets"
+    "k8s.io/klog/v2"
     "sigs.k8s.io/controller-runtime/pkg/client"
 
     clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"

@@ -135,7 +136,7 @@ func (p *providerComponents) createObj(ctx context.Context, obj unstructured.Uns
 
 func (p *providerComponents) Delete(ctx context.Context, options DeleteOptions) error {
     log := logf.Log
-    log.Info("Deleting", "Provider", options.Provider.Name, "Version", options.Provider.Version, "Namespace", options.Provider.Namespace)
+    log.Info("Deleting", "Provider", klog.KObj(&options.Provider), "providerVersion", options.Provider.Version)
 
     // Fetch all the components belonging to a provider.
     // We want that the delete operation is able to clean-up everything.

@@ -264,7 +265,7 @@ func (p *providerComponents) DeleteWebhookNamespace(ctx context.Context) error {
 
 func (p *providerComponents) ValidateNoObjectsExist(ctx context.Context, provider clusterctlv1.Provider) error {
     log := logf.Log
-    log.Info("Checking for CRs", "Provider", provider.Name, "Version", provider.Version, "Namespace", provider.Namespace)
+    log.Info("Checking for CRs", "Provider", klog.KObj(&provider), "providerVersion", provider.Version)
 
     proxyClient, err := p.proxy.NewClient(ctx)
     if err != nil {
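
Note: the separate "Namespace" key can be dropped here because klog.KObj (like klog.KRef) produces an ObjectRef whose string form is already "namespace/name". A tiny sketch of that rendering, using a made-up Pod rather than the clusterctl Provider type:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "capi-system", Name: "capi-controller-manager"}}

	// Both helpers render as "namespace/name", so a separate "Namespace"
	// key/value pair in the log call becomes redundant.
	fmt.Println(klog.KObj(pod))                                       // capi-system/capi-controller-manager
	fmt.Println(klog.KRef("capi-system", "capi-controller-manager")) // capi-system/capi-controller-manager
}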

cmd/clusterctl/client/cluster/crd_migration.go (+5 -4)

@@ -30,6 +30,7 @@ import (
     "k8s.io/apimachinery/pkg/util/rand"
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/apimachinery/pkg/util/wait"
+    "k8s.io/klog/v2"
     "sigs.k8s.io/controller-runtime/pkg/client"
 
     "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/scheme"

@@ -117,7 +118,7 @@ func (m *crdMigrator) run(ctx context.Context, newCRD *apiextensionsv1.CustomRes
     // Note: We want to migrate objects to new storage versions as soon as possible
     // to prevent unnecessary conversion webhook calls.
     if currentStatusStoredVersions.Len() == 1 && currentCRD.Status.StoredVersions[0] == currentStorageVersion {
-        log.V(2).Info("CRD migration check passed", "name", newCRD.Name)
+        log.V(2).Info("CRD migration check passed", "CustomResourceDefinition", klog.KObj(newCRD))
         return false, nil
     }
 

@@ -141,8 +142,8 @@
 }
 
 func (m *crdMigrator) migrateResourcesForCRD(ctx context.Context, crd *apiextensionsv1.CustomResourceDefinition, currentStorageVersion string) error {
-    log := logf.Log
-    log.Info("Migrating CRs, this operation may take a while...", "kind", crd.Spec.Names.Kind)
+    log := logf.Log.WithValues("CustomResourceDefinition", klog.KObj(crd))
+    log.Info("Migrating CRs, this operation may take a while...")
 
     list := &unstructured.UnstructuredList{}
     list.SetGroupVersionKind(schema.GroupVersionKind{

@@ -182,7 +183,7 @@ func (m *crdMigrator) migrateResourcesForCRD(ctx context.Context, crd *apiextens
         }
     }
 
-    log.V(2).Info(fmt.Sprintf("CR migration completed: migrated %d objects", i), "kind", crd.Spec.Names.Kind)
+    log.V(2).Info(fmt.Sprintf("CR migration completed: migrated %d objects", i))
     return nil
 }

cmd/clusterctl/client/cluster/installer.go (+3 -3)

@@ -108,16 +108,16 @@ func (i *providerInstaller) Install(ctx context.Context, opts InstallOptions) ([
 
 func installComponentsAndUpdateInventory(ctx context.Context, components repository.Components, providerComponents ComponentsClient, providerInventory InventoryClient) error {
     log := logf.Log
-    log.Info("Installing", "Provider", components.ManifestLabel(), "Version", components.Version(), "TargetNamespace", components.TargetNamespace())
+    log.Info("Installing", "provider", components.ManifestLabel(), "version", components.Version(), "targetNamespace", components.TargetNamespace())
 
     inventoryObject := components.InventoryObject()
 
-    log.V(1).Info("Creating objects", "Provider", components.ManifestLabel(), "Version", components.Version(), "TargetNamespace", components.TargetNamespace())
+    log.V(1).Info("Creating objects", "provider", components.ManifestLabel(), "version", components.Version(), "targetNamespace", components.TargetNamespace())
     if err := providerComponents.Create(ctx, components.Objs()); err != nil {
         return err
     }
 
-    log.V(1).Info("Creating inventory entry", "Provider", components.ManifestLabel(), "Version", components.Version(), "TargetNamespace", components.TargetNamespace())
+    log.V(1).Info("Creating inventory entry", "provider", components.ManifestLabel(), "version", components.Version(), "targetNamespace", components.TargetNamespace())
     return providerInventory.Create(ctx, inventoryObject)
 }

cmd/clusterctl/client/cluster/objectgraph.go (+2 -2)

@@ -462,7 +462,7 @@ func (o *objectGraph) Discovery(ctx context.Context, namespace string) error {
             continue
         }
 
-        log.V(5).Info(typeMeta.Kind, "Count", len(objList.Items))
+        log.V(5).Info(typeMeta.Kind, "count", len(objList.Items))
         for i := range objList.Items {
             obj := objList.Items[i]
             if err := o.addObj(&obj); err != nil {

@@ -471,7 +471,7 @@ func (o *objectGraph) Discovery(ctx context.Context, namespace string) error {
         }
     }
 
-    log.V(1).Info("Total objects", "Count", len(o.uidToNode))
+    log.V(1).Info("Total objects", "count", len(o.uidToNode))
 
     // Completes the graph by searching for soft ownership relations such as secrets linked to the cluster
     // by a naming convention (without any explicit OwnerReference).

cmd/clusterctl/client/cluster/upgrader.go (+4 -2)

@@ -26,6 +26,7 @@ import (
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/apimachinery/pkg/util/version"
     "k8s.io/apimachinery/pkg/util/wait"
+    "k8s.io/klog/v2"
     "k8s.io/utils/ptr"
     "sigs.k8s.io/controller-runtime/pkg/client"
 

@@ -452,7 +453,7 @@ func (u *providerUpgrader) doUpgrade(ctx context.Context, upgradePlan *UpgradePl
 
 func (u *providerUpgrader) scaleDownProvider(ctx context.Context, provider clusterctlv1.Provider) error {
     log := logf.Log
-    log.Info("Scaling down", "Provider", provider.Name, "Version", provider.Version, "Namespace", provider.Namespace)
+    log.Info("Scaling down", "Provider", klog.KObj(&provider), "providerVersion", &provider.Version)
 
     cs, err := u.proxy.NewClient(ctx)
     if err != nil {

@@ -473,7 +474,8 @@ func (u *providerUpgrader) scaleDownProvider(ctx context.Context, provider clust
 
     // Scale down provider Deployments.
     for _, deployment := range deploymentList.Items {
-        log.V(5).Info("Scaling down", "Deployment", deployment.Name, "Namespace", deployment.Namespace)
+        deployment := deployment
+        log.V(5).Info("Scaling down", "Deployment", klog.KObj(&deployment))
         if err := scaleDownDeployment(ctx, cs, deployment); err != nil {
             return err
         }
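
Note: the added `deployment := deployment` line exists because the next line takes the address of the loop variable for klog.KObj. Before Go 1.22 the range variable is a single variable reused across iterations, so common linters flag `&deployment` as implicit memory aliasing; copying it first makes the pointer refer to a value that belongs to this iteration only. A standalone sketch of the same pattern with invented Deployment names:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	items := []appsv1.Deployment{
		{ObjectMeta: metav1.ObjectMeta{Namespace: "capi-system", Name: "capi-controller-manager"}},
		{ObjectMeta: metav1.ObjectMeta{Namespace: "capd-system", Name: "capd-controller-manager"}},
	}

	for _, deployment := range items {
		deployment := deployment // copy, so &deployment points at this iteration's value
		fmt.Println(klog.KObj(&deployment))
	}
}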

cmd/clusterctl/client/config/reader_viper.go (+1 -1)

@@ -137,7 +137,7 @@ func (v *viperReader) Init(ctx context.Context, path string) error {
     if err := viper.ReadInConfig(); err != nil {
         return err
     }
-    log.V(5).Info("Using configuration", "File", viper.ConfigFileUsed())
+    log.V(5).Info("Using configuration", "file", viper.ConfigFileUsed())
     return nil
 }

cmd/clusterctl/client/repository/clusterclass_client.go (+2 -2)

@@ -81,13 +81,13 @@ func (cc *clusterClassClient) Get(ctx context.Context, name, targetNamespace str
     }
 
     if rawArtifact == nil {
-        log.V(5).Info("Fetching", "File", filename, "Provider", cc.provider.Name(), "Type", cc.provider.Type(), "Version", version)
+        log.V(5).Info("Fetching", "file", filename, "provider", cc.provider.Name(), "type", cc.provider.Type(), "version", version)
         rawArtifact, err = cc.repository.GetFile(ctx, version, filename)
         if err != nil {
             return nil, errors.Wrapf(err, "failed to read %q from provider's repository %q", filename, cc.provider.ManifestLabel())
         }
     } else {
-        log.V(1).Info("Using", "Override", filename, "Provider", cc.provider.ManifestLabel(), "Version", version)
+        log.V(1).Info("Using", "override", filename, "provider", cc.provider.ManifestLabel(), "version", version)
     }
 
     return NewTemplate(TemplateInput{

cmd/clusterctl/client/repository/components_client.go (+2 -2)

@@ -91,13 +91,13 @@ func (f *componentsClient) getRawBytes(ctx context.Context, options *ComponentsO
     }
 
     if file == nil {
-        log.V(5).Info("Fetching", "File", path, "Provider", f.provider.Name(), "Type", f.provider.Type(), "Version", options.Version)
+        log.V(5).Info("Fetching", "file", path, "provider", f.provider.Name(), "type", f.provider.Type(), "version", options.Version)
        file, err = f.repository.GetFile(ctx, options.Version, path)
         if err != nil {
             return nil, errors.Wrapf(err, "failed to read %q from provider's repository %q", path, f.provider.ManifestLabel())
         }
     } else {
-        log.Info("Using", "Override", path, "Provider", f.provider.ManifestLabel(), "Version", options.Version)
+        log.Info("Using", "override", path, "provider", f.provider.ManifestLabel(), "version", options.Version)
     }
     return file, nil
 }

cmd/clusterctl/client/repository/metadata_client.go (+2 -2)

@@ -75,13 +75,13 @@ func (f *metadataClient) Get(ctx context.Context) (*clusterctlv1.Metadata, error
         return nil, err
     }
     if file == nil {
-        log.V(5).Info("Fetching", "File", metadataFile, "Provider", f.provider.Name(), "Type", f.provider.Type(), "Version", version)
+        log.V(5).Info("Fetching", "file", metadataFile, "provider", f.provider.Name(), "type", f.provider.Type(), "version", version)
         file, err = f.repository.GetFile(ctx, version, metadataFile)
         if err != nil {
             return nil, errors.Wrapf(err, "failed to read %q from the repository for provider %q", metadataFile, f.provider.ManifestLabel())
         }
     } else {
-        log.V(1).Info("Using", "Override", metadataFile, "Provider", f.provider.ManifestLabel(), "Version", version)
+        log.V(1).Info("Using", "override", metadataFile, "provider", f.provider.ManifestLabel(), "version", version)
     }
 
     // Convert the yaml into a typed object

cmd/clusterctl/client/repository/overrides.go (+1 -1)

@@ -111,7 +111,7 @@ func getLocalOverride(info *newOverrideInput) ([]byte, error) {
     log := logf.Log
 
     overridePath, err := newOverride(info).Path()
-    log.V(5).Info("Potential override file", "SearchFile", overridePath, "Provider", info.provider.ManifestLabel(), "Version", info.version)
+    log.V(5).Info("Potential override file", "searchFile", overridePath, "provider", info.provider.ManifestLabel(), "version", info.version)
 
     if err != nil {
         return nil, err

cmd/clusterctl/client/repository/template_client.go (+2 -2)

@@ -90,13 +90,13 @@ func (c *templateClient) Get(ctx context.Context, flavor, targetNamespace string
     }
 
     if rawArtifact == nil {
-        log.V(5).Info("Fetching", "File", name, "Provider", c.provider.Name(), "Type", c.provider.Type(), "Version", version)
+        log.V(5).Info("Fetching", "file", name, "provider", c.provider.Name(), "type", c.provider.Type(), "version", version)
         rawArtifact, err = c.repository.GetFile(ctx, version, name)
         if err != nil {
             return nil, errors.Wrapf(err, "failed to read %q from provider's repository %q", name, c.provider.ManifestLabel())
         }
     } else {
-        log.V(1).Info("Using", "Override", name, "Provider", c.provider.ManifestLabel(), "Version", version)
+        log.V(1).Info("Using", "override", name, "provider", c.provider.ManifestLabel(), "version", version)
     }
 
     return NewTemplate(TemplateInput{

cmd/clusterctl/cmd/version_checker.go (+2 -2)

@@ -124,14 +124,14 @@ func (v *versionChecker) Check(ctx context.Context) (string, error) {
 
     // if we are using a dirty dev build, just log it out
     if strings.HasSuffix(cliVer.String(), "-dirty") {
-        log.V(1).Info("⚠️ Using a development build of clusterctl.", "CLIVersion", cliVer.String(), "LatestGithubRelease", release.Version)
+        log.V(1).Info("⚠️ Using a development build of clusterctl.", "cliVersion", cliVer.String(), "latestGithubRelease", release.Version)
         return "", nil
     }
 
     // if the cli version is a dev build off of the latest available release,
     // the just log it out as informational.
     if strings.HasPrefix(cliVer.String(), latestVersion.String()) && gitVersionRegEx.MatchString(cliVer.String()) {
-        log.V(1).Info("⚠️ Using a development build of clusterctl.", "CLIVersion", cliVer.String(), "LatestGithubRelease", release.Version)
+        log.V(1).Info("⚠️ Using a development build of clusterctl.", "cliVersion", cliVer.String(), "latestGithubRelease", release.Version)
         return "", nil
     }

controllers/remote/cluster_cache_tracker.go (+1 -1)

@@ -580,7 +580,7 @@ func (t *ClusterCacheTracker) Watch(ctx context.Context, input WatchInput) error
 
     if accessor.watches.Has(input.Name) {
         log := ctrl.LoggerFrom(ctx)
-        log.V(6).Info("Watch already exists", "Cluster", klog.KRef(input.Cluster.Namespace, input.Cluster.Name), "name", input.Name)
+        log.V(6).Info(fmt.Sprintf("Watch %s already exists", input.Name), "Cluster", klog.KRef(input.Cluster.Namespace, input.Cluster.Name))
         return nil
     }
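
Note: a third convention shows up here: the watch name is not a Kubernetes object, so it moves into the message text via fmt.Sprintf, while the Cluster stays a structured key/value pair built with klog.KRef. A small sketch with invented names (the V(6) line only prints when the process runs with -v=6 or higher):

package main

import (
	"flag"
	"fmt"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse()

	log := klog.Background()

	watchName := "machine-watchNodes"
	// Non-object identifiers go into the message; object references stay
	// as structured key/value pairs.
	log.V(6).Info(
		fmt.Sprintf("Watch %s already exists", watchName),
		"Cluster", klog.KRef("default", "my-cluster"),
	)
}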
