Commit d761e1b

Merge pull request #6150 from sbueringer/pr-cr-log-keys
🌱 Improve key value pairs consistency in logging
2 parents ac42ba2 + 1a286fd commit d761e1b

37 files changed, +233 −254 lines changed
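The theme of this change set is making structured-logging key/value pairs consistent across controllers: object references are logged with klog.KObj / klog.KRef instead of ad-hoc "name"/"namespace" string keys, and detailed mutation messages move to higher verbosity levels. The following is a rough Go sketch of that convention, not code from this PR; the illustrateLogging helper and the objects it logs are made up for illustration.

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
)

// illustrateLogging is a hypothetical helper showing the convention the PR
// moves toward: object references are logged with klog.KObj / klog.KRef so
// every controller emits the same "namespace/name" shape for the same key.
func illustrateLogging(ctx context.Context, secret *corev1.Secret) {
	log := ctrl.LoggerFrom(ctx)

	// Before: separate, inconsistently named keys for name and namespace.
	log.Info("updating secret", "secret-name", secret.Name, "secret-namespace", secret.Namespace)

	// After: a single key whose value renders as "namespace/name".
	log.Info("updating secret", "secret", klog.KObj(secret))

	// KRef covers the case where only the namespace and name strings are at hand.
	log.Info("waiting for configmap", "configMap", klog.KRef("default", "my-lock"))
}

func main() {
	ctx := ctrl.LoggerInto(context.Background(), klog.Background())
	illustrateLogging(ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "bootstrap-data"}})
}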

bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go

+21 −18
@@ -31,6 +31,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/klog/v2"
 	"k8s.io/utils/pointer"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -101,7 +102,7 @@ type Scope struct {
 // SetupWithManager sets up the reconciler with the Manager.
 func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
 	if r.KubeadmInitLock == nil {
-		r.KubeadmInitLock = locking.NewControlPlaneInitMutex(ctrl.LoggerFrom(ctx).WithName("init-locker"), mgr.GetClient())
+		r.KubeadmInitLock = locking.NewControlPlaneInitMutex(mgr.GetClient())
 	}
 	if r.remoteClientGetter == nil {
 		r.remoteClientGetter = remote.NewClusterClient
@@ -172,7 +173,7 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 	if configOwner == nil {
 		return ctrl.Result{}, nil
 	}
-	log = log.WithValues("kind", configOwner.GetKind(), "version", configOwner.GetResourceVersion(), "name", configOwner.GetName())
+	log = log.WithValues(configOwner.LowerCamelCaseKind(), klog.KRef(configOwner.GetNamespace(), configOwner.GetName()), "resourceVersion", configOwner.GetResourceVersion())
 
 	// Lookup the cluster the config owner is associated with
 	cluster, err := util.GetClusterByName(ctx, r.Client, configOwner.GetNamespace(), configOwner.ClusterName())
@@ -190,6 +191,8 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 		return ctrl.Result{}, err
 	}
 
+	ctx = ctrl.LoggerInto(ctx, log.WithValues("cluster", klog.KObj(cluster)))
+
 	if annotations.IsPaused(cluster, config) {
 		log.Info("Reconciliation is paused for this object")
 		return ctrl.Result{}, nil
@@ -321,14 +324,14 @@ func (r *KubeadmConfigReconciler) rotateMachinePoolBootstrapToken(ctx context.Co
 		return ctrl.Result{}, err
 	}
 	if shouldRotate {
-		log.V(2).Info("Creating new bootstrap token")
+		log.Info("Creating new bootstrap token, the existing one should be rotated")
 		token, err := createToken(ctx, remoteClient, r.TokenTTL)
 		if err != nil {
 			return ctrl.Result{}, errors.Wrapf(err, "failed to create new bootstrap token")
 		}
 
 		config.Spec.JoinConfiguration.Discovery.BootstrapToken.Token = token
-		log.Info("Altering JoinConfiguration.Discovery.BootstrapToken", "Token", token)
+		log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.Token")
 
 		// update the bootstrap data
 		return r.joinWorker(ctx, scope)
@@ -378,7 +381,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex
 		}
 	}()
 
-	scope.Info("Creating BootstrapData for the init control plane")
+	scope.Info("Creating BootstrapData for the first control plane")
 
 	// Nb. in this case JoinConfiguration should not be defined by users, but in case of misconfigurations, CABPK simply ignore it
 
@@ -495,6 +498,8 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex
 }
 
 func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) (ctrl.Result, error) {
+	scope.Info("Creating BootstrapData for the worker node")
+
 	certificates := secret.NewCertificatesForWorker(scope.Config.Spec.JoinConfiguration.CACertPath)
 	err := certificates.Lookup(
 		ctx,
@@ -534,8 +539,6 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope)
 		return ctrl.Result{}, errors.New("Machine is a Worker, but JoinConfiguration.ControlPlane is set in the KubeadmConfig object")
 	}
 
-	scope.Info("Creating BootstrapData for the worker node")
-
 	verbosityFlag := ""
 	if scope.Config.Spec.Verbosity != nil {
 		verbosityFlag = fmt.Sprintf("--v %s", strconv.Itoa(int(*scope.Config.Spec.Verbosity)))
@@ -592,6 +595,8 @@ func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope)
 }
 
 func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *Scope) (ctrl.Result, error) {
+	scope.Info("Creating BootstrapData for the joining control plane")
+
 	if !scope.ConfigOwner.IsControlPlaneMachine() {
 		return ctrl.Result{}, fmt.Errorf("%s is not a valid control plane kind, only Machine is supported", scope.ConfigOwner.GetKind())
 	}
@@ -635,8 +640,6 @@ func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *S
 		return ctrl.Result{}, err
 	}
 
-	scope.Info("Creating BootstrapData for the join control plane")
-
 	verbosityFlag := ""
 	if scope.Config.Spec.Verbosity != nil {
 		verbosityFlag = fmt.Sprintf("--v %s", strconv.Itoa(int(*scope.Config.Spec.Verbosity)))
@@ -889,7 +892,7 @@ func (r *KubeadmConfigReconciler) reconcileDiscovery(ctx context.Context, cluste
 
 		apiServerEndpoint = cluster.Spec.ControlPlaneEndpoint.String()
 		config.Spec.JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint = apiServerEndpoint
-		log.Info("Altering JoinConfiguration.Discovery.BootstrapToken", "APIServerEndpoint", apiServerEndpoint)
+		log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint", "APIServerEndpoint", apiServerEndpoint)
 	}
 
 	// if BootstrapToken already contains a token, respect it; otherwise create a new bootstrap token for the node to join
@@ -905,7 +908,7 @@ func (r *KubeadmConfigReconciler) reconcileDiscovery(ctx context.Context, cluste
 		}
 
 		config.Spec.JoinConfiguration.Discovery.BootstrapToken.Token = token
-		log.Info("Altering JoinConfiguration.Discovery.BootstrapToken")
+		log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.Token")
 	}
 
 	// If the BootstrapToken does not contain any CACertHashes then force skip CA Verification
@@ -927,39 +930,39 @@ func (r *KubeadmConfigReconciler) reconcileTopLevelObjectSettings(ctx context.Co
 	// then use Cluster's ControlPlaneEndpoint as a control plane endpoint for the Kubernetes cluster.
 	if config.Spec.ClusterConfiguration.ControlPlaneEndpoint == "" && cluster.Spec.ControlPlaneEndpoint.IsValid() {
 		config.Spec.ClusterConfiguration.ControlPlaneEndpoint = cluster.Spec.ControlPlaneEndpoint.String()
-		log.Info("Altering ClusterConfiguration", "ControlPlaneEndpoint", config.Spec.ClusterConfiguration.ControlPlaneEndpoint)
+		log.V(3).Info("Altering ClusterConfiguration.ControlPlaneEndpoint", "ControlPlaneEndpoint", config.Spec.ClusterConfiguration.ControlPlaneEndpoint)
 	}
 
 	// If there are no ClusterName defined in ClusterConfiguration, use Cluster.Name
 	if config.Spec.ClusterConfiguration.ClusterName == "" {
 		config.Spec.ClusterConfiguration.ClusterName = cluster.Name
-		log.Info("Altering ClusterConfiguration", "ClusterName", config.Spec.ClusterConfiguration.ClusterName)
+		log.V(3).Info("Altering ClusterConfiguration.ClusterName", "ClusterName", config.Spec.ClusterConfiguration.ClusterName)
 	}
 
 	// If there are no Network settings defined in ClusterConfiguration, use ClusterNetwork settings, if defined
 	if cluster.Spec.ClusterNetwork != nil {
 		if config.Spec.ClusterConfiguration.Networking.DNSDomain == "" && cluster.Spec.ClusterNetwork.ServiceDomain != "" {
 			config.Spec.ClusterConfiguration.Networking.DNSDomain = cluster.Spec.ClusterNetwork.ServiceDomain
-			log.Info("Altering ClusterConfiguration", "DNSDomain", config.Spec.ClusterConfiguration.Networking.DNSDomain)
+			log.V(3).Info("Altering ClusterConfiguration.Networking.DNSDomain", "DNSDomain", config.Spec.ClusterConfiguration.Networking.DNSDomain)
 		}
 		if config.Spec.ClusterConfiguration.Networking.ServiceSubnet == "" &&
 			cluster.Spec.ClusterNetwork.Services != nil &&
 			len(cluster.Spec.ClusterNetwork.Services.CIDRBlocks) > 0 {
 			config.Spec.ClusterConfiguration.Networking.ServiceSubnet = cluster.Spec.ClusterNetwork.Services.String()
-			log.Info("Altering ClusterConfiguration", "ServiceSubnet", config.Spec.ClusterConfiguration.Networking.ServiceSubnet)
+			log.V(3).Info("Altering ClusterConfiguration.Networking.ServiceSubnet", "ServiceSubnet", config.Spec.ClusterConfiguration.Networking.ServiceSubnet)
 		}
 		if config.Spec.ClusterConfiguration.Networking.PodSubnet == "" &&
 			cluster.Spec.ClusterNetwork.Pods != nil &&
 			len(cluster.Spec.ClusterNetwork.Pods.CIDRBlocks) > 0 {
 			config.Spec.ClusterConfiguration.Networking.PodSubnet = cluster.Spec.ClusterNetwork.Pods.String()
-			log.Info("Altering ClusterConfiguration", "PodSubnet", config.Spec.ClusterConfiguration.Networking.PodSubnet)
+			log.V(3).Info("Altering ClusterConfiguration.Networking.PodSubnet", "PodSubnet", config.Spec.ClusterConfiguration.Networking.PodSubnet)
 		}
 	}
 
 	// If there are no KubernetesVersion settings defined in ClusterConfiguration, use Version from machine, if defined
 	if config.Spec.ClusterConfiguration.KubernetesVersion == "" && machine.Spec.Version != nil {
 		config.Spec.ClusterConfiguration.KubernetesVersion = *machine.Spec.Version
-		log.Info("Altering ClusterConfiguration", "KubernetesVersion", config.Spec.ClusterConfiguration.KubernetesVersion)
+		log.V(3).Info("Altering ClusterConfiguration.KubernetesVersion", "KubernetesVersion", config.Spec.ClusterConfiguration.KubernetesVersion)
 	}
 }
 
@@ -998,7 +1001,7 @@ func (r *KubeadmConfigReconciler) storeBootstrapData(ctx context.Context, scope
 		if !apierrors.IsAlreadyExists(err) {
 			return errors.Wrapf(err, "failed to create bootstrap data secret for KubeadmConfig %s/%s", scope.Config.Namespace, scope.Config.Name)
 		}
-		log.Info("bootstrap data secret for KubeadmConfig already exists, updating", "secret", secret.Name, "KubeadmConfig", scope.Config.Name)
+		log.Info("bootstrap data secret for KubeadmConfig already exists, updating", "secret", klog.KObj(secret))
 		if err := r.Client.Update(ctx, secret); err != nil {
			return errors.Wrapf(err, "failed to update bootstrap data secret for KubeadmConfig %s/%s", scope.Config.Namespace, scope.Config.Name)
 		}
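The controller-side change above enriches the reconcile logger once (config owner, then cluster) and stores it back into the context with ctrl.LoggerInto, so downstream helpers can retrieve the same enriched logger via ctrl.LoggerFrom instead of receiving logger arguments. A minimal Go sketch of that flow follows; exampleReconciler and step are hypothetical stand-ins, not the controller's actual types.

package example

import (
	"context"

	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// exampleReconciler is a hypothetical stand-in for the KubeadmConfigReconciler.
type exampleReconciler struct{}

// reconcile enriches the logger once with the owning Cluster and stores it in
// the context, mirroring the ctrl.LoggerInto call added in the diff above.
func (r *exampleReconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster) error {
	log := ctrl.LoggerFrom(ctx).WithValues("cluster", klog.KObj(cluster))
	ctx = ctrl.LoggerInto(ctx, log)
	return r.step(ctx)
}

// step takes no logger argument; it recovers the enriched logger from ctx,
// so the "cluster" key/value added by the caller is already attached here.
func (r *exampleReconciler) step(ctx context.Context) error {
	ctrl.LoggerFrom(ctx).Info("doing work")
	return nil
}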

bootstrap/kubeadm/internal/locking/control_plane_init_mutex.go

+12 −14
@@ -22,11 +22,12 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/go-logr/logr"
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/klog/v2"
+	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -36,14 +37,12 @@ const semaphoreInformationKey
 
 // ControlPlaneInitMutex uses a ConfigMap to synchronize cluster initialization.
 type ControlPlaneInitMutex struct {
-	log    logr.Logger
 	client client.Client
 }
 
 // NewControlPlaneInitMutex returns a lock that can be held by a control plane node before init.
-func NewControlPlaneInitMutex(log logr.Logger, client client.Client) *ControlPlaneInitMutex {
+func NewControlPlaneInitMutex(client client.Client) *ControlPlaneInitMutex {
 	return &ControlPlaneInitMutex{
-		log:    log,
 		client: client,
 	}
 }
@@ -52,7 +51,7 @@ func NewControlPlaneInitMutex(log logr.Logger, client client.Client) *ControlPla
 func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) bool {
 	sema := newSemaphore()
 	cmName := configMapName(cluster.Name)
-	log := c.log.WithValues("namespace", cluster.Namespace, "cluster-name", cluster.Name, "configmap-name", cmName, "machine-name", machine.Name)
+	log := ctrl.LoggerFrom(ctx, "configMap", klog.KRef(cluster.Namespace, cmName))
 	err := c.client.Get(ctx, client.ObjectKey{
 		Namespace: cluster.Namespace,
 		Name:      cmName,
@@ -61,12 +60,12 @@ func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Clu
 	case apierrors.IsNotFound(err):
 		break
 	case err != nil:
-		log.Error(err, "Failed to acquire lock")
+		log.Error(err, "Failed to acquire init lock")
 		return false
 	default: // Successfully found an existing config map.
 		info, err := sema.information()
 		if err != nil {
-			log.Error(err, "Failed to get information about the existing lock")
+			log.Error(err, "Failed to get information about the existing init lock")
 			return false
 		}
 		// The machine requesting the lock is the machine that created the lock, therefore the lock is acquired.
@@ -79,31 +78,31 @@ func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Clu
 			Namespace: cluster.Namespace,
 			Name:      info.MachineName,
 		}, &clusterv1.Machine{}); err != nil {
-			log.Error(err, "Failed to get machine holding ControlPlane lock")
+			log.Error(err, "Failed to get machine holding init lock")
 			if apierrors.IsNotFound(err) {
 				c.Unlock(ctx, cluster)
 			}
 		}
-		log.Info("Waiting on another machine to initialize", "init-machine", info.MachineName)
+		log.Info(fmt.Sprintf("Waiting for Machine %s to initialize", info.MachineName))
 		return false
 	}
 
 	// Adds owner reference, namespace and name
 	sema.setMetadata(cluster)
 	// Adds the additional information
 	if err := sema.setInformation(&information{MachineName: machine.Name}); err != nil {
-		log.Error(err, "Failed to acquire lock while setting semaphore information")
+		log.Error(err, "Failed to acquire init lock while setting semaphore information")
 		return false
 	}
 
 	log.Info("Attempting to acquire the lock")
 	err = c.client.Create(ctx, sema.ConfigMap)
 	switch {
 	case apierrors.IsAlreadyExists(err):
-		log.Info("Cannot acquire the lock. The lock has been acquired by someone else")
+		log.Info("Cannot acquire the init lock. The init lock has been acquired by someone else")
 		return false
 	case err != nil:
-		log.Error(err, "Error acquiring the lock")
+		log.Error(err, "Error acquiring the init lock")
 		return false
 	default:
 		return true
@@ -114,8 +113,7 @@ func (c *ControlPlaneInitMutex) Lock(ctx context.Context, cluster *clusterv1.Clu
 func (c *ControlPlaneInitMutex) Unlock(ctx context.Context, cluster *clusterv1.Cluster) bool {
 	sema := newSemaphore()
 	cmName := configMapName(cluster.Name)
-	log := c.log.WithValues("namespace", cluster.Namespace, "cluster-name", cluster.Name, "configmap-name", cmName)
-	log.Info("Checking for lock")
+	log := ctrl.LoggerFrom(ctx, "configMap", klog.KRef(cluster.Namespace, cmName))
 	err := c.client.Get(ctx, client.ObjectKey{
 		Namespace: cluster.Namespace,
 		Name:      cmName,
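With the logr.Logger field removed, callers construct the init lock with just a client, and Lock/Unlock derive their logger, including the configMap reference, from the context. The following Go sketch shows the new usage under stated assumptions: tryInit is a hypothetical caller, and the import of the internal locking package is purely illustrative (in practice only code inside the cluster-api module can import it).

package example

import (
	"context"

	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/bootstrap/kubeadm/internal/locking"
)

// tryInit is a hypothetical caller showing the new constructor signature:
// the mutex takes only a client, and Lock/Unlock log via the logger carried
// in ctx (ctrl.LoggerFrom) rather than a logger injected at construction time.
func tryInit(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, machine *clusterv1.Machine) {
	initLock := locking.NewControlPlaneInitMutex(c)

	// Attach the cluster reference once; Lock/Unlock pick it up from ctx.
	ctx = ctrl.LoggerInto(ctx, ctrl.LoggerFrom(ctx).WithValues("cluster", klog.KObj(cluster)))
	if initLock.Lock(ctx, cluster, machine) {
		defer initLock.Unlock(ctx, cluster)
		// ... run kubeadm init for the first control plane machine ...
	}
}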
