
Commit b5c6434

Merge pull request kubernetes#98850 from yangjunmyfm192085/run-test14
Structured Logging migration: modify volume and container part logs o…
2 parents 2f8a225 + 01a4e4f

12 files changed (+111, -172 lines)
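
All of the hunks below follow the same structured-logging conversion: printf-style klog calls (Infof, Warningf, Errorf) become klog.InfoS / klog.ErrorS with a constant message and key/value pairs, warnings are downgraded to InfoS, errors are passed as the first argument to ErrorS, and pods are referenced via klog.KObj instead of format.Pod. A minimal, hypothetical sketch of that pattern (the pod, error, and message values below are illustrative only, not taken from this commit):

package main

import (
	"errors"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	// Illustrative values only.
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "nginx"}}
	err := errors.New("example failure")

	// Before: values interpolated into a format string.
	klog.Errorf("Failed to sync pod %q: %v", pod.Name, err)

	// After: constant message plus key/value pairs; the error is the first
	// argument to ErrorS, and the pod is rendered as namespace/name via KObj.
	klog.ErrorS(err, "Failed to sync pod", "pod", klog.KObj(pod))

	// Warnings become InfoS; verbosity levels carry over unchanged.
	klog.V(4).InfoS("Already ran container, do nothing", "pod", klog.KObj(pod), "containerName", "app")
}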

pkg/kubelet/container/container_gc.go

Lines changed: 1 addition & 1 deletion
@@ -82,6 +82,6 @@ func (cgc *realContainerGC) GarbageCollect() error {
 }
 
 func (cgc *realContainerGC) DeleteAllUnusedContainers() error {
-    klog.Infof("attempting to delete unused containers")
+    klog.InfoS("Attempting to delete unused containers")
     return cgc.runtime.GarbageCollect(cgc.policy, cgc.sourcesReadyProvider.AllReady(), true)
 }

pkg/kubelet/container/helpers.go

Lines changed: 3 additions & 4 deletions
@@ -31,7 +31,6 @@ import (
     "k8s.io/client-go/tools/record"
     runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
-    "k8s.io/kubernetes/pkg/kubelet/util/format"
     hashutil "k8s.io/kubernetes/pkg/util/hash"
     "k8s.io/kubernetes/third_party/forked/golang/expansion"
     utilsnet "k8s.io/utils/net"
@@ -82,13 +81,13 @@ func ShouldContainerBeRestarted(container *v1.Container, pod *v1.Pod, podStatus
     }
     // Check RestartPolicy for dead container
     if pod.Spec.RestartPolicy == v1.RestartPolicyNever {
-        klog.V(4).Infof("Already ran container %q of pod %q, do nothing", container.Name, format.Pod(pod))
+        klog.V(4).InfoS("Already ran container, do nothing", "pod", klog.KObj(pod), "containerName", container.Name)
         return false
     }
     if pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure {
         // Check the exit code.
         if status.ExitCode == 0 {
-            klog.V(4).Infof("Already successfully ran container %q of pod %q, do nothing", container.Name, format.Pod(pod))
+            klog.V(4).InfoS("Already successfully ran container, do nothing", "pod", klog.KObj(pod), "containerName", container.Name)
             return false
         }
     }
@@ -341,7 +340,7 @@ func MakePortMappings(container *v1.Container) (ports []PortMapping) {
 
         // Protect against a port name being used more than once in a container.
         if _, ok := names[name]; ok {
-            klog.Warningf("Port name conflicted, %q is defined more than once", name)
+            klog.InfoS("Port name conflicted, it is defined more than once", "portName", name)
             continue
         }
         ports = append(ports, pm)

pkg/kubelet/container/runtime.go

Lines changed: 1 addition & 1 deletion
@@ -204,7 +204,7 @@ func BuildContainerID(typ, ID string) ContainerID {
 func ParseContainerID(containerID string) ContainerID {
     var id ContainerID
     if err := id.ParseString(containerID); err != nil {
-        klog.Error(err)
+        klog.ErrorS(err, "Parsing containerID failed")
     }
     return id
 }

pkg/kubelet/kubelet_volumes.go

Lines changed: 8 additions & 8 deletions
@@ -63,11 +63,11 @@ func (kl *Kubelet) podVolumesExist(podUID types.UID) bool {
     // There are some volume plugins such as flexvolume might not have mounts. See issue #61229
     volumePaths, err := kl.getMountedVolumePathListFromDisk(podUID)
     if err != nil {
-        klog.Errorf("pod %q found, but error %v occurred during checking mounted volumes from disk", podUID, err)
+        klog.ErrorS(err, "Pod found, but error occurred during checking mounted volumes from disk", "podUID", podUID)
         return true
     }
     if len(volumePaths) > 0 {
-        klog.V(4).Infof("pod %q found, but volumes are still mounted on disk %v", podUID, volumePaths)
+        klog.V(4).InfoS("Pod found, but volumes are still mounted on disk", "podUID", podUID, "volumePaths", volumePaths)
         return true
     }
 
@@ -86,7 +86,7 @@ func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *v1.Pod, o
     if err != nil {
         return nil, fmt.Errorf("failed to instantiate mounter for volume: %s using plugin: %s with a root cause: %v", spec.Name(), plugin.GetPluginName(), err)
     }
-    klog.V(10).Infof("Using volume plugin %q to mount %s", plugin.GetPluginName(), spec.Name())
+    klog.V(10).InfoS("Using volume plugin for mount", "volumePluginName", plugin.GetPluginName(), "volumeName", spec.Name())
     return physicalMounter, nil
 }
 
@@ -118,7 +118,7 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon
         // TODO: getMountedVolumePathListFromDisk() call may be redundant with
         // kl.getPodVolumePathListFromDisk(). Can this be cleaned up?
         if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist {
-            klog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up", uid)
+            klog.V(3).InfoS("Orphaned pod found, but volumes are not cleaned up", "podUID", uid)
             continue
         }
 
@@ -167,18 +167,18 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon
             continue
         }
 
-        klog.V(3).Infof("Orphaned pod %q found, removing", uid)
+        klog.V(3).InfoS("Orphaned pod found, removing", "podUID", uid)
         if err := removeall.RemoveAllOneFilesystem(kl.mounter, kl.getPodDir(uid)); err != nil {
-            klog.Errorf("Failed to remove orphaned pod %q dir; err: %v", uid, err)
+            klog.ErrorS(err, "Failed to remove orphaned pod dir", "podUID", uid)
             orphanRemovalErrors = append(orphanRemovalErrors, err)
         }
     }
 
     logSpew := func(errs []error) {
         if len(errs) > 0 {
-            klog.Errorf("%v : There were a total of %v errors similar to this. Turn up verbosity to see them.", errs[0], len(errs))
+            klog.ErrorS(errs[0], "There were many similar errors. Turn up verbosity to see them.", "numErrs", len(errs))
             for _, err := range errs {
-                klog.V(5).Infof("Orphan pod: %v", err)
+                klog.V(5).InfoS("Orphan pod", "err", err)
             }
         }
     }

pkg/kubelet/pod_container_deletor.go

Lines changed: 3 additions & 3 deletions
@@ -49,7 +49,7 @@ func newPodContainerDeletor(runtime kubecontainer.Runtime, containersToKeep int)
         for {
             id := <-buffer
             if err := runtime.DeleteContainer(id); err != nil {
-                klog.Warningf("[pod_container_deletor] DeleteContainer returned error for (id=%v): %v", id, err)
+                klog.InfoS("DeleteContainer returned error", "containerID", id, "err", err)
             }
         }
     }, 0, wait.NeverStop)
@@ -76,7 +76,7 @@ func getContainersToDeleteInPod(filterContainerID string, podStatus *kubecontain
     }(filterContainerID, podStatus)
 
     if filterContainerID != "" && matchedContainer == nil {
-        klog.Warningf("Container %q not found in pod's containers", filterContainerID)
+        klog.InfoS("Container not found in pod's containers", "containerID", filterContainerID)
         return containerStatusbyCreatedList{}
     }
 
@@ -110,7 +110,7 @@ func (p *podContainerDeletor) deleteContainersInPod(filterContainerID string, po
         select {
         case p.worker <- candidate.ID:
         default:
-            klog.Warningf("Failed to issue the request to remove container %v", candidate.ID)
+            klog.InfoS("Failed to issue the request to remove container", "containerID", candidate.ID)
         }
     }
 }

pkg/kubelet/pod_workers.go

Lines changed: 1 addition & 2 deletions
@@ -33,7 +33,6 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/events"
     "k8s.io/kubernetes/pkg/kubelet/eviction"
     kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
-    "k8s.io/kubernetes/pkg/kubelet/util/format"
     "k8s.io/kubernetes/pkg/kubelet/util/queue"
 )
 
@@ -188,7 +187,7 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan UpdatePodOptions) {
         }
         if err != nil {
             // IMPORTANT: we do not log errors here, the syncPodFn is responsible for logging errors
-            klog.Errorf("Error syncing pod %s (%q), skipping: %v", update.Pod.UID, format.Pod(update.Pod), err)
+            klog.ErrorS(err, "Error syncing pod, skipping", "pod", klog.KObj(update.Pod), "podUID", update.Pod.UID)
         }
         p.wrapUp(update.Pod.UID, err)
     }

pkg/kubelet/runonce.go

Lines changed: 15 additions & 15 deletions
@@ -51,15 +51,15 @@ func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult,
     // If the container logs directory does not exist, create it.
     if _, err := os.Stat(ContainerLogsDir); err != nil {
         if err := kl.os.MkdirAll(ContainerLogsDir, 0755); err != nil {
-            klog.Errorf("Failed to create directory %q: %v", ContainerLogsDir, err)
+            klog.ErrorS(err, "Failed to create directory", "path", ContainerLogsDir)
         }
     }
 
     select {
     case u := <-updates:
-        klog.Infof("processing manifest with %d pods", len(u.Pods))
+        klog.InfoS("Processing manifest with pods", "numPods", len(u.Pods))
         result, err := kl.runOnce(u.Pods, runOnceRetryDelay)
-        klog.Infof("finished processing %d pods", len(u.Pods))
+        klog.InfoS("Finished processing pods", "numPods", len(u.Pods))
         return result, err
     case <-time.After(runOnceManifestDelay):
         return nil, fmt.Errorf("no pod manifest update after %v", runOnceManifestDelay)
@@ -85,27 +85,27 @@ func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results []
         }(pod)
     }
 
-    klog.Infof("Waiting for %d pods", len(admitted))
+    klog.InfoS("Waiting for pods", "numPods", len(admitted))
     failedPods := []string{}
     for i := 0; i < len(admitted); i++ {
         res := <-ch
         results = append(results, res)
         if res.Err != nil {
-            faliedContainerName, err := kl.getFailedContainers(res.Pod)
+            failedContainerName, err := kl.getFailedContainers(res.Pod)
             if err != nil {
-                klog.Infof("unable to get failed containers' names for pod %q, error:%v", format.Pod(res.Pod), err)
+                klog.InfoS("Unable to get failed containers' names for pod", "pod", klog.KObj(res.Pod), "err", err)
             } else {
-                klog.Infof("unable to start pod %q because container:%v failed", format.Pod(res.Pod), faliedContainerName)
+                klog.InfoS("Unable to start pod because container failed", "pod", klog.KObj(res.Pod), "containerName", failedContainerName)
             }
             failedPods = append(failedPods, format.Pod(res.Pod))
         } else {
-            klog.Infof("started pod %q", format.Pod(res.Pod))
+            klog.InfoS("Started pod", "pod", klog.KObj(res.Pod))
         }
     }
     if len(failedPods) > 0 {
         return results, fmt.Errorf("error running pods: %v", failedPods)
     }
-    klog.Infof("%d pods started", len(pods))
+    klog.InfoS("Pods started", "numPods", len(pods))
     return results, err
 }
 
@@ -120,14 +120,14 @@ func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error {
         }
 
         if kl.isPodRunning(pod, status) {
-            klog.Infof("pod %q containers running", format.Pod(pod))
+            klog.InfoS("Pod's containers running", "pod", klog.KObj(pod))
             return nil
         }
-        klog.Infof("pod %q containers not running: syncing", format.Pod(pod))
+        klog.InfoS("Pod's containers not running: syncing", "pod", klog.KObj(pod))
 
-        klog.Infof("Creating a mirror pod for static pod %q", format.Pod(pod))
+        klog.InfoS("Creating a mirror pod for static pod", "pod", klog.KObj(pod))
         if err := kl.podManager.CreateMirrorPod(pod); err != nil {
-            klog.Errorf("Failed creating a mirror pod %q: %v", format.Pod(pod), err)
+            klog.ErrorS(err, "Failed creating a mirror pod", "pod", klog.KObj(pod))
         }
         mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
         if err = kl.syncPod(syncPodOptions{
@@ -142,7 +142,7 @@ func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error {
             return fmt.Errorf("timeout error: pod %q containers not running after %d retries", format.Pod(pod), runOnceMaxRetries)
         }
         // TODO(proppy): health checking would be better than waiting + checking the state at the next iteration.
-        klog.Infof("pod %q containers synced, waiting for %v", format.Pod(pod), delay)
+        klog.InfoS("Pod's containers synced, waiting", "pod", klog.KObj(pod), "duration", delay)
         time.Sleep(delay)
         retry++
         delay *= runOnceRetryDelayBackoff
@@ -154,7 +154,7 @@ func (kl *Kubelet) isPodRunning(pod *v1.Pod, status *kubecontainer.PodStatus) bo
     for _, c := range pod.Spec.Containers {
         cs := status.FindContainerStatusByName(c.Name)
         if cs == nil || cs.State != kubecontainer.ContainerStateRunning {
-            klog.Infof("Container %q for pod %q not running", c.Name, format.Pod(pod))
+            klog.InfoS("Container not running", "pod", klog.KObj(pod), "containerName", c.Name)
             return false
         }
     }

pkg/kubelet/volume_host.go

Lines changed: 3 additions & 3 deletions
@@ -72,7 +72,7 @@ func NewInitializedVolumePluginMgr(
         csiDriversSynced = csiDriverInformer.Informer().HasSynced
 
     } else {
-        klog.Warning("kubeClient is nil. Skip initialization of CSIDriverLister")
+        klog.InfoS("KubeClient is nil. Skip initialization of CSIDriverLister")
     }
 
     kvh := &kubeletVolumeHost{
@@ -176,13 +176,13 @@ func (kvh *kubeletVolumeHost) CSIDriversSynced() cache.InformerSynced {
 // WaitForCacheSync is a helper function that waits for cache sync for CSIDriverLister
 func (kvh *kubeletVolumeHost) WaitForCacheSync() error {
     if kvh.csiDriversSynced == nil {
-        klog.Error("csiDriversSynced not found on KubeletVolumeHost")
+        klog.ErrorS(nil, "CsiDriversSynced not found on KubeletVolumeHost")
         return fmt.Errorf("csiDriversSynced not found on KubeletVolumeHost")
     }
 
     synced := []cache.InformerSynced{kvh.csiDriversSynced}
     if !cache.WaitForCacheSync(wait.NeverStop, synced...) {
-        klog.Warning("failed to wait for cache sync for CSIDriverLister")
+        klog.InfoS("Failed to wait for cache sync for CSIDriverLister")
         return fmt.Errorf("failed to wait for cache sync for CSIDriverLister")
     }
 
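
One detail worth noting in the WaitForCacheSync hunk above: where the original call logged at error severity without an error object, the structured form passes nil as the first argument to klog.ErrorS. A minimal, self-contained sketch of that pattern (the call site here is illustrative, not from the kubelet):

package main

import "k8s.io/klog/v2"

func main() {
	// A nil error is allowed: the entry stays at error severity even though
	// there is no error object to attach, and key/value pairs can still follow.
	klog.ErrorS(nil, "CsiDriversSynced not found on KubeletVolumeHost")
	klog.Flush()
}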

pkg/kubelet/volumemanager/cache/actual_state_of_world.go

Lines changed: 7 additions & 22 deletions
@@ -461,9 +461,7 @@ func (asw *actualStateOfWorld) addVolume(
     } else {
         // If volume object already exists, update the fields such as device path
         volumeObj.devicePath = devicePath
-        klog.V(2).Infof("Volume %q is already added to attachedVolume list, update device path %q",
-            volumeName,
-            devicePath)
+        klog.V(2).InfoS("Volume is already added to attachedVolume list, update device path", "volumeName", volumeName, "path", devicePath)
     }
     asw.attachedVolumes[volumeName] = volumeObj
 
@@ -530,9 +528,7 @@ func (asw *actualStateOfWorld) MarkVolumeAsResized(
             podName,
             volumeName)
     }
-
-    klog.V(5).Infof("Volume %s(OuterVolumeSpecName %s) of pod %s has been resized",
-        volumeName, podObj.outerVolumeSpecName, podName)
+    klog.V(5).InfoS("Pod volume has been resized", "uniquePodName", podName, "volumeName", volumeName, "outerVolumeSpecName", podObj.outerVolumeSpecName)
     podObj.fsResizeRequired = false
     asw.attachedVolumes[volumeName].mountedPods[podName] = podObj
     return nil
@@ -548,12 +544,7 @@ func (asw *actualStateOfWorld) MarkRemountRequired(
             asw.volumePluginMgr.FindPluginBySpec(podObj.volumeSpec)
         if err != nil || volumePlugin == nil {
             // Log and continue processing
-            klog.Errorf(
-                "MarkRemountRequired failed to FindPluginBySpec for pod %q (podUid %q) volume: %q (volSpecName: %q)",
-                podObj.podName,
-                podObj.podUID,
-                volumeObj.volumeName,
-                podObj.volumeSpec.Name())
+            klog.ErrorS(nil, "MarkRemountRequired failed to FindPluginBySpec for volume", "uniquePodName", podObj.podName, "podUID", podObj.podUID, "volumeName", volumeName, "volumeSpecName", podObj.volumeSpec.Name())
             continue
         }
 
@@ -572,33 +563,27 @@ func (asw *actualStateOfWorld) MarkFSResizeRequired(
     defer asw.Unlock()
     volumeObj, volumeExists := asw.attachedVolumes[volumeName]
     if !volumeExists {
-        klog.Warningf("MarkFSResizeRequired for volume %s failed as volume not exist", volumeName)
+        klog.InfoS("MarkFSResizeRequired for volume failed as volume does not exist", "volumeName", volumeName)
         return
     }
 
     podObj, podExists := volumeObj.mountedPods[podName]
     if !podExists {
-        klog.Warningf("MarkFSResizeRequired for volume %s failed "+
-            "as pod(%s) not exist", volumeName, podName)
+        klog.InfoS("MarkFSResizeRequired for volume failed because the pod does not exist", "uniquePodName", podName, "volumeName", volumeName)
         return
     }
 
     volumePlugin, err :=
         asw.volumePluginMgr.FindNodeExpandablePluginBySpec(podObj.volumeSpec)
     if err != nil || volumePlugin == nil {
         // Log and continue processing
-        klog.Errorf(
-            "MarkFSResizeRequired failed to find expandable plugin for pod %q volume: %q (volSpecName: %q)",
-            podObj.podName,
-            volumeObj.volumeName,
-            podObj.volumeSpec.Name())
+        klog.ErrorS(nil, "MarkFSResizeRequired failed to find expandable plugin for volume", "uniquePodName", podObj.podName, "volumeName", volumeObj.volumeName, "volumeSpecName", podObj.volumeSpec.Name())
         return
     }
 
     if volumePlugin.RequiresFSResize() {
         if !podObj.fsResizeRequired {
-            klog.V(3).Infof("PVC volume %s(OuterVolumeSpecName %s) of pod %s requires file system resize",
-                volumeName, podObj.outerVolumeSpecName, podName)
+            klog.V(3).InfoS("PVC volume of the pod requires file system resize", "uniquePodName", podName, "volumeName", volumeName, "outerVolumeSpecName", podObj.outerVolumeSpecName)
             podObj.fsResizeRequired = true
         }
         asw.attachedVolumes[volumeName].mountedPods[podName] = podObj
