Commit 1471d2c

deads2k and QiWang19 committed Jul 27, 2021

add kubelet skew check for MCO upgradeable

Co-authored-by: Qi Wang <[email protected]>
Signed-off-by: Qi Wang <[email protected]>
1 parent 1bcbc37 commit 1471d2c
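
This commit wires a Node informer into the Machine Config Operator and factors kubelet version skew into the operator's Upgradeable status: when no machine config pool is updating or degraded, syncUpgradeableStatus compares each managed node's kubelet minor version against the version reported by the kube-apiserver ClusterOperator. A node exactly two minor versions behind sets the condition reason to KubeletSkewPresent; a node more than two minor versions behind sets KubeletSkewUnsupported and also emits a warning event on the ClusterOperator.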

File tree: 4 files changed (+543, -6 lines changed)

Diff for: cmd/machine-config-operator/start.go (+1)

@@ -75,6 +75,7 @@ func runStartCmd(cmd *cobra.Command, args []string) {
         ctrlctx.ClientBuilder.APIExtClientOrDie(componentName),
         ctrlctx.ClientBuilder.ConfigClientOrDie(componentName),
         ctrlctx.OpenShiftKubeAPIServerKubeNamespacedInformerFactory.Core().V1().ConfigMaps(),
+        ctrlctx.KubeInformerFactory.Core().V1().Nodes(),
     )

     ctrlctx.NamespacedInformerFactory.Start(ctrlctx.Stop)

Diff for: pkg/operator/operator.go (+7)

@@ -81,6 +81,7 @@ type Operator struct {
     clusterCmLister corelisterv1.ConfigMapLister
     proxyLister configlistersv1.ProxyLister
     oseKubeAPILister corelisterv1.ConfigMapLister
+    nodeLister corelisterv1.NodeLister
     dnsLister configlistersv1.DNSLister

     crdListerSynced cache.InformerSynced
@@ -98,6 +99,7 @@ type Operator struct {
     clusterRoleBindingInformerSynced cache.InformerSynced
     proxyListerSynced cache.InformerSynced
     oseKubeAPIListerSynced cache.InformerSynced
+    nodeListerSynced cache.InformerSynced
     dnsListerSynced cache.InformerSynced

     // queue only ever has one item, but it has nice error handling backoff/retry semantics
@@ -131,6 +133,7 @@ func New(
     apiExtClient apiextclientset.Interface,
     configClient configclientset.Interface,
     oseKubeAPIInformer coreinformersv1.ConfigMapInformer,
+    nodeInformer coreinformersv1.NodeInformer,
 ) *Operator {
     eventBroadcaster := record.NewBroadcaster()
     eventBroadcaster.StartLogging(glog.Infof)
@@ -163,6 +166,7 @@ func New(
         mcpInformer.Informer(),
         proxyInformer.Informer(),
         oseKubeAPIInformer.Informer(),
+        nodeInformer.Informer(),
         dnsInformer.Informer(),
     } {
         i.AddEventHandler(optr.eventHandler())
@@ -182,6 +186,8 @@ func New(
     optr.proxyListerSynced = proxyInformer.Informer().HasSynced
     optr.oseKubeAPILister = oseKubeAPIInformer.Lister()
     optr.oseKubeAPIListerSynced = oseKubeAPIInformer.Informer().HasSynced
+    optr.nodeLister = nodeInformer.Lister()
+    optr.nodeListerSynced = nodeInformer.Informer().HasSynced

     optr.serviceAccountInformerSynced = serviceAccountInfomer.Informer().HasSynced
     optr.clusterRoleInformerSynced = clusterRoleInformer.Informer().HasSynced
@@ -235,6 +241,7 @@ func (optr *Operator) Run(workers int, stopCh <-chan struct{}) {
         optr.networkListerSynced,
         optr.proxyListerSynced,
         optr.oseKubeAPIListerSynced,
+        optr.nodeListerSynced,
         optr.mcpListerSynced,
         optr.mcListerSynced,
         optr.dnsListerSynced) {
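
The wiring above is the standard client-go informer/lister pattern: keep a cache-backed Lister for reads and a HasSynced func so Run can gate on the initial cache fill. For readers unfamiliar with it, here is a minimal self-contained sketch of that pattern; the kubeconfig loading, factory, and resync interval are illustrative assumptions, not MCO code:

package main

import (
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // Illustrative client setup; the MCO builds its clients via its ClientBuilder instead.
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(config)

    factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
    nodeInformer := factory.Core().V1().Nodes()
    nodeLister := nodeInformer.Lister()             // cache-backed reads, like optr.nodeLister
    nodeSynced := nodeInformer.Informer().HasSynced // sync gate, like optr.nodeListerSynced

    stop := make(chan struct{})
    defer close(stop)
    factory.Start(stop)
    if !cache.WaitForCacheSync(stop, nodeSynced) {
        panic("node informer cache never synced")
    }

    // Once synced, lister reads are served from the local cache, not the API server.
    nodes, err := nodeLister.List(labels.Everything())
    if err != nil {
        panic(err)
    }
    for _, node := range nodes {
        fmt.Println(node.Name, node.Status.NodeInfo.KubeletVersion)
    }
}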

Diff for: pkg/operator/status.go (+169)

@@ -4,6 +4,8 @@ import (
     "context"
     "encoding/json"
     "fmt"
+    "strconv"
+    "strings"

     "github.com/golang/glog"
     configv1 "github.com/openshift/api/config/v1"
@@ -16,6 +18,7 @@ import (
     "k8s.io/apimachinery/pkg/labels"

     mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+    v1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
     ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common"
 )

@@ -243,6 +246,13 @@ func (optr *Operator) syncDegradedStatus(ierr syncError) (err error) {
     return optr.updateStatus(co, coStatus)
 }

+const (
+    skewUnchecked   = "KubeletSkewUnchecked"
+    skewSupported   = "KubeletSkewSupported"
+    skewUnsupported = "KubeletSkewUnsupported"
+    skewPresent     = "KubeletSkewPresent"
+)
+
 // syncUpgradeableStatus applies the new condition to the mco's ClusterOperator object.
 func (optr *Operator) syncUpgradeableStatus() error {
     co, err := optr.fetchClusterOperator()
@@ -289,9 +299,168 @@ func (optr *Operator) syncUpgradeableStatus() error {
         coStatus.Message = "One or more machine config pools are updating, please see `oc get mcp` for further details"
     }

+    // don't overwrite status if updating or degraded
+    if !updating && !degraded {
+        skewStatus, status, err := optr.isKubeletSkewSupported(pools)
+        if err != nil {
+            glog.Errorf("Error checking version skew: %v, kubelet skew status: %v, status reason: %v, status message: %v", err, skewStatus, status.Reason, status.Message)
+            coStatus.Reason = status.Reason
+            coStatus.Message = status.Message
+            return optr.updateStatus(co, coStatus)
+        }
+        switch skewStatus {
+        case skewUnchecked:
+            coStatus.Reason = status.Reason
+            coStatus.Message = status.Message
+            return optr.updateStatus(co, coStatus)
+        case skewUnsupported:
+            coStatus.Reason = status.Reason
+            coStatus.Message = status.Message
+            mcoObjectRef := &corev1.ObjectReference{
+                Kind:      co.Kind,
+                Name:      co.Name,
+                Namespace: co.Namespace,
+                UID:       co.GetUID(),
+            }
+            glog.Infof("kubelet skew status: %v, status reason: %v", skewStatus, status.Reason)
+            optr.eventRecorder.Eventf(mcoObjectRef, corev1.EventTypeWarning, coStatus.Reason, coStatus.Message)
+            return optr.updateStatus(co, coStatus)
+        case skewPresent:
+            coStatus.Reason = status.Reason
+            coStatus.Message = status.Message
+            glog.Infof("kubelet skew status: %v, status reason: %v", skewStatus, status.Reason)
+            return optr.updateStatus(co, coStatus)
+        }
+    }
     return optr.updateStatus(co, coStatus)
 }

+// isKubeletSkewSupported checks the version skew of kube-apiserver and node kubelet version.
+// Returns the skew status. version skew > 2 is not supported.
+func (optr *Operator) isKubeletSkewSupported(pools []*v1.MachineConfigPool) (skewStatus string, coStatus configv1.ClusterOperatorStatusCondition, err error) {
+    coStatus = configv1.ClusterOperatorStatusCondition{}
+    kubeAPIServerStatus, err := optr.configClient.ConfigV1().ClusterOperators().Get(context.TODO(), "kube-apiserver", metav1.GetOptions{})
+    if err != nil {
+        coStatus.Reason = skewUnchecked
+        coStatus.Message = fmt.Sprintf("An error occurred when checking kubelet version skew: %v", err)
+        return skewUnchecked, coStatus, err
+    }
+    // looks like
+    // - name: kube-apiserver
+    //   version: 1.21.0-rc.0
+    kubeAPIServerVersion := ""
+    for _, version := range kubeAPIServerStatus.Status.Versions {
+        if version.Name != "kube-apiserver" {
+            continue
+        }
+        kubeAPIServerVersion = version.Version
+        break
+    }
+    if kubeAPIServerVersion == "" {
+        err = fmt.Errorf("kube-apiserver does not yet have a version")
+        coStatus.Reason = skewUnchecked
+        coStatus.Message = fmt.Sprintf("An error occurred when checking kubelet version skew: %v", err.Error())
+        return skewUnchecked, coStatus, err
+    }
+    kubeAPIServerMinorVersion, err := getMinorKubeletVersion(kubeAPIServerVersion)
+    if err != nil {
+        coStatus.Reason = skewUnchecked
+        coStatus.Message = fmt.Sprintf("An error occurred when checking kubelet version skew: %v", err)
+        return skewUnchecked, coStatus, err
+    }
+    var (
+        lastError      error
+        kubeletVersion string
+    )
+    nodes, err := optr.GetAllManagedNodes(pools)
+    if err != nil {
+        err = fmt.Errorf("getting all managed nodes failed: %v", err)
+        coStatus.Reason = skewUnchecked
+        coStatus.Message = fmt.Sprintf("An error occurred when getting all the managed nodes: %v", err.Error())
+    }
+    for _, node := range nodes {
+        // looks like kubeletVersion: v1.21.0-rc.0+6143dea
+        kubeletVersion = node.Status.NodeInfo.KubeletVersion
+        if kubeletVersion == "" {
+            continue
+        }
+        nodeMinorVersion, err := getMinorKubeletVersion(kubeletVersion)
+        if err != nil {
+            lastError = err
+            continue
+        }
+        if nodeMinorVersion+2 < kubeAPIServerMinorVersion {
+            coStatus.Reason = skewUnsupported
+            coStatus.Message = fmt.Sprintf("One or more nodes have an unsupported kubelet version skew. Please see `oc get nodes` for details and upgrade all nodes so that they have a kubelet version of at least %v.", getMinimalSkewSupportNodeVersion(kubeAPIServerVersion))
+            return skewUnsupported, coStatus, nil
+        }
+        if nodeMinorVersion+2 == kubeAPIServerMinorVersion {
+            coStatus.Reason = skewPresent
+            coStatus.Message = fmt.Sprintf("Current kubelet version %v will not be supported by newer kube-apiserver. Please upgrade the kubelet first if plan to upgrade the kube-apiserver", kubeletVersion)
+            return skewPresent, coStatus, nil
+        }
+    }
+    if kubeletVersion == "" {
+        err = fmt.Errorf("kubelet does not yet have a version")
+        coStatus.Reason = skewUnchecked
+        coStatus.Message = fmt.Sprintf("An error occurred when checking kubelet version skew: %v", err.Error())
+        return skewUnchecked, coStatus, err
+    }
+    if lastError != nil {
+        coStatus.Reason = skewUnchecked
+        coStatus.Message = fmt.Sprintf("An error occurred when checking kubelet version skew: %v", err)
+        return skewUnchecked, coStatus, lastError
+    }
+    return skewSupported, coStatus, nil
+}
+
+// GetAllManagedNodes returns the nodes managed by MCO
+func (optr *Operator) GetAllManagedNodes(pools []*v1.MachineConfigPool) ([]*corev1.Node, error) {
+    nodes := []*corev1.Node{}
+    for _, pool := range pools {
+        selector, err := metav1.LabelSelectorAsSelector(pool.Spec.NodeSelector)
+        if err != nil {
+            return nil, fmt.Errorf("label selector for pool %v failed %v", pool.Name, err)
+        }
+        poolNodes, err := optr.nodeLister.List(selector)
+        if err != nil {
+            return nil, fmt.Errorf("could not list nodes for pool %v with error %v", pool.Name, err)
+        }
+        nodes = append(nodes, poolNodes...)
+    }
+    return nodes, nil
+}
+
+// getMinorKubeletVersion parses the minor version number of kubelet
+func getMinorKubeletVersion(version string) (int, error) {
+    tokens := strings.Split(version, ".")
+    if len(tokens) < 2 {
+        return 0, fmt.Errorf("incorrect version syntax: %q", version)
+    }
+    minorVersion, err := strconv.ParseInt(tokens[1], 10, 32)
+    if err != nil {
+        return 0, err
+    }
+    return int(minorVersion), nil
+}
+
+// getMinimalSkewSupportNodeVersion returns the minimal supported node kubelet version.
+func getMinimalSkewSupportNodeVersion(version string) string {
+    // drop the pre-release and commit hash
+    if strings.Contains(version, "-") {
+        version = version[:strings.Index(version, "-")]
+    }
+    if strings.Contains(version, "+") {
+        version = version[:strings.Index(version, "+")]
+    }
+    tokens := strings.Split(version, ".")
+    if minorVersion, err := strconv.ParseInt(tokens[1], 10, 32); err == nil {
+        tokens[1] = strconv.Itoa(int(minorVersion) - 2)
+        return strings.Join(tokens, ".")
+    }
+    return version
+}
+
 func (optr *Operator) fetchClusterOperator() (*configv1.ClusterOperator, error) {
     co, err := optr.configClient.ConfigV1().ClusterOperators().Get(context.TODO(), optr.name, metav1.GetOptions{})
     if meta.IsNoMatchError(err) {
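
To make the thresholds concrete, here is a small self-contained sketch (not part of the commit) that mirrors getMinorKubeletVersion's parsing and the two comparisons in isKubeletSkewSupported, fed with the version formats noted in the code comments above:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// minor mirrors getMinorKubeletVersion: the minor version is the second dotted token.
func minor(version string) (int, error) {
    tokens := strings.Split(version, ".")
    if len(tokens) < 2 {
        return 0, fmt.Errorf("incorrect version syntax: %q", version)
    }
    m, err := strconv.ParseInt(tokens[1], 10, 32)
    if err != nil {
        return 0, err
    }
    return int(m), nil
}

func main() {
    apiMinor, _ := minor("1.21.0-rc.0") // kube-apiserver operand version format
    for _, kubelet := range []string{"v1.21.0-rc.0+6143dea", "v1.19.2", "v1.18.3"} {
        nodeMinor, err := minor(kubelet)
        if err != nil {
            fmt.Println(kubelet, "-> KubeletSkewUnchecked:", err)
            continue
        }
        switch {
        case nodeMinor+2 < apiMinor: // more than two minor versions behind
            fmt.Println(kubelet, "-> KubeletSkewUnsupported")
        case nodeMinor+2 == apiMinor: // exactly two minor versions behind
            fmt.Println(kubelet, "-> KubeletSkewPresent")
        default:
            fmt.Println(kubelet, "-> KubeletSkewSupported")
        }
    }
}

Against a 1.21 kube-apiserver this prints Supported, Present, and Unsupported for the three sample kubelets, which matches the expectations asserted in the tests below.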

Diff for: pkg/operator/status_test.go (+366 -6)

@@ -7,6 +7,10 @@ import (
     "reflect"
     "testing"

+    corelisterv1 "k8s.io/client-go/listers/core/v1"
+    clientgotesting "k8s.io/client-go/testing"
+    "k8s.io/client-go/tools/cache"
+
     corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
@@ -20,6 +24,7 @@ import (
     cov1helpers "github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers"
     mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
     ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common"
+    "github.com/openshift/machine-config-operator/test/helpers"
 )

 func TestIsMachineConfigPoolConfigurationValid(t *testing.T) {
@@ -152,12 +157,23 @@
     }
 }

-type mockMCPLister struct{}
+type mockMCPLister struct {
+    pools []*mcfgv1.MachineConfigPool
+}

 func (mcpl *mockMCPLister) List(selector labels.Selector) (ret []*mcfgv1.MachineConfigPool, err error) {
-    return nil, nil
+    return mcpl.pools, nil
 }
 func (mcpl *mockMCPLister) Get(name string) (ret *mcfgv1.MachineConfigPool, err error) {
+    if mcpl.pools == nil {
+        return nil, nil
+    }
+    for _, pool := range mcpl.pools {
+        if pool.Name == name {
+            return pool, nil
+        }
+
+    }
     return nil, nil
 }

@@ -550,7 +566,24 @@ func TestOperatorSyncStatus(t *testing.T) {
         eventRecorder: &record.FakeRecorder{},
     }
     optr.vStore = newVersionStore()
-    optr.mcpLister = &mockMCPLister{}
+    optr.mcpLister = &mockMCPLister{
+        pools: []*mcfgv1.MachineConfigPool{
+            helpers.NewMachineConfigPool("master", nil, helpers.MasterSelector, "v0"),
+            helpers.NewMachineConfigPool("workers", nil, helpers.WorkerSelector, "v0"),
+        },
+    }
+
+    nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
+    optr.nodeLister = corelisterv1.NewNodeLister(nodeIndexer)
+    nodeIndexer.Add(&corev1.Node{
+        ObjectMeta: metav1.ObjectMeta{Name: "first-node", Labels: map[string]string{"node-role/worker": ""}},
+        Status: corev1.NodeStatus{
+            NodeInfo: corev1.NodeSystemInfo{
+                KubeletVersion: "v1.21",
+            },
+        },
+    })
+
     coName := fmt.Sprintf("test-%s", uuid.NewUUID())
     co := &configv1.ClusterOperator{ObjectMeta: metav1.ObjectMeta{Name: coName}}
     cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorAvailable, Status: configv1.ConditionFalse})
@@ -559,6 +592,14 @@
     cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorUpgradeable, Status: configv1.ConditionUnknown})
     co.Status.Versions = append(co.Status.Versions, configv1.OperandVersion{Name: "operator", Version: "test-version"})
     optr.name = coName
+    kasOperator := &configv1.ClusterOperator{
+        ObjectMeta: metav1.ObjectMeta{Name: "kube-apiserver"},
+        Status: configv1.ClusterOperatorStatus{
+            Versions: []configv1.OperandVersion{
+                {Name: "kube-apiserver", Version: "1.21"},
+            },
+        },
+    }

     for j, sync := range testCase.syncs {
         optr.inClusterBringup = sync.inClusterBringUp
@@ -567,7 +608,7 @@
         } else {
             optr.vStore.Set("operator", "test-version")
         }
-        optr.configClient = fakeconfigclientset.NewSimpleClientset(co)
+        optr.configClient = fakeconfigclientset.NewSimpleClientset(co, kasOperator)
         err := optr.syncAll(sync.syncFuncs)
         if sync.expectOperatorFail {
             assert.NotNil(t, err, "test case %d, sync call %d, expected an error", idx, j)
@@ -597,12 +638,179 @@ func TestInClusterBringUpStayOnErr(t *testing.T) {
     }
     optr.vStore = newVersionStore()
     optr.vStore.Set("operator", "test-version")
-    optr.mcpLister = &mockMCPLister{}
+    optr.mcpLister = &mockMCPLister{
+        pools: []*mcfgv1.MachineConfigPool{
+            helpers.NewMachineConfigPool("master", nil, helpers.MasterSelector, "v0"),
+            helpers.NewMachineConfigPool("workers", nil, helpers.WorkerSelector, "v0"),
+        },
+    }
+    nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
+    optr.nodeLister = corelisterv1.NewNodeLister(nodeIndexer)
+    nodeIndexer.Add(&corev1.Node{
+        ObjectMeta: metav1.ObjectMeta{Name: "first-node", Labels: map[string]string{"node-role/worker": ""}},
+        Status: corev1.NodeStatus{
+            NodeInfo: corev1.NodeSystemInfo{
+                KubeletVersion: "v1.21",
+            },
+        },
+    })
+    co := &configv1.ClusterOperator{}
+    kasOperator := &configv1.ClusterOperator{
+        ObjectMeta: metav1.ObjectMeta{Name: "kube-apiserver"},
+        Status: configv1.ClusterOperatorStatus{
+            Versions: []configv1.OperandVersion{
+                {Name: "kube-apiserver", Version: "1.21"},
+            },
+        },
+    }
+    cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorAvailable, Status: configv1.ConditionFalse})
+    cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorProgressing, Status: configv1.ConditionFalse})
+    cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorDegraded, Status: configv1.ConditionFalse})
+    optr.configClient = fakeconfigclientset.NewSimpleClientset(co, kasOperator)
+    optr.inClusterBringup = true
+
+    fn1 := func(config *renderConfig) error {
+        return errors.New("mocked fn1")
+    }
+    err := optr.syncAll([]syncFunc{{name: "mock1", fn: fn1}})
+    assert.NotNil(t, err, "expected syncAll to fail")
+
+    assert.True(t, optr.inClusterBringup)
+
+    fn1 = func(config *renderConfig) error {
+        return nil
+    }
+    err = optr.syncAll([]syncFunc{{name: "mock1", fn: fn1}})
+    assert.Nil(t, err, "expected syncAll to pass")
+
+    assert.False(t, optr.inClusterBringup)
+}
+
+func TestKubeletSkewUnSupported(t *testing.T) {
+    kasOperator := &configv1.ClusterOperator{
+        ObjectMeta: metav1.ObjectMeta{Name: "kube-apiserver"},
+        Status: configv1.ClusterOperatorStatus{
+            Versions: []configv1.OperandVersion{
+                {Name: "kube-apiserver", Version: "1.21"},
+            },
+        },
+    }
+    optr := &Operator{
+        eventRecorder: &record.FakeRecorder{},
+    }
+    optr.vStore = newVersionStore()
+    optr.vStore.Set("operator", "test-version")
+    optr.mcpLister = &mockMCPLister{
+        pools: []*mcfgv1.MachineConfigPool{
+            helpers.NewMachineConfigPool("master", nil, helpers.MasterSelector, "v0"),
+            helpers.NewMachineConfigPool("workers", nil, helpers.WorkerSelector, "v0"),
+        },
+    }
+    nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
+    optr.nodeLister = corelisterv1.NewNodeLister(nodeIndexer)
+    nodeIndexer.Add(&corev1.Node{
+        ObjectMeta: metav1.ObjectMeta{Name: "first-node", Labels: map[string]string{"node-role/worker": ""}},
+        Status: corev1.NodeStatus{
+            NodeInfo: corev1.NodeSystemInfo{
+                KubeletVersion: "v1.18",
+            },
+        },
+    })
+
+    co := &configv1.ClusterOperator{}
+    cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorAvailable, Status: configv1.ConditionFalse})
+    cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorProgressing, Status: configv1.ConditionFalse})
+    cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorDegraded, Status: configv1.ConditionFalse})
+    fakeClient := fakeconfigclientset.NewSimpleClientset(co, kasOperator)
+    optr.configClient = fakeClient
+    optr.inClusterBringup = true
+
+    fn1 := func(config *renderConfig) error {
+        return errors.New("mocked fn1")
+    }
+    err := optr.syncAll([]syncFunc{{name: "mock1", fn: fn1}})
+    assert.NotNil(t, err, "expected syncAll to fail")
+
+    assert.True(t, optr.inClusterBringup)
+
+    fn1 = func(config *renderConfig) error {
+        return nil
+    }
+    err = optr.syncAll([]syncFunc{{name: "mock1", fn: fn1}})
+    assert.Nil(t, err, "expected syncAll to pass")
+
+    assert.False(t, optr.inClusterBringup)
+
+    var lastUpdate clientgotesting.UpdateAction
+    for _, action := range fakeClient.Actions() {
+        if action.GetVerb() == "update" {
+            lastUpdate = action.(clientgotesting.UpdateAction)
+        }
+    }
+    if lastUpdate == nil {
+        t.Fatal("missing update")
+    }
+    operatorStatus := lastUpdate.GetObject().(*configv1.ClusterOperator)
+    var upgradeable *configv1.ClusterOperatorStatusCondition
+    for _, condition := range operatorStatus.Status.Conditions {
+        if condition.Type == configv1.OperatorUpgradeable {
+            upgradeable = &condition
+            break
+        }
+    }
+    if upgradeable == nil {
+        t.Fatal("missing condition")
+    }
+    if upgradeable.Status != configv1.ConditionTrue {
+        t.Fatal(upgradeable)
+    }
+    if upgradeable.Message != "One or more nodes have an unsupported kubelet version skew. Please see `oc get nodes` for details and upgrade all nodes so that they have a kubelet version of at least 1.19." {
+        t.Fatal(upgradeable)
+    }
+    if upgradeable.Reason != "KubeletSkewUnsupported" {
+        t.Fatal(upgradeable)
+    }
+}
+
+func TestCustomPoolKubeletSkewUnSupported(t *testing.T) {
+    customSelector := metav1.AddLabelToSelector(&metav1.LabelSelector{}, "node-role/custom", "")
+    kasOperator := &configv1.ClusterOperator{
+        ObjectMeta: metav1.ObjectMeta{Name: "kube-apiserver"},
+        Status: configv1.ClusterOperatorStatus{
+            Versions: []configv1.OperandVersion{
+                {Name: "kube-apiserver", Version: "1.21"},
+            },
+        },
+    }
+    optr := &Operator{
+        eventRecorder: &record.FakeRecorder{},
+    }
+    optr.vStore = newVersionStore()
+    optr.vStore.Set("operator", "test-version")
+    optr.mcpLister = &mockMCPLister{
+        pools: []*mcfgv1.MachineConfigPool{
+            helpers.NewMachineConfigPool("master", nil, helpers.MasterSelector, "v0"),
+            helpers.NewMachineConfigPool("workers", nil, helpers.WorkerSelector, "v0"),
+            helpers.NewMachineConfigPool("custom", nil, customSelector, "v0"),
+        },
+    }
+    nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
+    optr.nodeLister = corelisterv1.NewNodeLister(nodeIndexer)
+    nodeIndexer.Add(&corev1.Node{
+        ObjectMeta: metav1.ObjectMeta{Name: "custom", Labels: map[string]string{"node-role/custom": ""}},
+        Status: corev1.NodeStatus{
+            NodeInfo: corev1.NodeSystemInfo{
+                KubeletVersion: "v1.18",
+            },
+        },
+    })
+
     co := &configv1.ClusterOperator{}
     cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorAvailable, Status: configv1.ConditionFalse})
     cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorProgressing, Status: configv1.ConditionFalse})
     cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorDegraded, Status: configv1.ConditionFalse})
-    optr.configClient = fakeconfigclientset.NewSimpleClientset(co)
+    fakeClient := fakeconfigclientset.NewSimpleClientset(co, kasOperator)
+    optr.configClient = fakeClient
     optr.inClusterBringup = true

     fn1 := func(config *renderConfig) error {
@@ -620,4 +828,156 @@ func TestInClusterBringUpStayOnErr(t *testing.T) {
     assert.Nil(t, err, "expected syncAll to pass")

     assert.False(t, optr.inClusterBringup)
+
+    var lastUpdate clientgotesting.UpdateAction
+    for _, action := range fakeClient.Actions() {
+        if action.GetVerb() == "update" {
+            lastUpdate = action.(clientgotesting.UpdateAction)
+        }
+    }
+    if lastUpdate == nil {
+        t.Fatal("missing update")
+    }
+    operatorStatus := lastUpdate.GetObject().(*configv1.ClusterOperator)
+    var upgradeable *configv1.ClusterOperatorStatusCondition
+    for _, condition := range operatorStatus.Status.Conditions {
+        if condition.Type == configv1.OperatorUpgradeable {
+            upgradeable = &condition
+            break
+        }
+    }
+    if upgradeable == nil {
+        t.Fatal("missing condition")
+    }
+    if upgradeable.Status != configv1.ConditionTrue {
+        t.Fatal(upgradeable)
+    }
+    if upgradeable.Message != "One or more nodes have an unsupported kubelet version skew. Please see `oc get nodes` for details and upgrade all nodes so that they have a kubelet version of at least 1.19." {
+        t.Fatal(upgradeable)
+    }
+    if upgradeable.Reason != "KubeletSkewUnsupported" {
+        t.Fatal(upgradeable)
+    }
+}
+
+func TestKubeletSkewSupported(t *testing.T) {
+    kasOperator := &configv1.ClusterOperator{
+        ObjectMeta: metav1.ObjectMeta{Name: "kube-apiserver"},
+        Status: configv1.ClusterOperatorStatus{
+            Versions: []configv1.OperandVersion{
+                {Name: "kube-apiserver", Version: "1.21"},
+            },
+        },
+    }
+    optr := &Operator{
+        eventRecorder: &record.FakeRecorder{},
+    }
+    optr.vStore = newVersionStore()
+    optr.vStore.Set("operator", "test-version")
+    optr.mcpLister = &mockMCPLister{
+        pools: []*mcfgv1.MachineConfigPool{
+            helpers.NewMachineConfigPool("master", nil, helpers.MasterSelector, "v0"),
+            helpers.NewMachineConfigPool("workers", nil, helpers.WorkerSelector, "v0"),
+        },
+    }
+    nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
+    optr.nodeLister = corelisterv1.NewNodeLister(nodeIndexer)
+    nodeIndexer.Add(&corev1.Node{
+        ObjectMeta: metav1.ObjectMeta{Name: "first-node", Labels: map[string]string{"node-role/worker": ""}},
+        Status: corev1.NodeStatus{
+            NodeInfo: corev1.NodeSystemInfo{
+                KubeletVersion: "v1.20",
+            },
+        },
+    })
+
+    co := &configv1.ClusterOperator{}
+    cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorAvailable, Status: configv1.ConditionFalse})
+    cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorProgressing, Status: configv1.ConditionFalse})
+    cov1helpers.SetStatusCondition(&co.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorDegraded, Status: configv1.ConditionFalse})
+    fakeClient := fakeconfigclientset.NewSimpleClientset(co, kasOperator)
+    optr.configClient = fakeClient
+    optr.inClusterBringup = true
+
+    fn1 := func(config *renderConfig) error {
+        return errors.New("mocked fn1")
+    }
+    err := optr.syncAll([]syncFunc{{name: "mock1", fn: fn1}})
+    assert.NotNil(t, err, "expected syncAll to fail")
+
+    assert.True(t, optr.inClusterBringup)
+
+    fn1 = func(config *renderConfig) error {
+        return nil
+    }
+    err = optr.syncAll([]syncFunc{{name: "mock1", fn: fn1}})
+    assert.Nil(t, err, "expected syncAll to pass")
+
+    assert.False(t, optr.inClusterBringup)
+
+    var lastUpdate clientgotesting.UpdateAction
+    for _, action := range fakeClient.Actions() {
+        if action.GetVerb() == "update" {
+            lastUpdate = action.(clientgotesting.UpdateAction)
+        }
+    }
+    if lastUpdate == nil {
+        t.Fatal("missing update")
+    }
+    operatorStatus := lastUpdate.GetObject().(*configv1.ClusterOperator)
+    var upgradeable *configv1.ClusterOperatorStatusCondition
+    for _, condition := range operatorStatus.Status.Conditions {
+        if condition.Type == configv1.OperatorUpgradeable {
+            upgradeable = &condition
+            break
+        }
+    }
+    if upgradeable == nil {
+        t.Fatal("missing condition")
+    }
+    if upgradeable.Status != configv1.ConditionTrue {
+        t.Fatal(upgradeable)
+    }
+    if upgradeable.Message != "" {
+        t.Fatal(upgradeable)
+    }
+    if upgradeable.Reason != "AsExpected" {
+        t.Fatal(upgradeable)
+    }
+}
+
+func TestGetMinorKubeletVersion(t *testing.T) {
+    tcs := []struct {
+        version      string
+        minor        int
+        expectNilErr bool
+    }{
+        {"v1.20.1", 20, true},
+        {"v1.20.1+abc0", 20, true},
+        {"v1.20.1+0123", 20, true},
+        {"v1.20.1-rc", 20, true},
+        {"v1.20.1-rc.1", 20, true},
+        {"v1.20.1-rc+abc123", 20, true},
+        {"v1.20.1-rc.0+abc123", 20, true},
+        {"v1.20.1", 20, true},
+        {"1.20.1", 20, true},
+        {"1.20", 20, true},
+        {"12", 0, false},
+        {".xy", 0, false},
+        {"1.xy.1", 0, false},
+    }
+    for _, tc := range tcs {
+        minorV, err := getMinorKubeletVersion(tc.version)
+        if tc.expectNilErr && err != nil {
+            t.Errorf("test %q failed: unexpected error %v", tc.version, err)
+            continue
+        }
+        if !tc.expectNilErr && err == nil {
+            t.Errorf("test %q failed: expected error, got nil", tc.version)
+            continue
+        }
+        if tc.expectNilErr {
+            assert.Equal(t, tc.minor, minorV, fmt.Sprintf("failed test %q", tc.version))
+        }
+    }
 }
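
A detail worth noting about the asserted messages: for the "1.21" kube-apiserver version used in these tests, getMinimalSkewSupportNodeVersion drops any pre-release or build-metadata suffix and subtracts two from the minor version, yielding "1.19", which is exactly the floor quoted in the expected KubeletSkewUnsupported message.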
