OCPNODE-3029: WIP: handle required minimum kubelet version featuregate rollout #4929

Open · wants to merge 3 commits into base: main
7 changes: 5 additions & 2 deletions go.mod
@@ -194,7 +194,6 @@ require (
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/emicklei/go-restful/v3 v3.12.1 // indirect
 	github.com/ettle/strcase v0.2.0 // indirect
-	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
 	github.com/evanphx/json-patch/v5 v5.9.11 // indirect
 	github.com/fatih/color v1.18.0 // indirect
 	github.com/fatih/structtag v1.2.0 // indirect
@@ -349,4 +348,8 @@ require (
 	sigs.k8s.io/yaml v1.4.0
 )
 
-replace k8s.io/kube-openapi => github.com/openshift/kube-openapi v0.0.0-20230816122517-ffc8f001abb0
+replace (
+	github.com/openshift/api => github.com/haircommander/api v0.0.0-20250326145233-4e692eb778c9
+	github.com/openshift/library-go => github.com/haircommander/library-go v0.0.0-20250320175542-74e1e32dfa76
+	k8s.io/kube-openapi => github.com/openshift/kube-openapi v0.0.0-20230816122517-ffc8f001abb0
+)
10 changes: 4 additions & 6 deletions go.sum
@@ -175,8 +175,6 @@ github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtz
 github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q=
 github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A=
-github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
-github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
 github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
 github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
@@ -353,6 +351,10 @@ github.com/gostaticanalysis/testutil v0.5.0 h1:Dq4wT1DdTwTGCQQv3rl3IvD5Ld0E6HiY+
 github.com/gostaticanalysis/testutil v0.5.0/go.mod h1:OLQSbuM6zw2EvCcXTz1lVq5unyoNft372msDY0nY5Hs=
 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/haircommander/api v0.0.0-20250326145233-4e692eb778c9 h1:jIXShZeM5WspAIoEUrn4LiX7wUGgRB9DbhJQ2ja6/8s=
+github.com/haircommander/api v0.0.0-20250326145233-4e692eb778c9/go.mod h1:nEKlkdw/X9V+7gyPxl/ZeP7T14q8e1EvmQ/XsIHVZ90=
+github.com/haircommander/library-go v0.0.0-20250320175542-74e1e32dfa76 h1:wR5ZTd7JUYf6rrYe2X3Cq4KqoXvu0kOB8GBvrH7y+GM=
+github.com/haircommander/library-go v0.0.0-20250320175542-74e1e32dfa76/go.mod h1:Z7PxlyxJDS5OZBapcjOi9ARH1eCmhCn1jt3v7apEyuU=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -513,14 +515,10 @@ github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQ
 github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
 github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
 github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/openshift/api v0.0.0-20250402094343-3d7abe90f97e h1:+nJrGJMAhBH8yhXe7u3z44IWA/kHtzgjmpOAry+aeb4=
-github.com/openshift/api v0.0.0-20250402094343-3d7abe90f97e/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw=
 github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2 h1:bPXR0R8zp1o12nSUphN26hSM+OKYq5pMorbDCpApzDQ=
 github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2/go.mod h1:dT1cJyVTperQ53GvVRa+GZ27r02fDZy2k5j+9QoQsCo=
 github.com/openshift/kube-openapi v0.0.0-20230816122517-ffc8f001abb0 h1:GPlAy197Jkr+D0T2FNWanamraTdzS/r9ZkT29lxvHaA=
 github.com/openshift/kube-openapi v0.0.0-20230816122517-ffc8f001abb0/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
-github.com/openshift/library-go v0.0.0-20250129210218-fe56c2cf5d70 h1:VLj8CU9q009xlMuR4wNcqDX4lVa2Ji3u/iYnBLHtQUc=
-github.com/openshift/library-go v0.0.0-20250129210218-fe56c2cf5d70/go.mod h1:TQx0VEhZ/92qRXIMDu2Wg4bUPmw5HRNE6wpSZ+IsP0Y=
 github.com/openshift/runtime-utils v0.0.0-20230921210328-7bdb5b9c177b h1:oXzC1N6E9gw76/WH2gEA8GEHvuq09wuVQ9GoCuR8GF4=
 github.com/openshift/runtime-utils v0.0.0-20230921210328-7bdb5b9c177b/go.mod h1:l9/qeKZuAmYUMl0yicJlbkPGDsIycGhwxOvOAWyaP0E=
 github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
9 changes: 8 additions & 1 deletion pkg/controller/kubelet-config/kubelet_config_bootstrap.go
@@ -26,6 +26,13 @@ func RunKubeletBootstrap(templateDir string, kubeletConfigs []*mcfgv1.KubeletCon
 	if nodeConfig == nil {
 		nodeConfig = createNewDefaultNodeconfig()
 	}
+
+	// TODO FIXME: ignoring min kubelet version writeback for now
+	featureGates, _, err := generateFeatureMap(featureGateAccess, openshiftOnlyFeatureGates...)
+	if err != nil {
+		return nil, fmt.Errorf("could not generate features map: %w", err)
+	}
+
 	for _, kubeletConfig := range kubeletConfigs {
 		// use selector since label matching part of a KubeletConfig is not handled during the bootstrap
 		selector, err := metav1.LabelSelectorAsSelector(kubeletConfig.Spec.MachineConfigPoolSelector)
@@ -41,7 +48,7 @@ func RunKubeletBootstrap(templateDir string, kubeletConfigs []*mcfgv1.KubeletCon
 		}
 		role := pool.Name
 
-		originalKubeConfig, err := generateOriginalKubeletConfigWithFeatureGates(controllerConfig, templateDir, role, featureGateAccess, apiServer)
+		originalKubeConfig, err := generateOriginalKubeletConfigWithFeatureGates(controllerConfig, templateDir, role, featureGates, apiServer)
 		if err != nil {
 			return nil, err
 		}
79 changes: 71 additions & 8 deletions pkg/controller/kubelet-config/kubelet_config_controller.go
@@ -42,6 +42,7 @@ import (
 	mcfglistersv1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1"
 	"github.com/openshift/library-go/pkg/operator/configobserver/featuregates"
 	"github.com/openshift/machine-config-operator/pkg/apihelpers"
+	"github.com/openshift/machine-config-operator/pkg/constants"
 	ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common"
 	mtmpl "github.com/openshift/machine-config-operator/pkg/controller/template"
 	"github.com/openshift/machine-config-operator/pkg/version"
@@ -420,7 +421,7 @@ func (ctrl *Controller) handleFeatureErr(err error, key string) {
 
 // generateOriginalKubeletConfigWithFeatureGates generates a KubeletConfig and ensure the correct feature gates are set
 // based on the given FeatureGate.
-func generateOriginalKubeletConfigWithFeatureGates(cc *mcfgv1.ControllerConfig, templatesDir, role string, featureGateAccess featuregates.FeatureGateAccess, apiServer *configv1.APIServer) (*kubeletconfigv1beta1.KubeletConfiguration, error) {
+func generateOriginalKubeletConfigWithFeatureGates(cc *mcfgv1.ControllerConfig, templatesDir, role string, featureGates map[string]bool, apiServer *configv1.APIServer) (*kubeletconfigv1beta1.KubeletConfiguration, error) {
 	originalKubeletIgn, err := generateOriginalKubeletConfigIgn(cc, templatesDir, role, apiServer)
 	if err != nil {
 		return nil, fmt.Errorf("could not generate the original Kubelet config ignition: %w", err)
@@ -437,11 +438,6 @@ func generateOriginalKubeletConfigWithFeatureGates(cc *mcfgv1.ControllerConfig,
 		return nil, fmt.Errorf("could not deserialize the Kubelet source: %w", err)
 	}
 
-	featureGates, err := generateFeatureMap(featureGateAccess, openshiftOnlyFeatureGates...)
-	if err != nil {
-		return nil, fmt.Errorf("could not generate features map: %w", err)
-	}
-
 	// Merge in Feature Gates.
 	// If they are the same, this will be a no-op
 	if err := mergo.Merge(&originalKubeConfig.FeatureGates, featureGates, mergo.WithOverride); err != nil {
@@ -533,7 +529,7 @@ func (ctrl *Controller) addAnnotation(cfg *mcfgv1.KubeletConfig, annotationKey,
 // This function is not meant to be invoked concurrently with the same key.
 //
 //nolint:gocyclo
-func (ctrl *Controller) syncKubeletConfig(key string) error {
+func (ctrl *Controller) syncKubeletConfig(key string) (retErr error) {
 	startTime := time.Now()
 	klog.V(4).Infof("Started syncing kubeletconfig %q (%v)", key, startTime)
 	defer func() {
@@ -600,6 +596,13 @@ func (ctrl *Controller) syncKubeletConfig(key string) error {
 		return ctrl.syncStatusOnly(cfg, err, "could not get the TLSSecurityProfile from %v: %v", ctrlcommon.APIServerInstanceName, err)
 	}
 
+	updatedPools := map[string]int64{}
+
+	featureGates, renderedVersions, err := generateFeatureMap(ctrl.featureGateAccess, openshiftOnlyFeatureGates...)
+	if err != nil {
+		return fmt.Errorf("could not generate features map: %w", err)
+	}
+
 	for _, pool := range mcpPools {
 		if pool.Spec.Configuration.Name == "" {
 			updateDelay := 5 * time.Second
@@ -634,7 +637,7 @@ func (ctrl *Controller) syncKubeletConfig(key string) error {
 			return fmt.Errorf("could not get ControllerConfig %w", err)
 		}
 
-		originalKubeConfig, err := generateOriginalKubeletConfigWithFeatureGates(cc, ctrl.templatesDir, role, ctrl.featureGateAccess, apiServer)
+		originalKubeConfig, err := generateOriginalKubeletConfigWithFeatureGates(cc, ctrl.templatesDir, role, featureGates, apiServer)
 		if err != nil {
 			return ctrl.syncStatusOnly(cfg, err, "could not get original kubelet config: %v", err)
 		}
@@ -725,13 +728,73 @@ func (ctrl *Controller) syncKubeletConfig(key string) error {
 		}
 		klog.Infof("Applied KubeletConfig %v on MachineConfigPool %v", key, pool.Name)
 		ctrlcommon.UpdateStateMetric(ctrlcommon.MCCSubControllerState, "machine-config-controller-kubelet-config", "Sync Kubelet Config", pool.Name)
+		updatedPools[pool.Name] = pool.Status.ObservedGeneration
 	}
+	go ctrl.writebackMinimumKubeletVersionIfAppropriate(updatedPools, renderedVersions, nodeConfig, func() ([]*mcfgv1.MachineConfigPool, error) {
Contributor: Any particular reason this is a separate goroutine?

Member (author): I thought it could be async in case it takes a while for the MCPs to roll back, and I didn't know what could be blocked by having it run synchronously.

Member: I think a better solution might be to push this onto one of the work queues by doing something like this:
func (ctrl *Controller) syncKubeletConfig(key string) error {
	// Key lookup stuff above here.
	// Here, we detect that we need to do this for the current kubeletconfig, so we just kick that off.
	if ctrl.writeMinimumKubeletVersion[kubeletCfg.Name] {
		defer func() {
			delete(ctrl.writeMinimumKubeletVersion, kubeletCfg.Name)
		}()
		return ctrl.writebackMinimumKubeletVersionIfAppropriate(...)
	}

	//
	// Bulk of function above here.
	//

	if ctrl.isMinimumKubeletVersionWritebackNeeded(...) {
		// Here, we update our internal state to indicate that we need to perform this action
		// and enqueue the action.
		ctrl.writeMinimumKubeletVersion[kubeletCfg.Name] = true
		return ctrl.enqueue(kubeletCfg)
	}

	// End of function
}

We'll probably want to use a sync.Map since the work queue has multiple workers that could mutate the controller struct state at any given time.
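A minimal sketch of that idea (the pendingWriteback field and helper names are assumptions for illustration, not code from this PR): sync.Map's LoadAndDelete makes the check-and-clear atomic, so even with multiple queue workers only one of them performs the writeback for a given KubeletConfig.

// pendingWriteback would be a field on Controller, keyed by KubeletConfig name:
//     pendingWriteback sync.Map // name -> struct{}

// markWritebackPending flags a config for a follow-up writeback pass.
func (ctrl *Controller) markWritebackPending(name string) {
	ctrl.pendingWriteback.Store(name, struct{}{})
}

// takeWritebackPending atomically checks and clears the flag, so a second
// worker syncing the same key sees false and skips the duplicate writeback.
func (ctrl *Controller) takeWritebackPending(name string) bool {
	_, loaded := ctrl.pendingWriteback.LoadAndDelete(name)
	return loaded
}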

+		return ctrl.getPoolsForKubeletConfig(cfg)
+	})
 	if err := ctrl.cleanUpDuplicatedMC(managedKubeletConfigKeyPrefix); err != nil {
 		return err
 	}
 	return ctrl.syncStatusOnly(cfg, nil)
 }

+func (ctrl *Controller) writebackMinimumKubeletVersionIfAppropriate(updatedPools map[string]int64, renderedVersions []configv1.MinimumComponentVersion, node *osev1.Node, poolGetter func() ([]*mcfgv1.MachineConfigPool, error)) {
+	renderedKubeletVersion := ""
+	for _, cv := range renderedVersions {
+		if cv.Component == configv1.MinimumComponentKubelet {
+			renderedKubeletVersion = cv.Version
+		}
+	}
+	if node.Spec.MinimumKubeletVersion == node.Status.MinimumKubeletVersion &&
+		node.Status.MinimumKubeletVersion == renderedKubeletVersion {
+		klog.InfoS("Skipping writeback to nodes.config.Status.MinimumKubeletVersion because situation not correct",
Contributor: Question: why are these conditions not correct? Isn't the first one just saying that the spec and status match what we expect?

Member (author): If the status matches the spec, there's no need to do the update (though I probably should also check renderedKubeletVersion there). For the second, if the status already matches the rendered version, then we've already done the writeback for this rendered version. For the third: we're rendering a different version than what is set in the spec. I think I have to rework this condition, though.
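A sketch of the reworked skip check the author describes (the helper name and shape are illustrative, not from this PR):

// shouldSkipWriteback returns true when no status writeback is needed:
// either the status already records this rendered version, or the
// rendered version no longer matches what the spec asks for.
func shouldSkipWriteback(spec, status, rendered string) bool {
	if status == rendered {
		return true // writeback already done for this rendered version
	}
	if spec != rendered {
		return true // rendering a different version than the spec; a re-render is coming
	}
	return false // spec == rendered but status lags: do the writeback
}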

"nodes.config.Spec.MinimumKubeletVerison", node.Spec.MinimumKubeletVersion,
"nodes.config.Status.MinimumKubeletVerison", node.Status.MinimumKubeletVersion,
"renderedKubeletVersion", renderedKubeletVersion)
return
}

// This featuregate rollout was done as a result of a new minimum kubelet version rolling out, which means we need to wait for at least one
// node in each MCP to finish updating before we set the spec.
if err := wait.ExponentialBackoff(constants.NodeUpdateBackoff, func() (bool, error) {
mcps, err := poolGetter()
if err != nil {
return true, err
}
allUpdated := true
for _, mcp := range mcps {
if oldGeneration, ok := updatedPools[mcp.Name]; ok && (mcp.Status.UpdatedMachineCount == 0 && mcp.Status.ObservedGeneration > oldGeneration) {
allUpdated = false
}
}
return allUpdated, nil
}); err != nil {
klog.Errorf("Failed to update rendered kubelet version: %v", err)
}

if err := retry.RetryOnConflict(updateBackoff, func() error {
// Fetch the NodeConfig
nodeConfig, err := ctrl.nodeConfigLister.Get(ctrlcommon.ClusterNodeInstanceName)
if macherrors.IsNotFound(err) {
nodeConfig = createNewDefaultNodeconfig()
}
if nodeConfig.Spec.MinimumKubeletVersion != renderedKubeletVersion {
// skip this update, as we no longer want this spec value
return nil
}
if nodeConfig.Status.MinimumKubeletVersion == renderedKubeletVersion {
// this happened somewhere else, skip
return nil
}
nodeConfig.Status.MinimumKubeletVersion = renderedKubeletVersion
_, err = ctrl.configClient.ConfigV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})
return err
}); err != nil {
klog.Errorf("Failed to update rendered kubelet version to node status: %v", err)
}
}

// cleanUpDuplicatedMC removes the MC of non-updated GeneratedByControllerVersionKey if its name contains 'generated-kubelet'.
// BZ 1955517: upgrade when there are more than one configs, the duplicated and upgraded MC will be generated (func getManagedKubeletConfigKey())
// MC with old GeneratedByControllerVersionKey fails the upgrade.
47 changes: 32 additions & 15 deletions pkg/controller/kubelet-config/kubelet_config_features.go
@@ -17,6 +17,7 @@ import (
 	"k8s.io/client-go/util/retry"
 	"k8s.io/klog/v2"
 
+	configv1 "github.com/openshift/api/config/v1"
 	mcfgv1 "github.com/openshift/api/machineconfiguration/v1"
 	ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common"
 	"github.com/openshift/machine-config-operator/pkg/version"
@@ -73,14 +74,20 @@ func (ctrl *Controller) syncFeatureHandler(key string) error {
 		return fmt.Errorf("could not get the TLSSecurityProfile from %v: %v", ctrlcommon.APIServerInstanceName, err)
 	}
 
+	featureGates, renderedVersions, err := generateFeatureMap(ctrl.featureGateAccess, openshiftOnlyFeatureGates...)
+	if err != nil {
+		return fmt.Errorf("could not generate features map: %w", err)
+	}
+	// Fetch the Node Config object
+	nodeConfig, err := ctrl.nodeConfigLister.Get(ctrlcommon.ClusterNodeInstanceName)
+	if errors.IsNotFound(err) {
+		nodeConfig = createNewDefaultNodeconfig()
+	}
+
+	updatedPools := map[string]int64{}
+
 	for _, pool := range mcpPools {
-		var nodeConfig *osev1.Node
 		role := pool.Name
-		// Fetch the Node Config object
-		nodeConfig, err = ctrl.nodeConfigLister.Get(ctrlcommon.ClusterNodeInstanceName)
-		if errors.IsNotFound(err) {
-			nodeConfig = createNewDefaultNodeconfig()
-		}
 		// Get MachineConfig
 		managedKey, err := getManagedFeaturesKey(pool, ctrl.client)
 		if err != nil {
@@ -99,7 +106,7 @@ func (ctrl *Controller) syncFeatureHandler(key string) error {
 			}
 		}
 
-		rawCfgIgn, err := generateKubeConfigIgnFromFeatures(cc, ctrl.templatesDir, role, ctrl.featureGateAccess, nodeConfig, apiServer)
+		rawCfgIgn, err := generateKubeConfigIgnFromFeatures(cc, ctrl.templatesDir, role, featureGates, nodeConfig, apiServer)
 		if err != nil {
 			return err
 		}
@@ -125,9 +132,12 @@ func (ctrl *Controller) syncFeatureHandler(key string) error {
 		}
 		klog.Infof("Applied FeatureSet %v on MachineConfigPool %v", key, pool.Name)
 		ctrlcommon.UpdateStateMetric(ctrlcommon.MCCSubControllerState, "machine-config-controller-kubelet-config", "Sync FeatureSet", pool.Name)
+		updatedPools[pool.Name] = pool.Status.ObservedGeneration
 	}
+	go ctrl.writebackMinimumKubeletVersionIfAppropriate(updatedPools, renderedVersions, nodeConfig, func() ([]*mcfgv1.MachineConfigPool, error) {
+		return ctrl.mcpLister.List(labels.Everything())
+	})
 	return ctrl.cleanUpDuplicatedMC(managedFeaturesKeyPrefix)
 }

func (ctrl *Controller) enqueueFeature(feat *osev1.FeatureGate) {
@@ -174,15 +184,16 @@ func (ctrl *Controller) deleteFeature(obj interface{}) {
 // generateFeatureMap returns a map of enabled/disabled feature gate selection with exclusion list
 //
 //nolint:gocritic
-func generateFeatureMap(featuregateAccess featuregates.FeatureGateAccess, exclusions ...osev1.FeatureGateName) (*map[string]bool, error) {
+func generateFeatureMap(featuregateAccess featuregates.FeatureGateAccess, exclusions ...osev1.FeatureGateName) (map[string]bool, []configv1.MinimumComponentVersion, error) {
 	rv := make(map[string]bool)
 
 	if !featuregateAccess.AreInitialFeatureGatesObserved() {
-		return nil, fmt.Errorf("initial feature gates are not observed")
+		return nil, nil, fmt.Errorf("initial feature gates are not observed")
 	}
 
 	features, err := featuregateAccess.CurrentFeatureGates()
 	if err != nil {
-		return nil, fmt.Errorf("could not get current feature gates: %w", err)
+		return nil, nil, fmt.Errorf("could not get current feature gates: %w", err)
 	}
 
 	for _, feat := range features.KnownFeatures() {
@@ -197,11 +208,11 @@ func generateFeatureMap(featuregateAccess featuregates.FeatureGateAccess, exclus
 	for _, excluded := range exclusions {
 		delete(rv, string(excluded))
 	}
-	return &rv, nil
+	return rv, features.RenderedMinimumComponentVersions(), nil
 }
 
-func generateKubeConfigIgnFromFeatures(cc *mcfgv1.ControllerConfig, templatesDir, role string, featureGateAccess featuregates.FeatureGateAccess, nodeConfig *osev1.Node, apiServer *osev1.APIServer) ([]byte, error) {
-	originalKubeConfig, err := generateOriginalKubeletConfigWithFeatureGates(cc, templatesDir, role, featureGateAccess, apiServer)
+func generateKubeConfigIgnFromFeatures(cc *mcfgv1.ControllerConfig, templatesDir, role string, featureGates map[string]bool, nodeConfig *osev1.Node, apiServer *osev1.APIServer) ([]byte, error) {
+	originalKubeConfig, err := generateOriginalKubeletConfigWithFeatureGates(cc, templatesDir, role, featureGates, apiServer)
 	if err != nil {
 		return nil, err
 	}
@@ -227,12 +238,18 @@ func RunFeatureGateBootstrap(templateDir string, featureGateAccess featuregates.FeatureGateAccess, nodeConfig *osev1.Node, controllerConfig *mcfgv1.ControllerConfig, mcpPools []*mcfgv1.MachineConfigPool, apiServer *osev1.APIServer) ([]*mcfgv1.MachineConfig, error) {
 	machineConfigs := []*mcfgv1.MachineConfig{}
 
+	// TODO FIXME: do we need rendered versions here?
+	featureGates, _, err := generateFeatureMap(featureGateAccess, openshiftOnlyFeatureGates...)
+	if err != nil {
+		return nil, fmt.Errorf("could not generate features map: %w", err)
+	}
+
 	for _, pool := range mcpPools {
 		role := pool.Name
 		if nodeConfig == nil {
 			nodeConfig = createNewDefaultNodeconfig()
 		}
-		rawCfgIgn, err := generateKubeConfigIgnFromFeatures(controllerConfig, templateDir, role, featureGateAccess, nodeConfig, apiServer)
+		rawCfgIgn, err := generateKubeConfigIgnFromFeatures(controllerConfig, templateDir, role, featureGates, nodeConfig, apiServer)
 		if err != nil {
 			return nil, err
 		}