Skip to content

Commit eaff387

Browse files
committed
feat: Add dynamic addon handler application via controller
This commit enables applying lifecycle hooks outside of the upgrade flow. Dynamic updates to cluster variables will now trigger application of the corresponding lifecycle hooks without requiring an upgrade to be in progress.
1 parent b6ed5e9 commit eaff387

File tree

8 files changed

+222
-101
lines changed

8 files changed

+222
-101
lines changed

cmd/main.go

Lines changed: 14 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ import (
3030
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/api/v1alpha1"
3131
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/handlers"
3232
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/server"
33+
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/controllers/addons"
3334
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/controllers/namespacesync"
3435
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/aws"
3536
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/pkg/handlers/docker"
@@ -100,6 +101,7 @@ func main() {
100101
genericMetaHandlers := generic.New()
101102

102103
namespacesyncOptions := namespacesync.Options{}
104+
addonsOptions := addons.Options{}
103105

104106
// Initialize and parse command line flags.
105107
logs.AddFlags(pflag.CommandLine, logs.SkipLoggingConfigurationFlags())
@@ -111,6 +113,7 @@ func main() {
111113
dockerMetaHandlers.AddFlags(pflag.CommandLine)
112114
nutanixMetaHandlers.AddFlags(pflag.CommandLine)
113115
namespacesyncOptions.AddFlags(pflag.CommandLine)
116+
addonsOptions.AddFlags(pflag.CommandLine)
114117
pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
115118
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
116119
pflag.Parse()
@@ -134,8 +137,9 @@ func main() {
134137
os.Exit(1)
135138
}
136139

140+
lifecycleHandlers := genericLifecycleHandlers.AllHandlers(mgr)
137141
var allHandlers []handlers.Named
138-
allHandlers = append(allHandlers, genericLifecycleHandlers.AllHandlers(mgr)...)
142+
allHandlers = append(allHandlers, lifecycleHandlers...)
139143
allHandlers = append(allHandlers, awsMetaHandlers.AllHandlers(mgr)...)
140144
allHandlers = append(allHandlers, dockerMetaHandlers.AllHandlers(mgr)...)
141145
allHandlers = append(allHandlers, nutanixMetaHandlers.AllHandlers(mgr)...)
@@ -174,6 +178,15 @@ func main() {
174178
os.Exit(1)
175179
}
176180

181+
if err := addons.NewController(mgr.GetClient(), lifecycleHandlers).SetupWithManager(
182+
signalCtx,
183+
mgr,
184+
controller.Options{MaxConcurrentReconciles: namespacesyncOptions.Concurrency},
185+
); err != nil {
186+
setupLog.Error(err, "unable to create controller", "controller", "addons.Reconciler")
187+
os.Exit(1)
188+
}
189+
177190
if err := mgr.Start(signalCtx); err != nil {
178191
setupLog.Error(err, "unable to start controller manager")
179192
os.Exit(1)

common/pkg/capi/clustertopology/handlers/lifecycle/interfaces.go

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ package lifecycle
66
import (
77
"context"
88

9+
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
910
runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
1011
)
1112

@@ -44,3 +45,10 @@ type BeforeClusterDelete interface {
4445
*runtimehooksv1.BeforeClusterDeleteResponse,
4546
)
4647
}
48+
49+
// OnClusterSpecUpdated is implemented by lifecycle handlers that want to be
// re-applied when a Cluster's spec is updated outside of the normal runtime
// hook flow (e.g. by the addons controller watching Cluster objects).
// Implementations receive the updated Cluster and return an error if
// re-application fails.
type OnClusterSpecUpdated interface {
	OnClusterSpecUpdated(
		ctx context.Context,
		cluster *clusterv1.Cluster,
	) error
}

pkg/controllers/addons/controller.go

Lines changed: 106 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,106 @@
1+
// Copyright 2024 Nutanix. All rights reserved.
2+
// SPDX-License-Identifier: Apache-2.0
3+
4+
package addons
5+
6+
import (
7+
"context"
8+
"fmt"
9+
10+
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
11+
"sigs.k8s.io/cluster-api/util/predicates"
12+
ctrl "sigs.k8s.io/controller-runtime"
13+
"sigs.k8s.io/controller-runtime/pkg/builder"
14+
"sigs.k8s.io/controller-runtime/pkg/client"
15+
"sigs.k8s.io/controller-runtime/pkg/controller"
16+
"sigs.k8s.io/controller-runtime/pkg/event"
17+
"sigs.k8s.io/controller-runtime/pkg/predicate"
18+
19+
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/handlers"
20+
"github.com/nutanix-cloud-native/cluster-api-runtime-extensions-nutanix/common/pkg/capi/clustertopology/handlers/lifecycle"
21+
)
22+
23+
// Reconciler watches Cluster objects and re-runs lifecycle addon handlers
// when a cluster's spec changes, enabling addon updates outside of the
// upgrade flow.
type Reconciler struct {
	// client reads Cluster objects from the management cluster.
	client client.Client

	// handlers are the lifecycle handlers that opted in to dynamic
	// re-application by implementing lifecycle.OnClusterSpecUpdated.
	handlers []lifecycle.OnClusterSpecUpdated
}
28+
29+
func NewController(cl client.Client, lifecycleHandlers []handlers.Named) *Reconciler {
30+
specUpdatedHandlers := []lifecycle.OnClusterSpecUpdated{}
31+
for _, h := range lifecycleHandlers {
32+
if h, ok := h.(lifecycle.OnClusterSpecUpdated); ok {
33+
specUpdatedHandlers = append(specUpdatedHandlers, h)
34+
}
35+
}
36+
return &Reconciler{
37+
client: cl,
38+
handlers: specUpdatedHandlers,
39+
}
40+
}
41+
42+
// SetupWithManager registers the Reconciler with the manager, watching
// Cluster objects. Only spec updates are acted upon: create, delete, and
// generic events are filtered out, as are clusters without a managed
// topology, status-only changes (no generation bump), and paused clusters.
func (r *Reconciler) SetupWithManager(
	ctx context.Context,
	mgr ctrl.Manager,
	options controller.Options,
) error {
	hasTopologyPredicate := predicates.ClusterHasTopology(ctrl.LoggerFrom(ctx))
	generationChangedPredicate := predicate.GenerationChangedPredicate{}

	err := ctrl.NewControllerManagedBy(mgr).
		For(&clusterv1.Cluster{}, builder.WithPredicates(
			predicate.Funcs{
			// Newly created clusters are handled by the regular runtime
			// hooks; this controller only reacts to updates.
				CreateFunc: func(e event.CreateEvent) bool {
					return false
				},
				UpdateFunc: func(e event.UpdateEvent) bool {
					// Only reconcile Cluster with topology.
					if !hasTopologyPredicate.UpdateFunc(e) {
						return false
					}
					// Skip status-only updates: spec changes bump the
					// object's generation, status changes do not.
					if !generationChangedPredicate.Update(e) {
						return false
					}
					cluster, ok := e.ObjectNew.(*clusterv1.Cluster)
					if !ok {
						return false
					}

					// Do not reconcile paused clusters.
					return !cluster.Spec.Paused
				},
				DeleteFunc: func(e event.DeleteEvent) bool {
					return false
				},
				GenericFunc: func(e event.GenericEvent) bool {
					return false
				},
			},
		)).
		Named("addons").
		WithOptions(options).
		Complete(r)
	if err != nil {
		return fmt.Errorf("failed to set up with controller manager: %w", err)
	}

	return nil
}
88+
89+
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
90+
cluster := &clusterv1.Cluster{}
91+
if err := r.client.Get(ctx, req.NamespacedName, cluster); err != nil {
92+
return ctrl.Result{}, client.IgnoreNotFound(err)
93+
}
94+
95+
for _, h := range r.handlers {
96+
if err := h.OnClusterSpecUpdated(ctx, cluster); err != nil {
97+
return ctrl.Result{}, fmt.Errorf(
98+
"failed to reconcile cluster %s: %w",
99+
client.ObjectKeyFromObject(cluster),
100+
err,
101+
)
102+
}
103+
}
104+
105+
return ctrl.Result{}, nil
106+
}

pkg/controllers/addons/doc.go

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
// Copyright 2024 Nutanix. All rights reserved.
2+
// SPDX-License-Identifier: Apache-2.0
3+
4+
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters,verbs=get;list;watch
5+
package addons

pkg/controllers/addons/flags.go

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
// Copyright 2024 Nutanix. All rights reserved.
2+
// SPDX-License-Identifier: Apache-2.0
3+
4+
package addons
5+
6+
import (
7+
"github.com/spf13/pflag"
8+
)
9+
10+
type Options struct {
11+
Concurrency int
12+
}
13+
14+
func (o *Options) AddFlags(flags *pflag.FlagSet) {
15+
pflag.CommandLine.IntVar(
16+
&o.Concurrency,
17+
"addons-clusters-concurrency",
18+
10,
19+
"Number of clusters to sync concurrently.",
20+
)
21+
}
Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,6 @@
11
// Copyright 2023 Nutanix. All rights reserved.
22
// SPDX-License-Identifier: Apache-2.0
33

4-
// Package calico provides a handler for managing Calico deployments on clusters, configurable via
5-
// labels and annotations.
6-
//
7-
// To enable Calico deployment, a cluster must be labelled with `caren.nutanix.com/cni=calico`.
8-
// This will ensure the Tigera Configmap and associated ClusterResourceSet.
9-
//
104
// +kubebuilder:rbac:groups=addons.cluster.x-k8s.io,resources=clusterresourcesets,verbs=watch;list;get;create;patch;update;delete
115
// +kubebuilder:rbac:groups="",resources=configmaps,verbs=watch;list;get;create;patch;update;delete
126
package ccm

pkg/handlers/generic/lifecycle/ccm/handler.go

Lines changed: 33 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -68,28 +68,45 @@ func (c *CCMHandler) AfterControlPlaneInitialized(
6868
req *runtimehooksv1.AfterControlPlaneInitializedRequest,
6969
resp *runtimehooksv1.AfterControlPlaneInitializedResponse,
7070
) {
71-
commonResponse := &runtimehooksv1.CommonResponse{}
72-
c.apply(ctx, &req.Cluster, commonResponse)
73-
resp.Status = commonResponse.GetStatus()
74-
resp.Message = commonResponse.GetMessage()
71+
c.handle(ctx, &req.Cluster, &resp.CommonResponse)
7572
}
7673

7774
// BeforeClusterUpgrade applies the CCM for the cluster in the upgrade
// request, reporting the outcome on the hook response's CommonResponse.
func (c *CCMHandler) BeforeClusterUpgrade(
	ctx context.Context,
	req *runtimehooksv1.BeforeClusterUpgradeRequest,
	resp *runtimehooksv1.BeforeClusterUpgradeResponse,
) {
	c.handle(ctx, &req.Cluster, &resp.CommonResponse)
}
8781

88-
func (c *CCMHandler) apply(
82+
// OnClusterSpecUpdated implements lifecycle.OnClusterSpecUpdated, allowing
// the addons controller to re-apply the CCM whenever the cluster spec
// changes outside of an upgrade.
func (c *CCMHandler) OnClusterSpecUpdated(
	ctx context.Context,
	cluster *clusterv1.Cluster,
) error {
	if err := c.apply(ctx, cluster); err != nil {
		return fmt.Errorf("failed to apply CCM: %w", err)
	}

	return nil
}
92+
93+
func (c *CCMHandler) handle(
8994
ctx context.Context,
9095
cluster *clusterv1.Cluster,
9196
resp *runtimehooksv1.CommonResponse,
9297
) {
98+
if err := c.apply(ctx, cluster); err != nil {
99+
resp.SetStatus(runtimehooksv1.ResponseStatusFailure)
100+
resp.SetMessage(err.Error())
101+
}
102+
103+
resp.SetStatus(runtimehooksv1.ResponseStatusSuccess)
104+
}
105+
106+
func (c *CCMHandler) apply(
107+
ctx context.Context,
108+
cluster *clusterv1.Cluster,
109+
) error {
93110
clusterKey := ctrlclient.ObjectKeyFromObject(cluster)
94111

95112
log := ctrl.LoggerFrom(ctx).WithValues(
@@ -103,37 +120,17 @@ func (c *CCMHandler) apply(
103120
if err != nil {
104121
if variables.IsNotFoundError(err) {
105122
log.V(5).Info("Skipping CCM handler.")
106-
return
123+
return nil
107124
}
108-
log.Error(
109-
err,
110-
"failed to read CCM from cluster definition",
111-
)
112-
resp.SetStatus(runtimehooksv1.ResponseStatusFailure)
113-
resp.SetMessage(
114-
fmt.Sprintf("failed to read CCM from cluster definition: %v",
115-
err,
116-
),
117-
)
118-
return
125+
return fmt.Errorf("failed to read CCM from cluster definition: %w", err)
119126
}
120127

121128
clusterConfigVar, err := variables.Get[apivariables.ClusterConfigSpec](
122129
varMap,
123130
v1alpha1.ClusterConfigVariableName,
124131
)
125132
if err != nil {
126-
log.Error(
127-
err,
128-
"failed to read clusterConfig variable from cluster definition",
129-
)
130-
resp.SetStatus(runtimehooksv1.ResponseStatusFailure)
131-
resp.SetMessage(
132-
fmt.Sprintf("failed to read clusterConfig variable from cluster definition: %v",
133-
err,
134-
),
135-
)
136-
return
133+
return fmt.Errorf("failed to read clusterConfig variable from cluster definition: %w", err)
137134
}
138135

139136
// There's a 1:1 mapping of infra to CCM provider. We derive the CCM provider from the infra.
@@ -147,21 +144,13 @@ func (c *CCMHandler) apply(
147144
handler = c.ProviderHandler[v1alpha1.CCMProviderNutanix]
148145
default:
149146
log.Info(fmt.Sprintf("No CCM handler provided for infra kind %s", infraKind))
150-
return
147+
return nil
151148
}
152149

153150
err = handler.Apply(ctx, cluster, &clusterConfigVar, log)
154151
if err != nil {
155-
log.Error(
156-
err,
157-
"failed to deploy CCM for cluster",
158-
)
159-
resp.SetStatus(runtimehooksv1.ResponseStatusFailure)
160-
resp.SetMessage(
161-
fmt.Sprintf("failed to deploy CCM for cluster: %v",
162-
err,
163-
),
164-
)
165-
return
152+
return fmt.Errorf("failed to deploy CCM for cluster: %w", err)
166153
}
154+
155+
return nil
167156
}

0 commit comments

Comments
 (0)