Commit 6372508: Merge pull request kubernetes-sigs#2 from chuckha/updates

Several updates

2 parents: d81278a + bfe1e2a

7 files changed: +191 -78 lines


README.md (+1 -4)

@@ -1,7 +1,5 @@
 # Cluster API Provider Docker
 
-A temporary home for CAPD
-
 ## Manager Container Image
 
 A sample is built and hosted at `gcr.io/kubernetes1-226021/capd-manager:latest`
@@ -31,7 +29,7 @@ docker build -t my-repository/capd-manager:latest .
 
 # Testing out CAPD
 
-Tested on: Linux, OS X
+Tested on: Linux, works ok on OS X sometimes
 
 Requirements: `kind` > 0.3.0 and `kubectl`
 
@@ -66,4 +64,3 @@ The kubeconfig is on the management cluster in secrets. Grab it and write it to
 `kubectl get secrets -o jsonpath='{.data.kubeconfig}' kubeconfig-my-cluster | base64 --decode > ~/.kube/kind-config-my-cluster`
 
 `kubectl get po --all-namespaces --kubeconfig ~/.kube/kind-config-my-cluster`
-
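For anyone scripting the README's kubeconfig step rather than using kubectl, a minimal client-go sketch follows. Two assumptions the README does not state: the secret lives in the default namespace, and the management cluster's kubeconfig is at the standard ~/.kube/config path; the two-argument Get signature matches client-go of this era:

    package main

    import (
    	"fmt"
    	"io/ioutil"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// Build a client from the management cluster's kubeconfig (~/.kube/config).
    	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    	if err != nil {
    		panic(err)
    	}
    	cs, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		panic(err)
    	}
    	// Assumption: the secret is in the "default" namespace.
    	secret, err := cs.CoreV1().Secrets("default").Get("kubeconfig-my-cluster", metav1.GetOptions{})
    	if err != nil {
    		panic(err)
    	}
    	// client-go base64-decodes secret data, so no manual decode step is needed.
    	if err := ioutil.WriteFile("kind-config-my-cluster", secret.Data["kubeconfig"], 0600); err != nil {
    		panic(err)
    	}
    	fmt.Println("wrote kind-config-my-cluster")
    }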

actuators/machine.go (+66 -24)

@@ -23,6 +23,7 @@ import (
 	"time"
 
 	"github.com/chuckha/cluster-api-provider-docker/kind/actions"
+	apicorev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
 	clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
@@ -78,14 +79,19 @@ func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clu
 	if setValue == clusterAPIControlPlaneSetLabel {
 		if len(controlPlanes) > 0 {
 			fmt.Println("Adding a control plane")
-			controlPlaneNode, err := actions.AddControlPlane(c.Name, machine.Spec.Versions.ControlPlane)
+			controlPlaneNode, err := actions.AddControlPlane(c.Name, machine.GetName(), machine.Spec.Versions.ControlPlane)
 			if err != nil {
 				fmt.Printf("%+v", err)
 				return err
 			}
-			name := providerID(controlPlaneNode.Name())
-			machine.Spec.ProviderID = &name
-			return m.save(old, machine)
+			nodeUID, err := actions.GetNodeRefUID(c.GetName(), controlPlaneNode.Name())
+			if err != nil {
+				fmt.Printf("%+v", err)
+				return err
+			}
+			providerID := providerID(controlPlaneNode.Name())
+			machine.Spec.ProviderID = &providerID
+			return m.save(old, machine, getNodeRef(controlPlaneNode.Name(), nodeUID))
 		}
 
 		fmt.Println("Creating a brand new cluster")
@@ -99,16 +105,20 @@ func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clu
 			fmt.Printf("%+v\n", err)
 			return err
 		}
-		controlPlaneNode, err := actions.CreateControlPlane(c.Name, lbip, machine.Spec.Versions.ControlPlane)
+		controlPlaneNode, err := actions.CreateControlPlane(c.Name, machine.GetName(), lbip, machine.Spec.Versions.ControlPlane)
 		if err != nil {
 			fmt.Printf("%+v\n", err)
 			return err
 		}
-
+		nodeUID, err := actions.GetNodeRefUID(c.GetName(), controlPlaneNode.Name())
+		if err != nil {
+			fmt.Printf("%+v", err)
+			return err
+		}
 		// set the machine's providerID
-		name := providerID(controlPlaneNode.Name())
-		machine.Spec.ProviderID = &name
-		if err := m.save(old, machine); err != nil {
+		providerID := providerID(controlPlaneNode.Name())
+		machine.Spec.ProviderID = &providerID
+		if err := m.save(old, machine, getNodeRef(controlPlaneNode.Name(), nodeUID)); err != nil {
 			fmt.Printf("%+v\n", err)
 			return err
 		}
@@ -132,18 +142,37 @@ func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clu
 	}
 
 	fmt.Println("Creating a new worker node")
-	worker, err := actions.AddWorker(c.Name, machine.Spec.Versions.Kubelet)
+	worker, err := actions.AddWorker(c.Name, machine.GetName(), machine.Spec.Versions.Kubelet)
 	if err != nil {
 		fmt.Printf("%+v", err)
 		return err
 	}
-	name := providerID(worker.Name())
-	machine.Spec.ProviderID = &name
-	return m.save(old, machine)
+	providerID := providerID(worker.Name())
+	machine.Spec.ProviderID = &providerID
+	nodeUID, err := actions.GetNodeRefUID(c.GetName(), worker.Name())
+	if err != nil {
+		fmt.Printf("%+v", err)
+		return err
+	}
+	return m.save(old, machine, getNodeRef(worker.Name(), nodeUID))
 }
 
+// Delete returns nil when the machine no longer exists or when a successful delete has happened.
 func (m *Machine) Delete(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
-	return actions.DeleteNode(cluster.Name, providerNameToLookupID(*machine.Spec.ProviderID))
+	exists, err := m.Exists(ctx, cluster, machine)
+	if err != nil {
+		return err
+	}
+	if exists {
+		setValue := getRole(machine)
+		if setValue == clusterAPIControlPlaneSetLabel {
+			fmt.Printf("Deleting a control plane: %q\n", machine.GetName())
+			return actions.DeleteControlPlane(cluster.Name, machine.GetName())
+		}
+		fmt.Printf("Deleting a worker: %q\n", machine.GetName())
+		return actions.DeleteWorker(cluster.Name, machine.GetName())
+	}
+	return nil
 }
 
 func (m *Machine) Update(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
@@ -152,16 +181,16 @@ func (m *Machine) Update(ctx context.Context, cluster *clusterv1.Cluster, machin
 }
 
 func (m *Machine) Exists(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) (bool, error) {
-	if machine.Spec.ProviderID == nil {
-		return false, nil
+	if machine.Spec.ProviderID != nil {
+		return true, nil
 	}
-	fmt.Println("Looking for a docker container named", providerNameToLookupID(*machine.Spec.ProviderID))
+
 	role := getRole(machine)
 	kindRole := CAPIroleToKindRole(role)
 	labels := []string{
 		fmt.Sprintf("label=%s=%s", constants.NodeRoleKey, kindRole),
 		fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, cluster.Name),
-		fmt.Sprintf("name=^%s$", providerNameToLookupID(*machine.Spec.ProviderID)),
+		fmt.Sprintf("name=^%s$", machine.GetName()),
 	}
 	fmt.Printf("using labels: %v\n", labels)
 	nodeList, err := nodes.List(labels...)
@@ -172,7 +201,8 @@ func (m *Machine) Exists(ctx context.Context, cluster *clusterv1.Cluster, machin
 	return len(nodeList) >= 1, nil
 }
 
-func (m *Machine) save(old, new *clusterv1.Machine) error {
+// patches the object and saves the status.
+func (m *Machine) save(old, new *clusterv1.Machine, noderef *apicorev1.ObjectReference) error {
 	fmt.Println("updating machine")
 	p, err := patch.NewJSONPatch(old, new)
 	if err != nil {
@@ -186,19 +216,22 @@ func (m *Machine) save(old, new *clusterv1.Machine) error {
 			fmt.Printf("%+v\n", err)
 			return err
 		}
-		if _, err := m.ClusterAPI.Machines(old.Namespace).Patch(new.Name, types.JSONPatchType, pb); err != nil {
+		new, err = m.ClusterAPI.Machines(old.Namespace).Patch(new.Name, types.JSONPatchType, pb)
+		if err != nil {
 			fmt.Printf("%+v\n", err)
 			return err
 		}
 		fmt.Println("updated machine")
 	}
+	// set the noderef after so we don't try and patch it in during the first update
+	new.Status.NodeRef = noderef
+	if _, err := m.ClusterAPI.Machines(old.Namespace).UpdateStatus(new); err != nil {
+		fmt.Printf("%+v\n", err)
+		return err
+	}
 	return nil
 }
 
-func providerNameToLookupID(providerName string) string {
-	return providerName[len("docker://"):]
-}
-
 func providerID(name string) string {
 	return fmt.Sprintf("docker://%s", name)
 }
@@ -210,3 +243,12 @@ func CAPIroleToKindRole(CAPIRole string) string {
 	}
 	return CAPIRole
 }
+
+func getNodeRef(name, uid string) *apicorev1.ObjectReference {
+	return &apicorev1.ObjectReference{
+		Kind:       "Node",
+		APIVersion: apicorev1.SchemeGroupVersion.String(),
+		Name:       name,
+		UID:        types.UID(uid),
+	}
+}
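Taken together, the machine.go changes make Create resolve the new node's Kubernetes UID via actions.GetNodeRefUID and hand save an ObjectReference; save patches the Machine first and only then writes Status.NodeRef through UpdateStatus, per the in-code comment about not patching the noderef in during the first update. A self-contained sketch of the ObjectReference that getNodeRef builds, with illustrative name and UID values:

    package main

    import (
    	"fmt"

    	apicorev1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/types"
    )

    // Mirrors the getNodeRef helper added above: wrap a node name and UID in an
    // ObjectReference pointing at a core/v1 Node.
    func getNodeRef(name, uid string) *apicorev1.ObjectReference {
    	return &apicorev1.ObjectReference{
    		Kind:       "Node",
    		APIVersion: apicorev1.SchemeGroupVersion.String(),
    		Name:       name,
    		UID:        types.UID(uid),
    	}
    }

    func main() {
    	// Illustrative values; in the actuator these come from the kind node
    	// and actions.GetNodeRefUID.
    	ref := getNodeRef("my-cluster-control-plane", "0b3720b4-ffce-4a5a-b2c5-bf593e0e2a7c")
    	fmt.Printf("noderef: %s %s (uid %s)\n", ref.APIVersion, ref.Name, ref.UID)
    }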

cmd/capdctl/main.go (+4 -4)

@@ -28,6 +28,8 @@ import (
 	"sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
 )
 
+// TODO: Generate the RBAC stuff from somewhere instead of copy pasta
+
 const (
 	// Important to keep this consistent.
 	controlPlaneSet = "controlplane"
@@ -53,7 +55,7 @@ func main() {
 
 	capd := flag.NewFlagSet("capd", flag.ExitOnError)
 	capdImage := capd.String("capd-image", "gcr.io/kubernetes1-226021/capd-manager:latest", "The capd manager image to run")
-	capiImage := capd.String("capi-image", "gcr.io/k8s-cluster-api/cluster-api-controller:0.1.1", "The capi manager image to run")
+	capiImage := capd.String("capi-image", "gcr.io/k8s-cluster-api/cluster-api-controller:0.1.3", "The capi manager image to run")
 
 	controlPlane := flag.NewFlagSet("control-plane", flag.ExitOnError)
 	controlPlaneOpts := new(machineOptions)
@@ -122,7 +124,6 @@ subcommands are:
 
 cluster - Write a capd cluster object to stdout
 	example: capdctl cluster -cluster-name my-cluster -namespace my-namespace | kubectl apply -f -
-
 `
 }
 
@@ -153,14 +154,13 @@ func machineYAML(opts *machineOptions) string {
 			Namespace: *opts.namespace,
 			Labels: map[string]string{
 				"cluster.k8s.io/cluster-name": *opts.clusterName,
-				"set": *opts.set,
+				"set":                         *opts.set,
 			},
 		},
 		Spec: v1alpha1.MachineSpec{
 			ProviderSpec: v1alpha1.ProviderSpec{},
 		},
 	}
-	// TODO: 🤔
 	if *opts.set == controlPlaneSet {
 		machine.Spec.Versions.ControlPlane = *opts.version
 	}
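capdctl gives each subcommand its own flag.FlagSet, which is how the capd subcommand carries its own -capi-image default (bumped to 0.1.3 above). A trimmed-down sketch of the pattern, keeping just that one flag:

    package main

    import (
    	"flag"
    	"fmt"
    	"os"
    )

    func main() {
    	// One FlagSet per subcommand; flag.ExitOnError makes bad flags exit the program.
    	capd := flag.NewFlagSet("capd", flag.ExitOnError)
    	capiImage := capd.String("capi-image", "gcr.io/k8s-cluster-api/cluster-api-controller:0.1.3", "The capi manager image to run")

    	if len(os.Args) < 2 {
    		fmt.Fprintln(os.Stderr, "expected a subcommand, e.g. capd")
    		os.Exit(1)
    	}
    	switch os.Args[1] {
    	case "capd":
    		capd.Parse(os.Args[2:]) // parse only the flags after the subcommand
    		fmt.Println("capi image:", *capiImage)
    	default:
    		fmt.Fprintln(os.Stderr, "unknown subcommand:", os.Args[1])
    		os.Exit(1)
    	}
    }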

cmd/kind-test/main.go (+3 -3)

@@ -48,11 +48,11 @@ func main() {
 		if err != nil {
 			panic(fmt.Sprintf("%+v", err))
 		}
-		if _, err := actions.CreateControlPlane(clusterName, ip, version); err != nil {
+		if _, err := actions.CreateControlPlane(clusterName, inputs[1], ip, version); err != nil {
 			panic(fmt.Sprintf("%+v", err))
 		}
 	case "add-worker":
-		if _, err := actions.AddWorker(clusterName, version); err != nil {
+		if _, err := actions.AddWorker(clusterName, inputs[1], version); err != nil {
 			panic(fmt.Sprintf("%+v", err))
 		}
 	case "delete-node":
@@ -65,7 +65,7 @@ func main() {
 			panic(fmt.Sprintf("%+v", err))
 		}
 	case "add-control-plane":
-		if _, err := actions.AddControlPlane(clusterName, version); err != nil {
+		if _, err := actions.AddControlPlane(clusterName, inputs[1], version); err != nil {
 			panic(fmt.Sprintf("%+v", err))
 		}
 	case "set-cluster-name":

kind/actions/cluster_actions.go (+69 -7)

@@ -243,10 +243,10 @@ func KubeadmJoin(clusterName string, node *nodes.Node) error {
 	return nil
 }
 
-func SetNodeRef(clusterName, nodeName string) error {
+func SetNodeProviderRef(clusterName, nodeName string) error {
 	allNodes, err := nodes.List(fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName))
 	if err != nil {
-		return nil
+		return err
 	}
 
 	node, err := nodes.BootstrapControlPlaneNode(allNodes)
@@ -274,28 +274,90 @@ func SetNodeProviderRef(clusterName, nodeName string) error {
 	return nil
 }
 
-func RemoveNode(clusterName, nodeName string) error {
+func GetNodeRefUID(clusterName, nodeName string) (string, error) {
+	// k get nodes my-cluster-worker -o custom-columns=UID:.metadata.uid --no-headers
 	allNodes, err := nodes.List(fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName))
 	if err != nil {
-		return nil
+		return "", err
 	}
 
 	node, err := nodes.BootstrapControlPlaneNode(allNodes)
 	if err != nil {
-		return err
+		return "", err
 	}
+
+	patch := fmt.Sprintf(`{"spec": {"providerID": "docker://%s"}}`, nodeName)
+	fmt.Println("trying to apply:", patch)
 	cmd := node.Command(
 		"kubectl",
 		"--kubeconfig", "/etc/kubernetes/admin.conf",
-		"delete",
+		"get",
 		"node", nodeName,
+		"--output=custom-columns=UID:.metadata.uid",
+		"--no-headers",
 	)
 	lines, err := exec.CombinedOutputLines(cmd)
 	if err != nil {
 		for _, line := range lines {
 			fmt.Println(line)
 		}
-		return errors.Wrap(err, "failed to remove node from cluster")
+		return "", errors.Wrap(err, "failed to get node ref UID")
+	}
+	return strings.TrimSpace(lines[0]), nil
+}
+
+// DeleteClusterNode will remove the kubernetes node from the list of nodes (during a kubectl get nodes).
+func DeleteClusterNode(clusterName, nodeName string) error {
+	// get all control plane nodes
+	allControlPlanes, err := nodes.List(
+		fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName),
+		fmt.Sprintf("label=%s=%s", constants.NodeRoleKey, constants.ControlPlaneNodeRoleValue),
+	)
+	if err != nil {
+		return err
+	}
+	var node nodes.Node
+	// pick one that doesn't match the node name we are trying to delete
+	for _, n := range allControlPlanes {
+		if n.Name() != nodeName {
+			node = n
+			break
+		}
+	}
+	cmd := node.Command(
+		"kubectl",
+		"--kubeconfig", "/etc/kubernetes/admin.conf",
+		"delete", "node", nodeName,
+	)
+	lines, err := exec.CombinedOutputLines(cmd)
+	if err != nil {
+		for _, line := range lines {
+			fmt.Println(line)
+		}
+		return errors.Wrap(err, "failed to delete cluster node")
+	}
+	return nil
+}
+
+// KubeadmReset will run `kubeadm reset` on the control plane node being removed.
+func KubeadmReset(clusterName, nodeName string) error {
+	nodeList, err := nodes.List(
+		fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName),
+		fmt.Sprintf("label=%s=%s", constants.NodeRoleKey, constants.ControlPlaneNodeRoleValue),
+		fmt.Sprintf("name=^%s$", nodeName),
+	)
+	if len(nodeList) < 1 {
+		return errors.Errorf("could not find node %q", nodeName)
+	}
+	node := nodeList[0]
+
+	cmd := node.Command("kubeadm", "reset", "--force")
+	lines, err := exec.CombinedOutputLines(cmd)
+	if err != nil {
+		for _, line := range lines {
+			fmt.Println(line)
+		}
+		return errors.Wrap(err, "failed to reset node")
 	}
 
 	return nil
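GetNodeRefUID runs kubectl inside the cluster's bootstrap control-plane container and uses custom-columns output to print only the node's metadata.uid. The same query works against any kubeconfig; here is a minimal sketch using os/exec from the host (the node name my-cluster-worker is the illustrative one from the function's comment):

    package main

    import (
    	"fmt"
    	"os/exec"
    	"strings"
    )

    func main() {
    	// Print only the UID column, no header, exactly as GetNodeRefUID does.
    	out, err := exec.Command(
    		"kubectl", "get", "node", "my-cluster-worker",
    		"--output=custom-columns=UID:.metadata.uid", "--no-headers",
    	).CombinedOutput()
    	if err != nil {
    		fmt.Println(strings.TrimSpace(string(out)))
    		panic(err)
    	}
    	fmt.Println("node UID:", strings.TrimSpace(string(out)))
    }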
