Skip to content

Commit 3c71970

Browse files
committed
Create job to delete individual previews
Signed-off-by: ArthurSens <[email protected]>
1 parent 9b382b6 commit 3c71970

File tree

2 files changed

+286
-0
lines changed

2 files changed

+286
-0
lines changed
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,221 @@
1+
import { Werft } from "./util/werft";
2+
import * as Tracing from "./observability/tracing";
3+
import { SpanStatusCode } from "@opentelemetry/api";
4+
import { exec } from "./util/shell";
5+
import { previewNameFromBranchName } from "./util/preview";
6+
import { CORE_DEV_KUBECONFIG_PATH, HARVESTER_KUBECONFIG_PATH } from "./jobs/build/const";
7+
import { deleteDNSRecord } from "./util/gcloud";
8+
import * as VM from "./vm/vm";
9+
import * as fs from "fs";
10+
11+
// for testing purposes
// if set to 'true' it shows only previews that would be deleted
// NOTE(review): DRY_RUN is never read anywhere in this file — confirm it is
// meant to be wired into the deletion paths, or remove it.
const DRY_RUN = false;

// Slice names used to group log output in the Werft UI.
const SLICES = {
    CONFIGURE_ACCESS: "Configuring access to relevant resources",
    INSTALL_HARVESTER_KUBECONFIG: "Install Harvester kubeconfig",
};

// Will be set once tracing has been initialized
let werft: Werft;

// The Werft job context (including the annotations that select which preview to
// delete) is provided as a JSON file in the working directory by the job config.
const context: any = JSON.parse(fs.readFileSync("context.json").toString());
const annotations = context.Annotations || {};

// Entry point. Tracing is initialized first so the whole job is captured under one
// root span; only then is the Werft client constructed and the work kicked off.
// NOTE(review): the job name "delete-preview-environment-cron" matches the cron
// job, although this script deletes an individual preview — confirm intended.
Tracing.initialize()
    .then(() => {
        werft = new Werft("delete-preview-environment-cron");
    })
    .then(() => deletePreviewEnvironment())
    .then(() => cleanLoadbalancer())
    .catch((err) => {
        werft.rootSpan.setStatus({
            code: SpanStatusCode.ERROR,
            message: err,
        });
        console.error("Werft job failed with an error", err);
        // Explicitly not using process.exit as we need to flush tracing, see tracing.js
        process.exitCode = 1;
    })
    .finally(() => {
        werft.phase("Flushing telemetry", "Flushing telemetry before stopping job");
        werft.endAllSpans();
    });
45+
46+
class HarvesterPreviewEnvironment {
47+
// The prefix we use for the namespace
48+
static readonly namespacePrefix: string = "preview-";
49+
50+
// The name of the namespace that the VM and related resources are in, e.g. preview-my-branch
51+
namespace: string;
52+
53+
// The name of the preview environment, e.g. my-branch
54+
name: string;
55+
56+
// The namespace in the k3s cluster where all resources are (default)
57+
k3sNamespace: string = "default";
58+
59+
constructor(namespace: string) {
60+
this.namespace = namespace;
61+
this.name = namespace.replace(HarvesterPreviewEnvironment.namespacePrefix, "");
62+
}
63+
64+
async delete(): Promise<void> {
65+
VM.deleteVM({ name: this.name });
66+
}
67+
68+
async removeDNSRecords(sliceID: string) {
69+
werft.log(sliceID, "Deleting harvester related DNS records for the preview environment");
70+
await Promise.all([
71+
deleteDNSRecord(
72+
"A",
73+
`*.ssh.ws.${this.name}.preview.gitpod-dev.com`,
74+
"gitpod-core-dev",
75+
"preview-gitpod-dev-com",
76+
sliceID,
77+
),
78+
deleteDNSRecord(
79+
"A",
80+
`*.ws.${this.name}.preview.gitpod-dev.com`,
81+
"gitpod-core-dev",
82+
"preview-gitpod-dev-com",
83+
sliceID,
84+
),
85+
deleteDNSRecord(
86+
"A",
87+
`*.${this.name}.preview.gitpod-dev.com`,
88+
"gitpod-core-dev",
89+
"preview-gitpod-dev-com",
90+
sliceID,
91+
),
92+
deleteDNSRecord(
93+
"A",
94+
`${this.name}.preview.gitpod-dev.com`,
95+
"gitpod-core-dev",
96+
"preview-gitpod-dev-com",
97+
sliceID,
98+
),
99+
deleteDNSRecord(
100+
"A",
101+
`prometheus-${this.name}.preview.gitpod-dev.com`,
102+
"gitpod-core-dev",
103+
"preview-gitpod-dev-com",
104+
sliceID,
105+
),
106+
deleteDNSRecord(
107+
"TXT",
108+
`prometheus-${this.name}.preview.gitpod-dev.com`,
109+
"gitpod-core-dev",
110+
"preview-gitpod-dev-com",
111+
sliceID,
112+
),
113+
deleteDNSRecord(
114+
"A",
115+
`grafana-${this.name}.preview.gitpod-dev.com`,
116+
"gitpod-core-dev",
117+
"preview-gitpod-dev-com",
118+
sliceID,
119+
),
120+
deleteDNSRecord(
121+
"TXT",
122+
`grafana-${this.name}.preview.gitpod-dev.com`,
123+
"gitpod-core-dev",
124+
"preview-gitpod-dev-com",
125+
sliceID,
126+
),
127+
]);
128+
}
129+
130+
/**
131+
* Given a branch name it will return the expected namespace of the preview environment
132+
*/
133+
static expectedNamespaceFromBranch(branch: string): string {
134+
const previewName = previewNameFromBranchName(branch);
135+
return `${HarvesterPreviewEnvironment.namespacePrefix}${previewName}`;
136+
}
137+
}
138+
139+
// Alias so the rest of the job stays backend-agnostic; Harvester is currently the only backend.
type PreviewEnvironment = HarvesterPreviewEnvironment;
140+
141+
async function deletePreviewEnvironment() {
142+
werft.phase("Configure access");
143+
try {
144+
const GCLOUD_SERVICE_ACCOUNT_PATH = "/mnt/secrets/gcp-sa/service-account.json";
145+
exec(`gcloud auth activate-service-account --key-file "${GCLOUD_SERVICE_ACCOUNT_PATH}"`, {
146+
slice: SLICES.CONFIGURE_ACCESS,
147+
});
148+
exec(
149+
`KUBECONFIG=${CORE_DEV_KUBECONFIG_PATH} gcloud container clusters get-credentials core-dev --zone europe-west1-b --project gitpod-core-dev`,
150+
{ slice: SLICES.CONFIGURE_ACCESS },
151+
);
152+
werft.done(SLICES.CONFIGURE_ACCESS);
153+
} catch (err) {
154+
werft.fail(SLICES.CONFIGURE_ACCESS, err);
155+
}
156+
157+
werft.phase("Install Harvester kubeconfig");
158+
try {
159+
exec(`cp /mnt/secrets/harvester-kubeconfig/harvester-kubeconfig.yml ${HARVESTER_KUBECONFIG_PATH}`, {
160+
slice: SLICES.INSTALL_HARVESTER_KUBECONFIG,
161+
});
162+
werft.done(SLICES.INSTALL_HARVESTER_KUBECONFIG);
163+
} catch (err) {
164+
werft.fail(SLICES.INSTALL_HARVESTER_KUBECONFIG, err);
165+
}
166+
167+
removePreviewEnvironment(new HarvesterPreviewEnvironment(annotations["previewNamespace"]))
168+
}
169+
170+
async function removePreviewEnvironment(previewEnvironment: PreviewEnvironment) {
171+
const sliceID = `Deleting preview ${previewEnvironment.name}`;
172+
werft.log(sliceID, `Starting deletion of all resources related to ${previewEnvironment.name}`);
173+
try {
174+
// We're running these promises sequentially to make it easier to read the log output.
175+
await previewEnvironment.removeDNSRecords(sliceID);
176+
await previewEnvironment.delete();
177+
werft.done(sliceID);
178+
} catch (e) {
179+
werft.failSlice(sliceID, e);
180+
}
181+
}
182+
183+
async function cleanLoadbalancer() {
184+
const fetchPhase = "fetching unuse loadbalancer";
185+
const deletionPhase = "deleting unused load balancers";
186+
187+
werft.phase(fetchPhase);
188+
let lbsToDelete: string[];
189+
try {
190+
// get all loadbalancer
191+
let lbs: string[] = exec(
192+
`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} get deployment -n loadbalancers -o=jsonpath="{.items[*].metadata.labels['gitpod\\.io\\/lbName']}"`,
193+
{ silent: true },
194+
)
195+
.stdout.trim()
196+
.split(" ");
197+
let previews = exec(
198+
`kubectl --kubeconfig ${HARVESTER_KUBECONFIG_PATH} get namespaces -o go-template --template '{{range .items}}{{.metadata.name}}{{"\\n"}}{{end}}' | awk '/(preview-.*)/ { print $1 }'`,
199+
{ silent: true },
200+
)
201+
.stdout.trim()
202+
.split("\n");
203+
let previewSet = new Set(previews);
204+
lbsToDelete = lbs.filter((lb) => !previewSet.has("preview-" + lb));
205+
lbsToDelete.forEach((lb) => werft.log(fetchPhase, "will delete " + lb));
206+
} catch (err) {
207+
werft.fail(fetchPhase, err);
208+
}
209+
210+
werft.phase(deletionPhase);
211+
try {
212+
lbsToDelete.forEach((lb) => {
213+
werft.log(deletionPhase, "deleteing " + lb);
214+
exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} -n loadbalancers delete deployment lb-${lb}`);
215+
exec(`kubectl --kubeconfig ${CORE_DEV_KUBECONFIG_PATH} -n loadbalancers delete service lb-${lb}`);
216+
});
217+
} catch (err) {
218+
werft.fail(deletionPhase, err);
219+
}
220+
werft.done(deletionPhase);
221+
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
1+
# Werft pod configuration for the job that deletes an individual preview environment
# (runs .werft/platform-delete-preview-environment.ts).
pod:
  serviceAccount: werft
  # Pin the pod to the dedicated build node pool.
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: dev/workload
                operator: In
                values:
                  - "builds"
  # Secrets the job needs: GCP deployer SA, Harvester kubeconfig, registry pull
  # credentials, and the SSH keys for the Harvester VMs.
  volumes:
    - name: gcp-sa
      secret:
        secretName: gcp-sa-gitpod-dev-deployer
    - name: harvester-kubeconfig
      secret:
        secretName: harvester-kubeconfig
    - name: harvester-k3s-dockerhub-pull-account
      secret:
        secretName: harvester-k3s-dockerhub-pull-account
    - name: harvester-vm-ssh-keys
      secret:
        secretName: harvester-vm-ssh-keys
  containers:
    - name: build
      image: eu.gcr.io/gitpod-core-dev/dev/dev-environment:pd-dev-image-gcloud.2
      workingDir: /workspace
      imagePullPolicy: IfNotPresent
      volumeMounts:
        - name: gcp-sa
          mountPath: /mnt/secrets/gcp-sa
          readOnly: true
        - name: harvester-kubeconfig
          mountPath: /mnt/secrets/harvester-kubeconfig
        - name: harvester-vm-ssh-keys
          mountPath: /mnt/secrets/harvester-vm-ssh-keys
        - name: harvester-k3s-dockerhub-pull-account
          mountPath: /mnt/secrets/harvester-k3s-dockerhub-pull-account
      env:
        # Tracing sink for the job (see .werft/observability/tracing).
        - name: HONEYCOMB_DATASET
          value: "werft"
        - name: HONEYCOMB_API_KEY
          valueFrom:
            secretKeyRef:
              name: honeycomb-api-key
              key: apikey
      command:
        - bash
        - -c
        - |
          sleep 1
          set -Eeuo pipefail

          sudo chown -R gitpod:gitpod /workspace
          # Install the Harvester VM SSH keys with the permissions ssh requires.
          mkdir /workspace/.ssh
          cp /mnt/secrets/harvester-vm-ssh-keys/id_rsa /workspace/.ssh/id_rsa_harvester_vm
          cp /mnt/secrets/harvester-vm-ssh-keys/id_rsa.pub /workspace/.ssh/id_rsa_harvester_vm.pub
          sudo chmod 600 /workspace/.ssh/id_rsa_harvester_vm
          sudo chmod 644 /workspace/.ssh/id_rsa_harvester_vm.pub

          (cd .werft && yarn install && mv node_modules ..) | werft log slice prep
          # Materialize the werft job context (annotations etc.) for the script to read.
          printf '{{ toJson . }}' > context.json

          npx ts-node .werft/platform-delete-preview-environment.ts

0 commit comments

Comments
 (0)