forked from app-sre/deployment-validation-operator
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathcsv-generate.sh
executable file
·181 lines (158 loc) · 7.34 KB
/
csv-generate.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
#!/usr/bin/env bash
# Generate and publish the OLM bundle/CSV for an OSD operator into its
# SaaS bundle repository.  Helper functions (check_mandatory_params) come
# from common.sh, which must live next to this script.
set -e
# Quote the command substitution so paths containing spaces still resolve
# (the original used unquoted backticks: `dirname $0`).
source "$(dirname "$0")/common.sh"

# Print usage to stderr and exit non-zero.
usage() { echo "Usage: $0 -o operator-name -c saas-repository-channel -H operator-commit-hash -n operator-commit-number -i operator-image -V operator-version" 1>&2; exit 1; }
# TODO : Add support of long-options
# Parse the short options; each flag lands in a lowercase shell variable.
while getopts "c:dg:H:i:n:o:V:" option; do
    case "${option}" in
        c) operator_channel=${OPTARG} ;;
        H) operator_commit_hash=${OPTARG} ;;
        # -i should be $OPERATOR_IMAGE from standard.mk,
        # i.e. the URL to the image repository with *no* tag.
        i) operator_image=${OPTARG} ;;
        n) operator_commit_number=${OPTARG} ;;
        o) operator_name=${OPTARG} ;;
        # -V should be $OPERATOR_VERSION from standard.mk:
        # `{major}.{minor}.{commit-number}-{hash}`
        # Notably, it does *not* start with `v`.
        V) operator_version=${OPTARG} ;;
        *) usage ;;
    esac
done

# Checking parameters
check_mandatory_params operator_channel operator_image operator_version operator_name operator_commit_hash operator_commit_number
# Use set container engine or select one from available binaries
if [[ -z "$CONTAINER_ENGINE" ]]; then
    CONTAINER_ENGINE=$(command -v podman || command -v docker || true)
fi

# With an engine available, run the pinned yq image through it;
# otherwise fall back to a locally installed `yq` binary.
if [[ -n "$CONTAINER_ENGINE" ]]; then
    yq_image="quay.io/app-sre/yq:3.4.1"
    $CONTAINER_ENGINE pull $yq_image
    YQ_CMD="$CONTAINER_ENGINE run --rm -i $yq_image yq"
else
    YQ_CMD="yq"
fi
# Get the image URI as repo URL + image digest so the CSV pins the exact
# build rather than a mutable tag.
# Quote the transport URL: the expansions must reach skopeo as one word.
IMAGE_DIGEST=$(skopeo inspect "docker://${operator_image}:v${operator_version}" | jq -r .Digest)
if [[ -z "$IMAGE_DIGEST" ]]; then
    echo "Couldn't discover IMAGE_DIGEST for docker://${operator_image}:v${operator_version}!"
    exit 1
fi
REPO_DIGEST=${operator_image}@${IMAGE_DIGEST}

# If no override, using the gitlab repo.
# Single quoted string (the original spliced the token in unquoted, which
# would word-split if APP_SRE_BOT_PUSH_TOKEN ever contained whitespace).
if [ -z "$GIT_PATH" ] ; then
    GIT_PATH="https://app:${APP_SRE_BOT_PUSH_TOKEN}@gitlab.cee.redhat.com/service/saas-${operator_name}-bundle.git"
fi
# Calculate previous version
SAAS_OPERATOR_DIR="saas-${operator_name}-bundle"
BUNDLE_DIR="$SAAS_OPERATOR_DIR/${operator_name}/"

# Always start from a fresh clone of the channel branch.
# GIT_PATH is quoted: it embeds a push token and must stay a single word.
rm -rf "$SAAS_OPERATOR_DIR"
git clone --branch "$operator_channel" "$GIT_PATH" "$SAAS_OPERATOR_DIR"

# If this is a brand new SaaS setup, then set up accordingly
if [[ ! -d "${BUNDLE_DIR}" ]]; then
    echo "Setting up new SaaS operator dir: ${BUNDLE_DIR}"
    mkdir "${BUNDLE_DIR}"
fi
# For testing purposes, support disabling anything that relies on
# querying the saas file in app-interface. This includes pruning
# undeployed commits in production.
# FIXME -- This should go away when we're querying app-interface via
# graphql.
if [[ -z "$SKIP_SAAS_FILE_CHECKS" ]]; then
    # PATH to saas file in app-interface
    SAAS_FILE_URL="https://gitlab.cee.redhat.com/service/app-interface/raw/master/data/services/osd-operators/cicd/saas/saas-${operator_name}.yaml"

    # MANAGED_RESOURCE_TYPE
    # SAAS files contain the type of resources managed within the OC templates that
    # are being applied to hive.
    # For customer cluster resources this should always be of type "SelectorSyncSet" resources otherwise
    # can't be sync'd to the customer cluster. We're explicitly selecting the first element in the array.
    # We can safely assume anything that is not of type "SelectorSyncSet" is being applied to hive only
    # since it matches ClusterDeployment resources.
    # From this we'll assume that the namespace reference in resourceTemplates to be:
    # For customer clusters: /services/osd-operators/namespace/<hive shard>/namespaces/cluster-scope.yaml
    # For hive clusters: /services/osd-operators/namespace/<hive shard>/namespaces/<namespace name>.yaml
    # NB: $YQ_CMD is deliberately unquoted — it may expand to a multi-word
    # container invocation (see engine selection above).
    MANAGED_RESOURCE_TYPE=$(curl -s "${SAAS_FILE_URL}" | \
        $YQ_CMD r - "managedResourceTypes[0]"
    )
    if [[ "${MANAGED_RESOURCE_TYPE}" == "" ]]; then
        # (message fixed: was "Unabled to determine if SAAS file managed resource type")
        echo "Unable to determine SAAS file managed resource type"
        exit 1
    fi

    # Determine namespace reference path, output resource type
    if [[ "${MANAGED_RESOURCE_TYPE}" == "SelectorSyncSet" ]]; then
        echo "SAAS file is NOT applied to Hive, MANAGED_RESOURCE_TYPE=$MANAGED_RESOURCE_TYPE"
        resource_template_ns_path="/services/osd-operators/namespaces/hivep01ue1/cluster-scope.yml"
    else
        echo "SAAS file is applied to Hive, MANAGED_RESOURCE_TYPE=$MANAGED_RESOURCE_TYPE"
        resource_template_ns_path="/services/osd-operators/namespaces/hivep01ue1/${operator_name}.yml"
    fi

    # remove any versions more recent than deployed hash
    if [[ "$operator_channel" == "production" ]]; then
        if [ -z "$DEPLOYED_HASH" ] ; then
            DEPLOYED_HASH=$(
                curl -s "${SAAS_FILE_URL}" | \
                $YQ_CMD r - "resourceTemplates[*].targets(namespace.\$ref==${resource_template_ns_path}).ref"
            )
        fi

        # Ensure that our query for the current deployed hash worked.
        # Validate that our DEPLOYED_HASH var isn't empty.
        # Although we have `set -e` defined the docker container isn't returning
        # an error and allowing the script to continue.
        echo "Current deployed production HASH: $DEPLOYED_HASH"
        if [[ ! "${DEPLOYED_HASH}" =~ [0-9a-f]{40} ]]; then
            echo "Error discovering current production deployed HASH"
            exit 1
        fi

        delete=false
        # Walk bundle versions ordered by commit number (3rd dot-field,
        # numeric sort); everything newer than the deployed hash is pruned.
        for version in $(ls "$BUNDLE_DIR" | sort -t . -k 3 -g); do
            # skip if not directory
            [ -d "$BUNDLE_DIR/$version" ] || continue
            if [[ "$delete" == false ]]; then
                # version looks like {semver}-{short hash}; keep until we
                # reach the currently deployed one, delete everything after.
                short_hash=$(echo "$version" | cut -d- -f2)
                if [[ "$DEPLOYED_HASH" == "${short_hash}"* ]]; then
                    delete=true
                fi
            else
                rm -rf "${BUNDLE_DIR:?BUNDLE_DIR var not set}/$version"
            fi
        done
    fi
fi # End of SKIP_SAAS_FILE_CHECKS granny switch
# The latest bundle version already in the repo is the "previous" version;
# entries sort numerically on the commit-number (third dot-separated field).
OPERATOR_PREV_VERSION=$(ls "$BUNDLE_DIR" | sort -t . -k 3 -g | tail -n 1)
OPERATOR_NEW_VERSION="${operator_version}"
OUTPUT_DIR=${BUNDLE_DIR}

# If setting up a new SaaS repo, there is no previous version when building
# a bundle; only hand -p to the generator when one actually exists.
PREV_VERSION_OPTS=""
if [[ -n "${OPERATOR_PREV_VERSION}" ]]; then
    PREV_VERSION_OPTS="-p ${OPERATOR_PREV_VERSION}"
fi
# Jenkins can't be relied upon to have py3, so run the generator in
# a container.
# ...Unless we're already in a container, which is how boilerplate
# CI runs. We have py3 there, so run natively in that case.
if [[ -z "$CONTAINER_ENGINE" ]]; then
    # ${PREV_VERSION_OPTS} is deliberately unquoted: it is either empty or
    # "-p <version>" and must split into separate arguments.
    ./boilerplate/openshift/golang-osd-operator/csv-generate/common-generate-operator-bundle.py -o "${operator_name}" -d "${OUTPUT_DIR}" ${PREV_VERSION_OPTS} -i "${REPO_DIGEST}" -V "${operator_version}"
else
    # podman needs --userns keep-id so the bind-mounted workdir is writable
    # by the invoking user, plus an SELinux relabel (:Z).
    # Backticks replaced with $(...) and the results quoted so paths with
    # spaces don't word-split.
    if [[ ${CONTAINER_ENGINE##*/} == "podman" ]]; then
        CE_OPTS="--userns keep-id -v $(pwd):$(pwd):Z"
    else
        CE_OPTS="-v $(pwd):$(pwd)"
    fi
    # ${CE_OPTS} stays unquoted on purpose: it is a multi-word option string.
    $CONTAINER_ENGINE run --pull=always --rm ${CE_OPTS} -u "$(id -u)":0 -w "$(pwd)" registry.access.redhat.com/ubi8/python-36 /bin/bash -c "python -m pip install --disable-pip-version-check oyaml; python ./boilerplate/openshift/golang-osd-operator/csv-generate/common-generate-operator-bundle.py -o ${operator_name} -d ${OUTPUT_DIR} ${PREV_VERSION_OPTS} -i ${REPO_DIGEST} -V ${operator_version}"
fi