
Ability to set pod environment variables on cluster resource #1794



Merged
merged 11 commits on Apr 11, 2022
6 changes: 6 additions & 0 deletions charts/postgres-operator/crds/postgresqls.yaml
@@ -196,6 +196,12 @@ spec:
type: boolean
enableShmVolume:
type: boolean
env:
type: array
nullable: true
items:
type: object
x-kubernetes-preserve-unknown-fields: true
init_containers:
type: array
description: deprecated
23 changes: 23 additions & 0 deletions docs/administrator.md
@@ -706,6 +706,29 @@ data:
The key-value pairs of the Secret are all accessible as environment variables
to the Postgres StatefulSet/pods.

### For individual clusters

Environment variables can be defined directly in the Postgres cluster manifest
to configure an individual cluster. List them under the `env` section in the
same way you would for [containers](https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/).
Variables defined here override global parameters served from a custom config
map or secret.

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-test-cluster
spec:
  env:
  - name: wal_s3_bucket
    value: my-custom-bucket
  - name: minio_secret_key
    valueFrom:
      secretKeyRef:
        name: my-custom-secret
        key: minio_secret_key
```

## Limiting the number of min and max instances in clusters

As a preventive measure, one can restrict the minimum and the maximum number of
6 changes: 5 additions & 1 deletion manifests/complete-postgres-manifest.yaml
@@ -49,6 +49,10 @@ spec:
shared_buffers: "32MB"
max_connections: "10"
log_statement: "all"
# env:
# - name: wal_s3_bucket
# value: my-custom-bucket

volume:
size: 1Gi
# storageClass: my-sc
@@ -120,7 +124,7 @@ spec:
# database: foo
# plugin: pgoutput
ttl: 30
loop_wait: &loop_wait 10
loop_wait: 10
retry_timeout: 10
synchronous_mode: false
synchronous_mode_strict: false
6 changes: 6 additions & 0 deletions manifests/postgresql.crd.yaml
@@ -194,6 +194,12 @@ spec:
type: boolean
enableShmVolume:
type: boolean
env:
type: array
nullable: true
items:
type: object
x-kubernetes-preserve-unknown-fields: true
init_containers:
type: array
description: deprecated
10 changes: 10 additions & 0 deletions pkg/apis/acid.zalan.do/v1/crds.go
@@ -311,6 +311,16 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
"enableShmVolume": {
Type: "boolean",
},
"env": {
Type: "array",
Nullable: true,
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
XPreserveUnknownFields: util.True(),
},
},
},
"init_containers": {
Type: "array",
Description: "deprecated",
1 change: 1 addition & 0 deletions pkg/apis/acid.zalan.do/v1/postgresql_type.go
@@ -80,6 +80,7 @@ type PostgresSpec struct {
TLS *TLSDescription `json:"tls,omitempty"`
AdditionalVolumes []AdditionalVolume `json:"additionalVolumes,omitempty"`
Streams []Stream `json:"streams,omitempty"`
Env []v1.EnvVar `json:"env,omitempty"`

// deprecated json tags
InitContainersOld []v1.Container `json:"init_containers,omitempty"`
7 changes: 7 additions & 0 deletions pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

102 changes: 45 additions & 57 deletions pkg/cluster/k8sres.go
@@ -649,8 +649,7 @@ func patchSidecarContainers(in []v1.Container, volumeMounts []v1.VolumeMount, su
},
},
}
mergedEnv := append(env, container.Env...)
container.Env = deduplicateEnvVars(mergedEnv, container.Name, logger)
container.Env = appendEnvVars(env, container.Env...)
result = append(result, container)
}

@@ -769,6 +768,7 @@ func (c *Cluster) generateSpiloPodEnvVars(
cloneDescription *acidv1.CloneDescription,
standbyDescription *acidv1.StandbyDescription,
customPodEnvVarsList []v1.EnvVar) []v1.EnvVar {

envVars := []v1.EnvVar{
{
Name: "SCOPE",
@@ -843,6 +843,11 @@
Value: c.OpConfig.PamRoleName,
},
}

if c.OpConfig.EnableSpiloWalPathCompat {
envVars = append(envVars, v1.EnvVar{Name: "ENABLE_WAL_PATH_COMPAT", Value: "true"})
}

if c.OpConfig.EnablePgVersionEnvVar {
envVars = append(envVars, v1.EnvVar{Name: "PGVERSION", Value: c.GetDesiredMajorVersion()})
}
@@ -874,73 +879,67 @@ func (c *Cluster) generateSpiloPodEnvVars(
envVars = append(envVars, c.generateStandbyEnvironment(standbyDescription)...)
}

if len(c.Spec.Env) > 0 {
envVars = appendEnvVars(envVars, c.Spec.Env...)
}

// add vars taken from pod_environment_configmap and pod_environment_secret first
// (to allow them to override the globals set in the operator config)
if len(customPodEnvVarsList) > 0 {
envVars = append(envVars, customPodEnvVarsList...)
envVars = appendEnvVars(envVars, customPodEnvVarsList...)
}

if c.OpConfig.WALES3Bucket != "" {
envVars = append(envVars, v1.EnvVar{Name: "WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""})
envVars = appendEnvVars(envVars, v1.EnvVar{Name: "WAL_S3_BUCKET", Value: c.OpConfig.WALES3Bucket})
envVars = appendEnvVars(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
envVars = appendEnvVars(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""})
}

if c.OpConfig.WALGSBucket != "" {
envVars = append(envVars, v1.EnvVar{Name: "WAL_GS_BUCKET", Value: c.OpConfig.WALGSBucket})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""})
envVars = appendEnvVars(envVars, v1.EnvVar{Name: "WAL_GS_BUCKET", Value: c.OpConfig.WALGSBucket})
envVars = appendEnvVars(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
envVars = appendEnvVars(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""})
}

if c.OpConfig.WALAZStorageAccount != "" {
envVars = append(envVars, v1.EnvVar{Name: "AZURE_STORAGE_ACCOUNT", Value: c.OpConfig.WALAZStorageAccount})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""})
envVars = appendEnvVars(envVars, v1.EnvVar{Name: "AZURE_STORAGE_ACCOUNT", Value: c.OpConfig.WALAZStorageAccount})
envVars = appendEnvVars(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
envVars = appendEnvVars(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""})
}

if c.OpConfig.GCPCredentials != "" {
envVars = append(envVars, v1.EnvVar{Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: c.OpConfig.GCPCredentials})
envVars = appendEnvVars(envVars, v1.EnvVar{Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: c.OpConfig.GCPCredentials})
}

if c.OpConfig.LogS3Bucket != "" {
envVars = append(envVars, v1.EnvVar{Name: "LOG_S3_BUCKET", Value: c.OpConfig.LogS3Bucket})
envVars = append(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
envVars = append(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_PREFIX", Value: ""})
envVars = appendEnvVars(envVars, v1.EnvVar{Name: "LOG_S3_BUCKET", Value: c.OpConfig.LogS3Bucket})
envVars = appendEnvVars(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
envVars = appendEnvVars(envVars, v1.EnvVar{Name: "LOG_BUCKET_SCOPE_PREFIX", Value: ""})
}

return envVars
}

// deduplicateEnvVars makes sure there are no duplicate in the target envVar array. While Kubernetes already
// deduplicates variables defined in a container, it leaves the last definition in the list and this behavior is not
// well-documented, which means that the behavior can be reversed at some point (it may also start producing an error).
// Therefore, the merge is done by the operator, the entries that are ahead in the passed list take priority over those
// that are behind, and only the name is considered in order to eliminate duplicates.
func deduplicateEnvVars(input []v1.EnvVar, containerName string, logger *logrus.Entry) []v1.EnvVar {
result := make([]v1.EnvVar, 0)
names := make(map[string]int)

for i, va := range input {
if names[va.Name] == 0 {
names[va.Name]++
result = append(result, input[i])
} else if names[va.Name] == 1 {
names[va.Name]++

// Some variables (those to configure the WAL_ and LOG_ shipping) may be overwritten, only log as info
if strings.HasPrefix(va.Name, "WAL_") || strings.HasPrefix(va.Name, "LOG_") {
logger.Infof("global variable %q has been overwritten by configmap/secret for container %q",
va.Name, containerName)
} else {
logger.Warningf("variable %q is defined in %q more than once, the subsequent definitions are ignored",
va.Name, containerName)
}
func appendEnvVars(envs []v1.EnvVar, appEnv ...v1.EnvVar) []v1.EnvVar {
jenvs := envs
for _, env := range appEnv {
if !isEnvVarPresent(jenvs, env.Name) {
jenvs = append(jenvs, env)
}
}
return result
return jenvs
}

func isEnvVarPresent(envs []v1.EnvVar, key string) bool {
for _, env := range envs {
if env.Name == key {
return true
}
}
return false
}

// Return list of variables the pod recieved from the configured ConfigMap
// Return list of variables the pod received from the configured ConfigMap
func (c *Cluster) getPodEnvironmentConfigMapVariables() ([]v1.EnvVar, error) {
configMapPodEnvVarsList := make([]v1.EnvVar, 0)

@@ -1105,16 +1104,6 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
initContainers = spec.InitContainers
}

spiloCompathWalPathList := make([]v1.EnvVar, 0)
if c.OpConfig.EnableSpiloWalPathCompat {
spiloCompathWalPathList = append(spiloCompathWalPathList,
v1.EnvVar{
Name: "ENABLE_WAL_PATH_COMPAT",
Value: "true",
},
)
}

// fetch env vars from custom ConfigMap
configMapEnvVarsList, err := c.getPodEnvironmentConfigMapVariables()
if err != nil {
@@ -1128,8 +1117,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
}

// concat all custom pod env vars and sort them
customPodEnvVarsList := append(spiloCompathWalPathList, configMapEnvVarsList...)
customPodEnvVarsList = append(customPodEnvVarsList, secretEnvVarsList...)
customPodEnvVarsList := append(configMapEnvVarsList, secretEnvVarsList...)
sort.Slice(customPodEnvVarsList,
func(i, j int) bool { return customPodEnvVarsList[i].Name < customPodEnvVarsList[j].Name })

@@ -1210,7 +1198,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
// use the same filenames as Secret resources by default
certFile := ensurePath(spec.TLS.CertificateFile, mountPath, "tls.crt")
privateKeyFile := ensurePath(spec.TLS.PrivateKeyFile, mountPath, "tls.key")
spiloEnvVars = append(
spiloEnvVars = appendEnvVars(
spiloEnvVars,
v1.EnvVar{Name: "SSL_CERTIFICATE_FILE", Value: certFile},
v1.EnvVar{Name: "SSL_PRIVATE_KEY_FILE", Value: privateKeyFile},
@@ -1224,7 +1212,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
}

caFile := ensurePath(spec.TLS.CAFile, mountPathCA, "")
spiloEnvVars = append(
spiloEnvVars = appendEnvVars(
spiloEnvVars,
v1.EnvVar{Name: "SSL_CA_FILE", Value: caFile},
)
@@ -1249,7 +1237,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
spiloContainer := generateContainer(constants.PostgresContainerName,
&effectiveDockerImage,
resourceRequirements,
deduplicateEnvVars(spiloEnvVars, constants.PostgresContainerName, c.logger),
spiloEnvVars,
volumeMounts,
c.OpConfig.Resources.SpiloPrivileged,
c.OpConfig.Resources.SpiloAllowPrivilegeEscalation,
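For reference, here is a minimal standalone sketch (not part of this diff) of the first-wins merge semantics that the new `appendEnvVars`/`isEnvVarPresent` helpers implement: entries earlier in the slice keep their value and later duplicates are dropped by name, which is how a per-cluster `env` entry ends up overriding a global such as `WAL_S3_BUCKET`. The two helpers are copied from the diff above; the bucket values are made-up illustration data.

```go
// Illustrative sketch of the first-wins env var merge used by the operator.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// appendEnvVars mirrors the helper added in pkg/cluster/k8sres.go:
// later entries are only appended if no earlier entry has the same name.
func appendEnvVars(envs []v1.EnvVar, appEnv ...v1.EnvVar) []v1.EnvVar {
	for _, env := range appEnv {
		if !isEnvVarPresent(envs, env.Name) {
			envs = append(envs, env)
		}
	}
	return envs
}

func isEnvVarPresent(envs []v1.EnvVar, key string) bool {
	for _, env := range envs {
		if env.Name == key {
			return true
		}
	}
	return false
}

func main() {
	// Cluster manifest `env` entries are appended before the ConfigMap/Secret
	// and operator-config WAL settings, so the per-cluster value wins.
	clusterEnv := []v1.EnvVar{{Name: "WAL_S3_BUCKET", Value: "my-custom-bucket"}}
	globalEnv := []v1.EnvVar{
		{Name: "WAL_S3_BUCKET", Value: "operator-default-bucket"},
		{Name: "LOG_S3_BUCKET", Value: "operator-log-bucket"},
	}

	for _, e := range appendEnvVars(clusterEnv, globalEnv...) {
		fmt.Printf("%s=%s\n", e.Name, e.Value)
	}
	// Output:
	// WAL_S3_BUCKET=my-custom-bucket
	// LOG_S3_BUCKET=operator-log-bucket
}
```

First-wins keeps the merge order explicit in the operator code instead of relying on Kubernetes' undocumented last-wins deduplication, which is what the removed `deduplicateEnvVars` comment warned about.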