Commit d5037c6

Merge pull request #1270 from fpetkovski/indicate-no-storage
Bug 1978662: Set a degraded message when persistent storage is not configured
2 parents 2f35619 + 9c7cddc commit d5037c6

5 files changed: +25 -4 lines changed
Diff for: CHANGELOG.md (+1)

@@ -4,6 +4,7 @@
 
 - [#1241](https://github.com/openshift/cluster-monitoring-operator/pull/1241) Add config option to disable Grafana deployment.
 - [#1278](https://github.com/openshift/cluster-monitoring-operator/pull/1278) Add EnforcedTargetLimit option for user-workload Prometheus.
+- [#1270](https://github.com/openshift/cluster-monitoring-operator/pull/1270) Show a message in the degraded condition when Platform Monitoring Prometheus runs without persistent storage.
 
 ## 4.8
 
Diff for: pkg/client/status_reporter.go (+4 -2)

@@ -29,6 +29,8 @@ import (
 const (
 	unavailableMessage string = "Rollout of the monitoring stack failed and is degraded. Please investigate the degraded status error."
 	asExpectedReason   string = "AsExpected"
+	StorageNotConfiguredMessage = "Prometheus is running without persistent storage which can lead to data loss during upgrades and cluster disruptions. Please refer to the official documentation to see how to configure storage for Prometheus: https://docs.openshift.com/container-platform/4.8/monitoring/configuring-the-monitoring-stack.html"
+	StorageNotConfiguredReason  = "PrometheusDataPersistenceNotConfigured"
 )
 
 type StatusReporter struct {

@@ -67,7 +69,7 @@ func (r *StatusReporter) relatedObjects() []v1.ObjectReference {
 	}
 }
 
-func (r *StatusReporter) SetDone() error {
+func (r *StatusReporter) SetDone(degradedConditionMessage string, degradedConditionReason string) error {
 	co, err := r.client.Get(r.ctx, r.clusterOperatorName, metav1.GetOptions{})
 	if apierrors.IsNotFound(err) {
 		co = r.newClusterOperator()

@@ -82,7 +84,7 @@ func (r *StatusReporter) SetDone() error {
 	conditions := newConditions(co.Status, r.version, time)
 	conditions.setCondition(v1.OperatorAvailable, v1.ConditionTrue, "Successfully rolled out the stack.", "RollOutDone", time)
 	conditions.setCondition(v1.OperatorProgressing, v1.ConditionFalse, "", "", time)
-	conditions.setCondition(v1.OperatorDegraded, v1.ConditionFalse, "", "", time)
+	conditions.setCondition(v1.OperatorDegraded, v1.ConditionFalse, degradedConditionMessage, degradedConditionReason, time)
 	conditions.setCondition(v1.OperatorUpgradeable, v1.ConditionTrue, "", asExpectedReason, time)
 	co.Status.Conditions = conditions.entries()
 
Diff for: pkg/client/status_reporter_test.go (+1 -1)

@@ -113,7 +113,7 @@ func TestStatusReporterSetDone(t *testing.T) {
 			w(mock)
 		}
 
-		got := sr.SetDone()
+		got := sr.SetDone("", "")
 
 		for _, check := range tc.check {
 			if err := check(mock, got); err != nil {
Diff for: pkg/manifests/config.go (+13)

@@ -38,6 +38,19 @@ type Config struct {
 	UserWorkloadConfiguration *UserWorkloadConfiguration `json:"-"`
 }
 
+func (c Config) IsStorageConfigured() bool {
+	if c.ClusterMonitoringConfiguration == nil {
+		return false
+	}
+
+	prometheusK8sConfig := c.ClusterMonitoringConfiguration.PrometheusK8sConfig
+	if prometheusK8sConfig == nil {
+		return false
+	}
+
+	return prometheusK8sConfig.VolumeClaimTemplate != nil
+}
+
 type ClusterMonitoringConfiguration struct {
 	PrometheusOperatorConfig *PrometheusOperatorConfig `json:"prometheusOperator"`
 	PrometheusK8sConfig      *PrometheusK8sConfig      `json:"prometheusK8s"`
Diff for: pkg/operator/operator.go (+6 -1)

@@ -481,9 +481,14 @@ func (o *Operator) sync(key string) error {
 		return err
 	}
 
+	var degradedConditionMessage, degradedConditionReason string
+	if !config.IsStorageConfigured() {
+		degradedConditionMessage = client.StorageNotConfiguredMessage
+		degradedConditionReason = client.StorageNotConfiguredReason
+	}
 	klog.Info("Updating ClusterOperator status to done.")
 	o.failedReconcileAttempts = 0
-	err = o.client.StatusReporter().SetDone()
+	err = o.client.StatusReporter().SetDone(degradedConditionMessage, degradedConditionReason)
 	if err != nil {
 		klog.Errorf("error occurred while setting status to done: %v", err)
 	}

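Taken together: when no volumeClaimTemplate is set, the operator still reports Degraded=False, but the condition now carries the informational reason and message from status_reporter.go. The sketch below approximates the shape of that condition, built directly with the openshift/api config/v1 types for illustration only (the operator itself goes through newConditions/setCondition, and the message is abbreviated here):

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Approximate Degraded condition published by SetDone when persistent
	// storage is not configured: status stays False, while reason/message
	// explain the risk of running Prometheus without a PVC.
	cond := configv1.ClusterOperatorStatusCondition{
		Type:               configv1.OperatorDegraded,
		Status:             configv1.ConditionFalse,
		Reason:             "PrometheusDataPersistenceNotConfigured",
		Message:            "Prometheus is running without persistent storage which can lead to data loss during upgrades and cluster disruptions. ...",
		LastTransitionTime: metav1.Now(), // illustrative timestamp
	}
	fmt.Printf("%s=%s reason=%s\n", cond.Type, cond.Status, cond.Reason)
}

Passing an empty message and reason, as the existing test does with SetDone("", ""), keeps the previous behaviour of a bare Degraded=False condition.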