Cluster version condition #524

Merged

Changes from 7 commits
1 change: 1 addition & 0 deletions go.mod
@@ -3,6 +3,7 @@ module github.com/openshift/insights-operator
go 1.16

require (
github.com/blang/semver/v4 v4.0.0
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
github.com/google/gofuzz v1.2.0 // indirect
github.com/openshift/api v0.0.0-20210901140736-d8ed1449662d
2 changes: 2 additions & 0 deletions go.sum
@@ -72,6 +72,8 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
2 changes: 1 addition & 1 deletion pkg/gather/gather.go
@@ -60,7 +60,7 @@ func CreateAllGatherers(
gatherKubeConfig, gatherProtoKubeConfig, metricsGatherKubeConfig, anonymizer, controller.Interval,
)
workloadsGatherer := workloads.New(gatherProtoKubeConfig)
conditionalGatherer := conditional.New(gatherProtoKubeConfig, metricsGatherKubeConfig)
conditionalGatherer := conditional.New(gatherProtoKubeConfig, metricsGatherKubeConfig, gatherKubeConfig)

return []gatherers.Interface{clusterConfigGatherer, workloadsGatherer, conditionalGatherer}
}
145 changes: 114 additions & 31 deletions pkg/gatherers/conditional/conditional_gatherer.go
@@ -3,6 +3,10 @@
// they can be fetched from outside, checked that they make sense (we want to check the parameters, for example if
// a rule tells to collect logs of a namespace on firing alert, we want to check that the namespace is created
// by openshift and not by a user). Conditional gathering isn't considered prioritized, so we run it every 6 hours.
//
// To add a new condition, follow the next steps:
// 1. Add structures to conditions.go
// 2. Change areAllConditionsSatisfied function in conditional_gatherer.go
package conditional

import (
@@ -12,7 +16,10 @@ import (
"sort"
"strings"

"github.com/blang/semver/v4"
configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
"github.com/prometheus/common/expfmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"

@@ -158,13 +165,15 @@ type Gatherer struct {
gatherProtoKubeConfig *rest.Config
metricsGatherKubeConfig *rest.Config
imageKubeConfig *rest.Config
gatherKubeConfig *rest.Config
// there can be multiple instances of the same alert
firingAlerts map[string][]AlertLabels
gatheringRules []GatheringRule
clusterVersion string
}

// New creates a new instance of conditional gatherer with the appropriate configs
func New(gatherProtoKubeConfig, metricsGatherKubeConfig *rest.Config) *Gatherer {
func New(gatherProtoKubeConfig, metricsGatherKubeConfig, gatherKubeConfig *rest.Config) *Gatherer {
var imageKubeConfig *rest.Config
if gatherProtoKubeConfig != nil {
// needed for getting image streams
@@ -177,10 +186,18 @@ func New(gatherProtoKubeConfig, metricsGatherKubeConfig *rest.Config) *Gatherer
gatherProtoKubeConfig: gatherProtoKubeConfig,
metricsGatherKubeConfig: metricsGatherKubeConfig,
imageKubeConfig: imageKubeConfig,
gatherKubeConfig: gatherKubeConfig,
gatheringRules: defaultGatheringRules,
}
}

// GatheringRuleMetadata stores information about gathering rules
type GatheringRuleMetadata struct {
Rule GatheringRule `json:"rule"`
Errors []error `json:"errors"`
WasTriggered bool `json:"was_triggered"`
Contributor

Great. I like this idea!

}

// GetName returns the name of the gatherer
func (g *Gatherer) GetName() string {
return "conditional"
@@ -194,46 +211,53 @@ func (g *Gatherer) GetGatheringFunctions(ctx context.Context) (map[string]gather
return nil, fmt.Errorf("got invalid config for conditional gatherer: %v", utils.SumErrors(errs))
}

err := g.updateAlertsCache(ctx)
if err != nil {
return nil, fmt.Errorf("conditional gatherer can't update alerts cache: %v", err)
}
g.updateCache(ctx)

gatheringFunctions := make(map[string]gatherers.GatheringClosure)

gatheringFunctions["conditional_gatherer_rules"] = gatherers.GatheringClosure{
Run: g.GatherConditionalGathererRules,
CanFail: canConditionalGathererFail,
}
var metadata []GatheringRuleMetadata

for _, conditionalGathering := range g.gatheringRules {
ruleMetadata := GatheringRuleMetadata{
Rule: conditionalGathering,
}

allConditionsAreSatisfied, err := g.areAllConditionsSatisfied(conditionalGathering.Conditions)
if err != nil {
return nil, err
klog.Errorf("error checking conditions for a gathering rule: %v", err)
ruleMetadata.Errors = append(ruleMetadata.Errors, err)
}

ruleMetadata.WasTriggered = allConditionsAreSatisfied

if allConditionsAreSatisfied {
functions, errs := g.createGatheringClosures(conditionalGathering.GatheringFunctions)
if len(errs) > 0 {
return nil, err
klog.Errorf("error(s) creating a closure for a gathering rule: %v", errs)
ruleMetadata.Errors = append(ruleMetadata.Errors, errs...)
}

for funcName, function := range functions {
gatheringFunctions[funcName] = function
}
}
}

return gatheringFunctions, nil
}
metadata = append(metadata, ruleMetadata)
}

// GatherConditionalGathererRules stores the gathering rules in insights-operator/conditional-gatherer-rules.json
func (g *Gatherer) GatherConditionalGathererRules(context.Context) ([]record.Record, []error) {
return []record.Record{
{
Name: "insights-operator/conditional-gatherer-rules",
Item: record.JSONMarshaller{Object: g.gatheringRules},
gatheringFunctions["conditional_gatherer_rules"] = gatherers.GatheringClosure{
Run: func(context.Context) ([]record.Record, []error) {
return []record.Record{
{
Name: "insights-operator/conditional-gatherer-rules",
Item: record.JSONMarshaller{Object: metadata},
},
}, nil
},
}, nil
CanFail: canConditionalGathererFail,
}

return gatheringFunctions, nil
}

// areAllConditionsSatisfied returns true if all the conditions are satisfied, for example if the condition is
@@ -246,8 +270,16 @@ func (g *Gatherer) areAllConditionsSatisfied(conditions []ConditionWithParams) (
return false, fmt.Errorf("alert field should not be nil")
}

if !g.isAlertFiring(condition.Alert.Name) {
return false, nil
if firing, err := g.isAlertFiring(condition.Alert.Name); !firing || err != nil {
return false, err
}
case ClusterVersionMatches:
if condition.ClusterVersionMatches == nil {
return false, fmt.Errorf("cluster_version_matches field should not be nil")
}

if doesMatch, err := g.doesClusterVersionMatch(condition.ClusterVersionMatches.Version); !doesMatch || err != nil {
return false, err
}
default:
return false, fmt.Errorf("unknown condition type: %v", condition.Type)
@@ -257,21 +289,31 @@ func (g *Gatherer) areAllConditionsSatisfied(conditions []ConditionWithParams) (
return true, nil
}

// updateAlertsCache updates the cache with firing alerts
func (g *Gatherer) updateAlertsCache(ctx context.Context) error {
// updateCache updates alerts and version caches
func (g *Gatherer) updateCache(ctx context.Context) {
if g.metricsGatherKubeConfig == nil {
return nil
return
}

metricsClient, err := rest.RESTClientFor(g.metricsGatherKubeConfig)
if err != nil {
return err
klog.Errorf("unable to update alerts cache: %v", err)
} else if err := g.updateAlertsCache(ctx, metricsClient); err != nil { //nolint:govet
klog.Errorf("unable to update alerts cache: %v", err)
g.firingAlerts = nil
}

configClient, err := configv1client.NewForConfig(g.gatherKubeConfig)
if err != nil {
klog.Errorf("unable to update version cache: %v", err)
} else if err := g.updateVersionCache(ctx, configClient); err != nil {
klog.Errorf("unable to update version cache: %v", err)
g.clusterVersion = ""
}

return g.updateAlertsCacheFromClient(ctx, metricsClient)
}

func (g *Gatherer) updateAlertsCacheFromClient(ctx context.Context, metricsClient rest.Interface) error {
func (g *Gatherer) updateAlertsCache(ctx context.Context, metricsClient rest.Interface) error {
const logPrefix = "conditional gatherer: "

g.firingAlerts = make(map[string][]AlertLabels)
@@ -282,20 +324,24 @@ func (g *Gatherer) updateAlertsCacheFromClient(ctx context.Context, metricsClien
if err != nil {
return err
}

var parser expfmt.TextParser
metricFamilies, err := parser.TextToMetricFamilies(bytes.NewReader(data))
if err != nil {
return err
}

if len(metricFamilies) > 1 {
// just log cuz everything would still work
klog.Warning(logPrefix + "unexpected output from prometheus metrics parser")
}

metricFamily, found := metricFamilies["ALERTS"]
if !found {
klog.Info(logPrefix + "no alerts are firing")
return nil
}

for _, metric := range metricFamily.GetMetric() {
if metric == nil {
klog.Info(logPrefix + "metric is nil")
@@ -320,10 +366,47 @@ func (g *Gatherer) updateAlertsCacheFromClient(ctx context.Context, metricsClien
return nil
}

func (g *Gatherer) updateVersionCache(ctx context.Context, configClient configv1client.ConfigV1Interface) error {
clusterVersion, err := configClient.ClusterVersions().Get(ctx, "version", metav1.GetOptions{})
if err != nil {
return err
}

g.clusterVersion = clusterVersion.Status.Desired.Version

return nil
}

// isAlertFiring using the cache it returns true if the alert is firing
func (g *Gatherer) isAlertFiring(alertName string) bool {
func (g *Gatherer) isAlertFiring(alertName string) (bool, error) {
if g.firingAlerts == nil {
return false, fmt.Errorf("alerts cache is missing")
}

_, alertIsFiring := g.firingAlerts[alertName]
return alertIsFiring
return alertIsFiring, nil
}

func (g *Gatherer) doesClusterVersionMatch(expectedVersionExpression string) (bool, error) {
if len(g.clusterVersion) == 0 {
return false, fmt.Errorf("cluster version is missing")
}

clusterVersion, err := semver.Parse(g.clusterVersion)
if err != nil {
return false, err
}

expectedRange, err := semver.ParseRange(expectedVersionExpression)
if err != nil {
return false, err
}

// ignore everything after the first three numbers
clusterVersion.Pre = nil
clusterVersion.Build = nil

return expectedRange(clusterVersion), nil
}

// createGatheringClosures produces gathering closures from the rules
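
The doesClusterVersionMatch helper above relies on the blang/semver/v4 dependency this PR adds to go.mod: it parses the cluster version, clears the pre-release and build fields, and evaluates the result against a semver range. Below is a minimal, self-contained sketch of that behaviour (not part of the diff; it assumes the module's canonical github.com import path):

package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

// versionMatches mirrors the logic of doesClusterVersionMatch: parse the
// version, drop pre-release/build metadata, then evaluate the range.
func versionMatches(clusterVersion, rangeExpr string) (bool, error) {
	version, err := semver.Parse(clusterVersion)
	if err != nil {
		return false, err
	}

	expectedRange, err := semver.ParseRange(rangeExpr)
	if err != nil {
		return false, err
	}

	// Without clearing Pre, "4.9.0-rc.2" would not satisfy ">=4.9.0",
	// because semver orders pre-releases before the release itself.
	// This is what the "ignore everything after the first three numbers"
	// comment in the diff refers to.
	version.Pre = nil
	version.Build = nil

	return expectedRange(version), nil
}

func main() {
	matches, err := versionMatches("4.9.0-rc.2", ">=4.9.0 <4.10.0")
	fmt.Println(matches, err) // true <nil>
}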
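
The package comment in the diff lists the two steps for adding a new condition (add structures to conditions.go, extend areAllConditionsSatisfied). The sketch below shows that dispatch pattern end to end with simplified stand-in types; the constant values, the params struct names, and the alert name are illustrative assumptions rather than the operator's actual definitions.

package main

import "fmt"

// Simplified stand-ins for the structures defined in conditions.go.
type ConditionType string

const (
	// the string values here are assumed for illustration only
	Alert                 ConditionType = "alert_is_firing"
	ClusterVersionMatches ConditionType = "cluster_version_matches"
)

type AlertConditionParams struct{ Name string }

type ClusterVersionMatchesConditionParams struct{ Version string }

type ConditionWithParams struct {
	Type                  ConditionType
	Alert                 *AlertConditionParams
	ClusterVersionMatches *ClusterVersionMatchesConditionParams
}

// areAllConditionsSatisfied mirrors the switch in conditional_gatherer.go:
// every condition type gets its own case, so a new condition means a new
// params struct above and a new case below.
func areAllConditionsSatisfied(conditions []ConditionWithParams, firingAlerts map[string]bool, clusterVersion string) (bool, error) {
	for _, condition := range conditions {
		switch condition.Type {
		case Alert:
			if condition.Alert == nil {
				return false, fmt.Errorf("alert field should not be nil")
			}
			if !firingAlerts[condition.Alert.Name] {
				return false, nil
			}
		case ClusterVersionMatches:
			if condition.ClusterVersionMatches == nil {
				return false, fmt.Errorf("cluster_version_matches field should not be nil")
			}
			// the real gatherer delegates to doesClusterVersionMatch here;
			// an exact-string comparison keeps this sketch dependency-free
			if clusterVersion != condition.ClusterVersionMatches.Version {
				return false, nil
			}
		default:
			return false, fmt.Errorf("unknown condition type: %v", condition.Type)
		}
	}

	return true, nil
}

func main() {
	conditions := []ConditionWithParams{
		{Type: Alert, Alert: &AlertConditionParams{Name: "SampleAlert"}},
		{Type: ClusterVersionMatches, ClusterVersionMatches: &ClusterVersionMatchesConditionParams{Version: "4.9.0"}},
	}

	ok, err := areAllConditionsSatisfied(conditions, map[string]bool{"SampleAlert": true}, "4.9.0")
	fmt.Println(ok, err) // true <nil>
}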