package aggregated_logging

import (
	"errors"
	"fmt"
	"net/url"

	kapi "k8s.io/kubernetes/pkg/api"
	kapisext "k8s.io/kubernetes/pkg/apis/extensions"
	kclient "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/labels"

	authapi "github.com/openshift/origin/pkg/authorization/api"
	"github.com/openshift/origin/pkg/client"
	configapi "github.com/openshift/origin/pkg/cmd/server/api"
	deployapi "github.com/openshift/origin/pkg/deploy/api"
	hostdiag "github.com/openshift/origin/pkg/diagnostics/host"
	"github.com/openshift/origin/pkg/diagnostics/types"
	routesapi "github.com/openshift/origin/pkg/route/api"
)

// AggregatedLogging is a Diagnostic that checks the configuration and general
// integration of the OpenShift stack for aggregating container logs:
// https://github.com/openshift/origin-aggregated-logging
type AggregatedLogging struct {
	masterConfig     *configapi.MasterConfig
	MasterConfigFile string
	OsClient         *client.Client
	KubeClient       *kclient.Client
	result           types.DiagnosticResult
}

const (
	AggregatedLoggingName = "AggregatedLogging"

	loggingInfraKey = "logging-infra"
	componentKey    = "component"
	providerKey     = "provider"
	openshiftValue  = "openshift"

	fluentdServiceAccountName = "aggregated-logging-fluentd"
)

// loggingSelector matches the supporting logging infrastructure objects,
// which carry the label logging-infra=support.
var loggingSelector = labels.Set{loggingInfraKey: "support"}
| 46 | + |
| 47 | +//NewAggregatedLogging returns the AggregatedLogging Diagnostic |
| 48 | +func NewAggregatedLogging(masterConfigFile string, kclient *kclient.Client, osclient *client.Client) *AggregatedLogging { |
| 49 | + return &AggregatedLogging{nil, masterConfigFile, osclient, kclient, types.NewDiagnosticResult(AggregatedLoggingName)} |
| 50 | +} |
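
// Example wiring, as a minimal sketch: the config path and client variables
// below are assumptions for illustration, not part of this package.
//
//	d := NewAggregatedLogging("/etc/origin/master/master-config.yaml", kubeClient, osClient)
//	if ok, err := d.CanRun(); ok && err == nil {
//		result := d.Check()
//		// inspect result for logged errors and warnings
//	}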

// The accessors below wrap the raw client calls so that the individual checks
// can depend on narrow interfaces (and be exercised against fakes in tests)
// rather than on the full clients.
func (d *AggregatedLogging) getScc(name string) (*kapi.SecurityContextConstraints, error) {
	return d.KubeClient.SecurityContextConstraints().Get(name)
}

func (d *AggregatedLogging) getClusterRoleBinding(name string) (*authapi.ClusterRoleBinding, error) {
	return d.OsClient.ClusterRoleBindings().Get(name)
}

func (d *AggregatedLogging) routes(project string, options kapi.ListOptions) (*routesapi.RouteList, error) {
	return d.OsClient.Routes(project).List(options)
}

func (d *AggregatedLogging) serviceAccounts(project string, options kapi.ListOptions) (*kapi.ServiceAccountList, error) {
	return d.KubeClient.ServiceAccounts(project).List(options)
}

func (d *AggregatedLogging) services(project string, options kapi.ListOptions) (*kapi.ServiceList, error) {
	return d.KubeClient.Services(project).List(options)
}

func (d *AggregatedLogging) endpointsForService(project string, service string) (*kapi.Endpoints, error) {
	return d.KubeClient.Endpoints(project).Get(service)
}

func (d *AggregatedLogging) daemonsets(project string, options kapi.ListOptions) (*kapisext.DaemonSetList, error) {
	return d.KubeClient.DaemonSets(project).List(options)
}

func (d *AggregatedLogging) nodes(options kapi.ListOptions) (*kapi.NodeList, error) {
	return d.KubeClient.Nodes().List(options)
}

func (d *AggregatedLogging) pods(project string, options kapi.ListOptions) (*kapi.PodList, error) {
	return d.KubeClient.Pods(project).List(options)
}

func (d *AggregatedLogging) deploymentconfigs(project string, options kapi.ListOptions) (*deployapi.DeploymentConfigList, error) {
	return d.OsClient.DeploymentConfigs(project).List(options)
}

// Info, Error, Debug and Warn delegate to the underlying DiagnosticResult so
// that AggregatedLogging itself can be passed wherever a reporter is expected.
func (d *AggregatedLogging) Info(id string, message string) {
	d.result.Info(id, message)
}

func (d *AggregatedLogging) Error(id string, err error, message string) {
	d.result.Error(id, err, message)
}

func (d *AggregatedLogging) Debug(id string, message string) {
	d.result.Debug(id, message)
}

func (d *AggregatedLogging) Warn(id string, err error, message string) {
	d.result.Warn(id, err, message)
}

// Name is part of the Diagnostic interface and returns the diagnostic's name.
func (d *AggregatedLogging) Name() string {
	return AggregatedLoggingName
}

// Description is part of the Diagnostic interface and briefly describes this check.
func (d *AggregatedLogging) Description() string {
	return "Check aggregated logging integration for proper configuration"
}

// CanRun is part of the Diagnostic interface; it verifies that a master config
// and cluster-admin clients are available, loading the config as a side effect.
func (d *AggregatedLogging) CanRun() (bool, error) {
	if len(d.MasterConfigFile) == 0 {
		return false, errors.New("No master config file was provided")
	}
	if d.OsClient == nil {
		return false, errors.New("Config must include a cluster-admin context to run this diagnostic")
	}
	if d.KubeClient == nil {
		return false, errors.New("Config must include a cluster-admin context to run this diagnostic")
	}
	var err error
	d.masterConfig, err = hostdiag.GetMasterConfig(d.result, d.MasterConfigFile)
	if err != nil {
		return false, errors.New("Unreadable master config; skipping this diagnostic.")
	}
	return true, nil
}

// Check is part of the Diagnostic interface; it locates the logging project
// and runs each component check against it.
func (d *AggregatedLogging) Check() types.DiagnosticResult {
	project := retrieveLoggingProject(d.result, d.masterConfig, d.OsClient)
	if len(project) != 0 {
		checkServiceAccounts(d, d, project)
		checkClusterRoleBindings(d, d, project)
		checkSccs(d, d, project)
		checkDeploymentConfigs(d, d, project)
		checkDaemonSets(d, d, project)
		checkServices(d, d, project)
		checkRoutes(d, d, project)
		checkKibana(d.result, d.OsClient, d.KubeClient, project)
	}
	return d.result
}
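
// d is passed twice to each check above because the checks take their reporter
// and their data accessors as separate arguments, and AggregatedLogging plays
// both roles. A sketch of the assumed shape (the interface and function names
// here are assumptions; the checks live in sibling files of this package):
//
//	func checkServiceAccounts(r diagnosticReporter, adapter saAdapter, project string) {
//		sas, err := adapter.serviceAccounts(project, kapi.ListOptions{})
//		// ... report missing or misconfigured service accounts via r ...
//	}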

const projectNodeSelectorWarning = `
The project '%[1]s' was found with a non-empty node selector annotation. This will keep
Fluentd from running on certain nodes and collecting logs from the entire cluster. You
can correct it by editing the project:

  oc edit namespace %[1]s

and updating the annotation:

  'openshift.io/node-selector' : ""

`

// retrieveLoggingProject finds the project hosting the aggregated logging
// deployment by matching a route against the loggingPublicURL from the master
// config. It returns an empty string when the project cannot be determined.
func retrieveLoggingProject(r types.DiagnosticResult, masterCfg *configapi.MasterConfig, osClient *client.Client) string {
	r.Debug("AGL0010", fmt.Sprintf("masterConfig.AssetConfig.LoggingPublicURL: '%s'", masterCfg.AssetConfig.LoggingPublicURL))
	projectName := ""
	if len(masterCfg.AssetConfig.LoggingPublicURL) == 0 {
		r.Debug("AGL0017", "masterConfig.AssetConfig.LoggingPublicURL is empty")
		return projectName
	}

	loggingUrl, err := url.Parse(masterCfg.AssetConfig.LoggingPublicURL)
	if err != nil {
		r.Error("AGL0011", err, fmt.Sprintf("Unable to parse the loggingPublicURL from the masterConfig '%s'", masterCfg.AssetConfig.LoggingPublicURL))
		return projectName
	}

	routeList, err := osClient.Routes(kapi.NamespaceAll).List(kapi.ListOptions{LabelSelector: loggingSelector.AsSelector()})
	if err != nil {
		r.Error("AGL0012", err, fmt.Sprintf("There was an error while trying to find the route associated with '%s', which is probably transient: %s", loggingUrl, err))
		return projectName
	}

	for _, route := range routeList.Items {
		r.Debug("AGL0013", fmt.Sprintf("Comparing URL to route.Spec.Host: %s", route.Spec.Host))
		if loggingUrl.Host == route.Spec.Host {
			if len(projectName) == 0 {
				projectName = route.ObjectMeta.Namespace
				r.Info("AGL0015", fmt.Sprintf("Found route '%s' matching logging URL '%s' in project: '%s'", route.ObjectMeta.Name, loggingUrl.Host, projectName))
			} else {
				r.Warn("AGL0019", nil, fmt.Sprintf("Found additional route '%s' matching logging URL '%s' in project: '%s'. This could mean you have multiple logging deployments.", route.ObjectMeta.Name, loggingUrl.Host, projectName))
			}
		}
	}
	if len(projectName) == 0 {
		message := fmt.Sprintf("Unable to find a route matching the loggingPublicURL defined in the master config '%s'. Check that the URL is correct and that aggregated logging is deployed.", loggingUrl)
		r.Error("AGL0014", errors.New(message), message)
		return ""
	}
	project, err := osClient.Projects().Get(projectName)
	if err != nil {
		r.Error("AGL0018", err, fmt.Sprintf("There was an error retrieving project '%s', which is most likely transient: %s", projectName, err))
		return ""
	}
	nodeSelector, ok := project.ObjectMeta.Annotations["openshift.io/node-selector"]
	if ok && len(nodeSelector) != 0 {
		r.Warn("AGL0030", nil, fmt.Sprintf(projectNodeSelectorWarning, projectName))
	}
	return projectName
}