@@ -3,7 +3,6 @@ package aggregated_logging

import (
	"errors"
	"fmt"
-	"net/url"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
@@ -15,26 +14,24 @@ import (
	appstypedclient "github.com/openshift/origin/pkg/apps/generated/internalclientset/typed/apps/internalversion"
	authapi "github.com/openshift/origin/pkg/authorization/apis/authorization"
	oauthorizationtypedclient "github.com/openshift/origin/pkg/authorization/generated/internalclientset/typed/authorization/internalversion"
-	configapi "github.com/openshift/origin/pkg/cmd/server/apis/config"
	oauthtypedclient "github.com/openshift/origin/pkg/oauth/generated/internalclientset/typed/oauth/internalversion"
-	hostdiag "github.com/openshift/origin/pkg/oc/admin/diagnostics/diagnostics/host"
	"github.com/openshift/origin/pkg/oc/admin/diagnostics/diagnostics/log"
	"github.com/openshift/origin/pkg/oc/admin/diagnostics/diagnostics/types"
	projecttypedclient "github.com/openshift/origin/pkg/project/generated/internalclientset/typed/project/internalversion"
	routesapi "github.com/openshift/origin/pkg/route/apis/route"
	routetypedclient "github.com/openshift/origin/pkg/route/generated/internalclientset/typed/route/internalversion"
	securityapi "github.com/openshift/origin/pkg/security/apis/security"
	securitytypedclient "github.com/openshift/origin/pkg/security/generated/internalclientset/typed/security/internalversion"
+	kerrors "k8s.io/apimachinery/pkg/api/errors"
+	"strings"
)

// AggregatedLogging is a Diagnostic to check the configurations
// and general integration of the OpenShift stack
// for aggregating container logs
// https://github.com/openshift/origin-aggregated-logging
type AggregatedLogging struct {
-	masterConfig *configapi.MasterConfig
-	loggingURL string
-	MasterConfigFile string
+	Project string
	OAuthClientClient oauthtypedclient.OAuthClientsGetter
	ProjectClient projecttypedclient.ProjectsGetter
	RouteClient routetypedclient.RoutesGetter
@@ -54,13 +51,16 @@ const (
	openshiftValue = "openshift"

	fluentdServiceAccountName = "aggregated-logging-fluentd"
+
+	flagLoggingProject = "logging-project"
)

var loggingSelector = labels.Set{loggingInfraKey: "support"}
+var defaultLoggingProjects = []string{"openshift-logging", "logging"}

//NewAggregatedLogging returns the AggregatedLogging Diagnostic
func NewAggregatedLogging(
-	masterConfigFile string,
+	project string,
	kclient kclientset.Interface,
	oauthClientClient oauthtypedclient.OAuthClientsGetter,
	projectClient projecttypedclient.ProjectsGetter,
@@ -70,10 +70,7 @@ func NewAggregatedLogging(
	sccClient securitytypedclient.SecurityContextConstraintsGetter,
) *AggregatedLogging {
	return &AggregatedLogging{
-		masterConfig: nil,
-		// TODO this needs to be plumbed because the master-config no longer has it.
-		loggingURL: "",
-		MasterConfigFile: masterConfigFile,
+		Project: project,
		OAuthClientClient: oauthClientClient,
		ProjectClient: projectClient,
		RouteClient: routeClient,
@@ -153,20 +150,30 @@ func (d *AggregatedLogging) Requirements() (client bool, host bool) {
}

func (d *AggregatedLogging) Complete(logger *log.Logger) error {
-	if len(d.MasterConfigFile) > 0 {
-		var err error
-		d.masterConfig, err = hostdiag.GetMasterConfig(d.MasterConfigFile, logger)
+	if len(d.Project) > 0 {
+		return nil
+	}
+
+	// Check if any of the default logging projects are present in the cluster
+	for _, project := range defaultLoggingProjects {
+		d.Debug("AGL0031", fmt.Sprintf("Trying default logging project %q", project))
+		_, err := d.ProjectClient.Projects().Get(project, metav1.GetOptions{})
		if err != nil {
-			return err
+			if kerrors.IsNotFound(err) {
+				d.Debug("AGL0032", fmt.Sprintf("Project %q not found", project))
+				continue
+			}
+			return fmt.Errorf("failed fetching one of the default logging projects %q: %v", project, err)
		}
+
+		d.Debug("AGL0033", fmt.Sprintf("Found default logging project %q", project))
+		d.Project = project
+		return nil
	}
-	return nil
+	return fmt.Errorf("default logging project not found, use '--%s' to specify logging project", flagLoggingProject)
}

func (d *AggregatedLogging) CanRun() (bool, error) {
-	if len(d.MasterConfigFile) == 0 || d.masterConfig == nil {
-		return false, errors.New("No master config file was provided")
-	}
	if d.OAuthClientClient == nil || d.ProjectClient == nil || d.RouteClient == nil || d.CRBClient == nil || d.DCClient == nil {
		return false, errors.New("Config must include a cluster-admin context to run this diagnostic")
	}
@@ -177,24 +184,33 @@ func (d *AggregatedLogging) CanRun() (bool, error) {
}

func (d *AggregatedLogging) Check() types.DiagnosticResult {
-	if len(d.loggingURL) == 0 {
+	d.Debug("AGL0015", fmt.Sprintf("Trying diagnostics for project '%s'", d.Project))
+	p, err := d.ProjectClient.Projects().Get(d.Project, metav1.GetOptions{})
+	if err != nil {
+		d.Error("AGL0018", err, fmt.Sprintf("There was an error retrieving project '%s' which is most likely a transient error: %s", d.Project, err))
		return d.result
	}
-
-	project := retrieveLoggingProject(d.result, d.loggingURL, d.ProjectClient, d.RouteClient)
-	if len(project) != 0 {
-		checkServiceAccounts(d, d, project)
-		checkClusterRoleBindings(d, d, project)
-		checkSccs(d, d, project)
-		checkDeploymentConfigs(d, d, project)
-		checkDaemonSets(d, d, project)
-		checkServices(d, d, project)
-		checkRoutes(d, d, project)
-		checkKibana(d.result, d.RouteClient, d.OAuthClientClient, d.KubeClient, project)
+	nodeSelector, ok := p.ObjectMeta.Annotations["openshift.io/node-selector"]
+	if !ok || len(nodeSelector) != 0 {
+		d.Warn("AGL0030", nil, fmt.Sprintf(projectNodeSelectorWarning, d.Project))
	}
+	checkServiceAccounts(d, d, d.Project)
+	checkClusterRoleBindings(d, d, d.Project)
+	checkSccs(d, d, d.Project)
+	checkDeploymentConfigs(d, d, d.Project)
+	checkDaemonSets(d, d, d.Project)
+	checkServices(d, d, d.Project)
+	checkRoutes(d, d, d.Project)
+	checkKibana(d, d.RouteClient, d.OAuthClientClient, d.KubeClient, d.Project)
	return d.result
}

+func (d *AggregatedLogging) AvailableParameters() []types.Parameter {
+	return []types.Parameter{
+		{flagLoggingProject, fmt.Sprintf("Project that has deployed aggregated logging. Default projects: %s", strings.Join(defaultLoggingProjects, " or ")), &d.Project, ""},
+	}
+}
+
const projectNodeSelectorWarning = `
The project '%[1]s' was found with either a missing or non-empty node selector annotation.
This could keep Fluentd from running on certain nodes and collecting logs from the entire cluster.
@@ -207,51 +223,3 @@ and updating the annotation:
	'openshift.io/node-selector' : ""

`
-
-func retrieveLoggingProject(r types.DiagnosticResult, loggingURL string, projectClient projecttypedclient.ProjectsGetter, routeClient routetypedclient.RoutesGetter) string {
-	r.Debug("AGL0010", fmt.Sprintf("masterConfig.AssetConfig.LoggingPublicURL: '%s'", loggingURL))
-	projectName := ""
-	if len(loggingURL) == 0 {
-		r.Debug("AGL0017", "masterConfig.AssetConfig.LoggingPublicURL is empty")
-		return projectName
-	}
-
-	loggingUrl, err := url.Parse(loggingURL)
-	if err != nil {
-		r.Error("AGL0011", err, fmt.Sprintf("Unable to parse the loggingPublicURL from the masterConfig '%s'", loggingURL))
-		return projectName
-	}
-
-	routeList, err := routeClient.Routes(metav1.NamespaceAll).List(metav1.ListOptions{LabelSelector: loggingSelector.AsSelector().String()})
-	if err != nil {
-		r.Error("AGL0012", err, fmt.Sprintf("There was an error while trying to find the route associated with '%s' which is probably transient: %s", loggingUrl, err))
-		return projectName
-	}
-
-	for _, route := range routeList.Items {
-		r.Debug("AGL0013", fmt.Sprintf("Comparing URL to route.Spec.Host: %s", route.Spec.Host))
-		if loggingUrl.Host == route.Spec.Host {
-			if len(projectName) == 0 {
-				projectName = route.ObjectMeta.Namespace
-				r.Info("AGL0015", fmt.Sprintf("Found route '%s' matching logging URL '%s' in project: '%s'", route.ObjectMeta.Name, loggingUrl.Host, projectName))
-			} else {
-				r.Warn("AGL0019", nil, fmt.Sprintf("Found additional route '%s' matching logging URL '%s' in project: '%s'. This could mean you have multiple logging deployments.", route.ObjectMeta.Name, loggingUrl.Host, projectName))
-			}
-		}
-	}
-	if len(projectName) == 0 {
-		message := fmt.Sprintf("Unable to find a route matching the loggingPublicURL defined in the master config '%s'. Check that the URL is correct and aggregated logging is deployed.", loggingUrl)
-		r.Error("AGL0014", errors.New(message), message)
-		return ""
-	}
-	project, err := projectClient.Projects().Get(projectName, metav1.GetOptions{})
-	if err != nil {
-		r.Error("AGL0018", err, fmt.Sprintf("There was an error retrieving project '%s' which is most likely a transient error: %s", projectName, err))
-		return ""
-	}
-	nodeSelector, ok := project.ObjectMeta.Annotations["openshift.io/node-selector"]
-	if !ok || len(nodeSelector) != 0 {
-		r.Warn("AGL0030", nil, fmt.Sprintf(projectNodeSelectorWarning, projectName))
-	}
-	return projectName
-}
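Note on the error handling in the new `Complete()`: a `NotFound` error from the project client means "try the next default project", while any other error is surfaced immediately. Below is a minimal standalone sketch of that pattern, assuming the same `kerrors.IsNotFound` helper used in the change; the `projectGetter` interface, `firstExistingProject` function, and `fakeGetter` stub are illustrative names only and are not part of this PR.

```go
package main

import (
	"fmt"

	kerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// projectGetter is a hypothetical stand-in for the small slice of the
// project client this sketch needs: a Get that can return NotFound.
type projectGetter interface {
	Get(name string, options metav1.GetOptions) (interface{}, error)
}

// firstExistingProject mirrors the loop in Complete(): return the first
// candidate that exists, skip candidates that are NotFound, and surface
// any other error immediately.
func firstExistingProject(projects projectGetter, candidates []string) (string, error) {
	for _, name := range candidates {
		if _, err := projects.Get(name, metav1.GetOptions{}); err != nil {
			if kerrors.IsNotFound(err) {
				continue // project absent; try the next default
			}
			return "", fmt.Errorf("failed fetching project %q: %v", name, err)
		}
		return name, nil
	}
	return "", fmt.Errorf("none of the candidate projects %v were found", candidates)
}

// fakeGetter is an in-memory stub used only to exercise the sketch.
type fakeGetter struct{ existing map[string]bool }

func (f fakeGetter) Get(name string, _ metav1.GetOptions) (interface{}, error) {
	if f.existing[name] {
		return struct{}{}, nil
	}
	return nil, kerrors.NewNotFound(schema.GroupResource{Resource: "projects"}, name)
}

func main() {
	client := fakeGetter{existing: map[string]bool{"openshift-logging": true}}
	project, err := firstExistingProject(client, []string{"openshift-logging", "logging"})
	fmt.Println(project, err) // prints: openshift-logging <nil>
}
```

Treating NotFound as non-fatal keeps the diagnostic usable on clusters where logging lives in a non-default project: discovery simply fails with a hint to pass '--logging-project' instead of aborting on the first missing namespace.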