Skip to content

Commit

Permalink
Merge pull request #17857 from sosiouxme/20171217-diagnostic-refactor-summary
Browse files Browse the repository at this point in the history

Automatic merge from submit-queue (batch tested with PRs 17857, 18252, 18198).

diagnostics: refactor build-and-run for clarity

This builds on #17773 which is the source of the first commit. Look at the second commit for the new changes.

----

Improve the legibility of the code that builds and runs diagnostics.

The main confusion was the need to track and report the number of diagnostic errors and warnings versus problems that halt execution prematurely and the need to return a correct status code at completion. In the end it seemed simplest to just have the logger report how many diagnostic errors and warnings were seen, leaving function signatures to return only build/run errors.

As a side effect, I looked at the ConfigLoading code that does an early check to see if there is a client config, and concluded it was confusing and unnecessary for it to be a diagnostic, so I refactored it away.

Commands for main diagnostics as well as pod diagnostics are now implemented more uniformly.
  • Loading branch information
openshift-merge-robot authored Jan 24, 2018
2 parents 8220060 + 8059482 commit ee846d3
Show file tree
Hide file tree
Showing 10 changed files with 347 additions and 484 deletions.
10 changes: 5 additions & 5 deletions pkg/oc/admin/diagnostics/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,14 +18,14 @@ func availableClientDiagnostics() types.DiagnosticList {
}

// buildClientDiagnostics builds client Diagnostic objects based on the rawConfig passed in.
// Returns the Diagnostics built, "ok" bool for whether to proceed or abort, and an error if any was encountered during the building of diagnostics.) {
func (o DiagnosticsOptions) buildClientDiagnostics(rawConfig *clientcmdapi.Config) ([]types.Diagnostic, bool, error) {
// Returns the Diagnostics built, and any fatal errors encountered during the building of diagnostics.
func (o DiagnosticsOptions) buildClientDiagnostics(rawConfig *clientcmdapi.Config) ([]types.Diagnostic, error) {
available := availableClientDiagnostics().Names()

networkClient, err := o.Factory.OpenshiftInternalNetworkClient()
kubeClient, clientErr := o.Factory.ClientSet()
if clientErr != nil || err != nil {
o.Logger.Notice("CED0001", "Could not configure a client, so client diagnostics are limited to testing configuration and connection")
o.Logger().Notice("CED0001", "Could not configure a client, so client diagnostics are limited to testing configuration and connection")
available = sets.NewString(clientdiags.ConfigContextsName)
}

Expand Down Expand Up @@ -64,8 +64,8 @@ func (o DiagnosticsOptions) buildClientDiagnostics(rawConfig *clientcmdapi.Confi
nd.PreventModification = o.PreventModification
diagnostics = append(diagnostics, nd)
default:
return nil, false, fmt.Errorf("unknown diagnostic: %v", diagnosticName)
return nil, fmt.Errorf("unknown diagnostic: %v", diagnosticName)
}
}
return diagnostics, true, clientErr
return diagnostics, clientErr
}
104 changes: 53 additions & 51 deletions pkg/oc/admin/diagnostics/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,52 +43,50 @@ func availableClusterDiagnostics() types.DiagnosticList {
}

// buildClusterDiagnostics builds cluster Diagnostic objects if a cluster-admin client can be extracted from the rawConfig passed in.
// Returns the Diagnostics built, "ok" bool for whether to proceed or abort, and an error if any was encountered during the building of diagnostics.) {
func (o DiagnosticsOptions) buildClusterDiagnostics(rawConfig *clientcmdapi.Config) ([]types.Diagnostic, bool, error) {
// Returns the Diagnostics built and any fatal error encountered during the building of diagnostics.
func (o DiagnosticsOptions) buildClusterDiagnostics(rawConfig *clientcmdapi.Config) ([]types.Diagnostic, error) {
requestedDiagnostics := availableClusterDiagnostics().Names().Intersection(sets.NewString(o.RequestedDiagnostics.List()...)).List()
if len(requestedDiagnostics) == 0 { // no diagnostics to run here
return nil, true, nil // don't waste time on discovery
return nil, nil // don't waste time on discovery
}

var (
kclusterClient kclientset.Interface
)
var kclusterClient kclientset.Interface

config, kclusterClient, found, serverUrl, err := o.findClusterClients(rawConfig)
if !found {
o.Logger.Notice("CED1002", "Could not configure a client with cluster-admin permissions for the current server, so cluster diagnostics will be skipped")
return nil, true, err
config, kclusterClient, serverUrl, err := o.findClusterClients(rawConfig)
if config == nil {
o.Logger().Notice("CED1002", "Could not configure a client with cluster-admin permissions for the current server, so cluster diagnostics will be skipped")
return nil, nil
}
if err != nil {
return nil, false, err
return nil, err
}
imageClient, err := imageclient.NewForConfig(config)
if err != nil {
return nil, false, err
return nil, err
}
projectClient, err := projectclient.NewForConfig(config)
if err != nil {
return nil, false, err
return nil, err
}
routeClient, err := routeclient.NewForConfig(config)
if err != nil {
return nil, false, err
return nil, err
}
appsClient, err := appsclient.NewForConfig(config)
if err != nil {
return nil, false, err
return nil, err
}
oauthClient, err := oauthclient.NewForConfig(config)
if err != nil {
return nil, false, err
return nil, err
}
oauthorizationClient, err := oauthorizationclient.NewForConfig(config)
if err != nil {
return nil, false, err
return nil, err
}
securityClient, err := securityclient.NewForConfig(config)
if err != nil {
return nil, false, err
return nil, err
}

diagnostics := []types.Diagnostic{}
Expand Down Expand Up @@ -116,64 +114,68 @@ func (o DiagnosticsOptions) buildClusterDiagnostics(rawConfig *clientcmdapi.Conf
case clustdiags.RouteCertificateValidationName:
d = &clustdiags.RouteCertificateValidation{SARClient: kclusterClient.Authorization(), RESTConfig: config}
default:
return nil, false, fmt.Errorf("unknown diagnostic: %v", diagnosticName)
return nil, fmt.Errorf("unknown diagnostic: %v", diagnosticName)
}
diagnostics = append(diagnostics, d)
}
return diagnostics, true, nil
return diagnostics, nil
}

// attempts to find which context in the config might be a cluster-admin for the server in the current context.
func (o DiagnosticsOptions) findClusterClients(rawConfig *clientcmdapi.Config) (*rest.Config, kclientset.Interface, bool, string, error) {
// returns config for the context chosen, kclusterClient for same, serverUrl of same, and any fatal error
func (o DiagnosticsOptions) findClusterClients(rawConfig *clientcmdapi.Config) (*rest.Config, kclientset.Interface, string, error) {
if o.ClientClusterContext != "" { // user has specified cluster context to use
if context, exists := rawConfig.Contexts[o.ClientClusterContext]; !exists {
context, exists := rawConfig.Contexts[o.ClientClusterContext]
if !exists {
configErr := fmt.Errorf("Specified '%s' as cluster-admin context, but it was not found in your client configuration.", o.ClientClusterContext)
o.Logger.Error("CED1003", configErr.Error())
return nil, nil, false, "", configErr
} else if config, kube, found, serverUrl, err := o.makeClusterClients(rawConfig, o.ClientClusterContext, context); found {
return config, kube, true, serverUrl, err
} else {
return nil, nil, false, "", err
o.Logger().Error("CED1003", configErr.Error())
return nil, nil, "", configErr
}
config, kube, serverUrl, err := o.makeClusterClients(rawConfig, o.ClientClusterContext, context)
if err != nil || config == nil {
return nil, nil, "", err
}
return config, kube, serverUrl, nil
}
currentContext, exists := rawConfig.Contexts[rawConfig.CurrentContext]
if !exists { // config specified cluster admin context that doesn't exist; complain and quit
configErr := fmt.Errorf("Current context '%s' not found in client configuration; will not attempt cluster diagnostics.", rawConfig.CurrentContext)
o.Logger.Error("CED1004", configErr.Error())
return nil, nil, false, "", configErr
o.Logger().Error("CED1004", configErr.Error())
return nil, nil, "", configErr
}
// check if current context is already cluster admin
if config, kube, found, serverUrl, err := o.makeClusterClients(rawConfig, rawConfig.CurrentContext, currentContext); found {
return config, kube, true, serverUrl, err
config, kube, serverUrl, err := o.makeClusterClients(rawConfig, rawConfig.CurrentContext, currentContext)
if err == nil && config != nil {
return config, kube, serverUrl, nil
}
// otherwise, for convenience, search for a context with the same server but with the system:admin user
for name, context := range rawConfig.Contexts {
if context.Cluster == currentContext.Cluster && name != rawConfig.CurrentContext && strings.HasPrefix(context.AuthInfo, "system:admin/") {
if config, kube, found, serverUrl, err := o.makeClusterClients(rawConfig, name, context); found {
return config, kube, true, serverUrl, err
} else {
return nil, nil, false, "", err // don't try more than one such context, they'll probably fail the same
config, kube, serverUrl, err := o.makeClusterClients(rawConfig, name, context)
if err != nil || config == nil {
break // don't try more than one such context, they'll probably fail the same
}
return config, kube, serverUrl, nil
}
}
return nil, nil, false, "", nil
return nil, nil, "", nil
}

// makes the client from the specified context and determines whether it is a cluster-admin.
func (o DiagnosticsOptions) makeClusterClients(rawConfig *clientcmdapi.Config, contextName string, context *clientcmdapi.Context) (*rest.Config, kclientset.Interface, bool, string, error) {
func (o DiagnosticsOptions) makeClusterClients(rawConfig *clientcmdapi.Config, contextName string, context *clientcmdapi.Context) (*rest.Config, kclientset.Interface, string, error) {
overrides := &clientcmd.ConfigOverrides{Context: *context}
clientConfig := clientcmd.NewDefaultClientConfig(*rawConfig, overrides)
serverUrl := rawConfig.Clusters[context.Cluster].Server
factory := osclientcmd.NewFactory(clientConfig)
config, err := factory.ClientConfig()
if err != nil {
o.Logger.Debug("CED1006", fmt.Sprintf("Error creating client for context '%s':\n%v", contextName, err))
return nil, nil, false, "", nil
o.Logger().Debug("CED1006", fmt.Sprintf("Error creating client for context '%s':\n%v", contextName, err))
return nil, nil, "", nil
}
o.Logger.Debug("CED1005", fmt.Sprintf("Checking if context is cluster-admin: '%s'", contextName))
o.Logger().Debug("CED1005", fmt.Sprintf("Checking if context is cluster-admin: '%s'", contextName))
if kubeClient, err := factory.ClientSet(); err != nil {
o.Logger.Debug("CED1006", fmt.Sprintf("Error creating client for context '%s':\n%v", contextName, err))
return nil, nil, false, "", nil
o.Logger().Debug("CED1006", fmt.Sprintf("Error creating client for context '%s':\n%v", contextName, err))
return nil, nil, "", nil
} else {
subjectAccessReview := &authorization.SelfSubjectAccessReview{
Spec: authorization.SelfSubjectAccessReviewSpec{
Expand All @@ -187,17 +189,17 @@ func (o DiagnosticsOptions) makeClusterClients(rawConfig *clientcmdapi.Config, c
}
if resp, err := kubeClient.Authorization().SelfSubjectAccessReviews().Create(subjectAccessReview); err != nil {
if regexp.MustCompile(`User "[\w:]+" cannot create \w+ at the cluster scope`).MatchString(err.Error()) {
o.Logger.Debug("CED1007", fmt.Sprintf("Context '%s' does not have cluster-admin access:\n%v", contextName, err))
return nil, nil, false, "", nil
o.Logger().Debug("CED1007", fmt.Sprintf("Context '%s' does not have cluster-admin access:\n%v", contextName, err))
return nil, nil, "", nil
} else {
o.Logger.Error("CED1008", fmt.Sprintf("Unknown error testing cluster-admin access for context '%s':\n%v", contextName, err))
return nil, nil, false, "", err
o.Logger().Error("CED1008", fmt.Sprintf("Unknown error testing cluster-admin access for context '%s':\n%v", contextName, err))
return nil, nil, "", err
}
} else if resp.Status.Allowed {
o.Logger.Info("CED1009", fmt.Sprintf("Using context for cluster-admin access: '%s'", contextName))
return config, kubeClient, true, serverUrl, nil
o.Logger().Info("CED1009", fmt.Sprintf("Using context for cluster-admin access: '%s'", contextName))
return config, kubeClient, serverUrl, nil
}
}
o.Logger.Debug("CED1010", fmt.Sprintf("Context does not have cluster-admin access: '%s'", contextName))
return nil, nil, false, "", nil
o.Logger().Debug("CED1010", fmt.Sprintf("Context does not have cluster-admin access: '%s'", contextName))
return nil, nil, "", nil
}
140 changes: 124 additions & 16 deletions pkg/oc/admin/diagnostics/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,28 +2,17 @@ package diagnostics

import (
"errors"
"fmt"
"io/ioutil"
"os"

"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

"github.com/openshift/origin/pkg/client/config"
clientdiagnostics "github.com/openshift/origin/pkg/oc/admin/diagnostics/diagnostics/client"
"github.com/openshift/origin/pkg/oc/admin/diagnostics/diagnostics/types"
"github.com/openshift/origin/pkg/oc/admin/diagnostics/diagnostics/util"
)

// determine if we even have a client config
func (o DiagnosticsOptions) detectClientConfig() (bool, bool, []types.DiagnosticError, []types.DiagnosticError) {
if o.ClientFlags == nil {
return false, false, []types.DiagnosticError{}, []types.DiagnosticError{}
}
diagnostic := &clientdiagnostics.ConfigLoading{ConfFlagName: config.OpenShiftConfigFlagName, ClientFlags: o.ClientFlags}
o.Logger.Notice("CED2011", "Determining if client configuration exists for client/cluster diagnostics")
result := diagnostic.Check()
for _, entry := range result.Logs() {
o.Logger.LogEntry(entry)
}
return true, diagnostic.SuccessfulLoad(), result.Warnings(), result.Errors()
}

// use the base factory to return a raw config (not specific to a context)
func (o DiagnosticsOptions) buildRawConfig() (*clientcmdapi.Config, error) {
kubeConfig, configErr := o.Factory.OpenShiftClientConfig().RawConfig()
Expand All @@ -35,3 +24,122 @@ func (o DiagnosticsOptions) buildRawConfig() (*clientcmdapi.Config, error) {
}
return &kubeConfig, nil
}

// detectClientConfig determines whether a client config is expected at all
// (client flags were supplied) and whether one was actually found on disk.
func (o DiagnosticsOptions) detectClientConfig() (expected bool, detected bool) {
	if o.ClientFlags == nil {
		// Options for the client were not provided, so config must not be expected.
		return false, false
	}
	o.Logger().Notice("CED2011", "Determining if client configuration exists for client/cluster diagnostics")
	confFlagName := config.OpenShiftConfigFlagName
	confFlagValue := o.ClientFlags.Lookup(confFlagName).Value.String()

	rules := config.NewOpenShiftClientConfigLoadingRules()
	candidates := append([]string{confFlagValue}, rules.Precedence...)

	foundPath := ""
	for i, candidate := range candidates {
		msg := ""
		if i == 0 {
			// First entry is the --config flag value; missing file there is an error worth explaining.
			msg = fmt.Sprintf("--%s specified that client config should be at %s\n", confFlagName, candidate)
		} else if i < len(candidates)-1 {
			// Middle entries come from the env var (possibly several paths).
			// The final entry (config in ~/.kube) gets no message; the user never said it would be there.
			if len(os.Getenv(config.OpenShiftConfigPathEnvVar)) != 0 {
				msg = fmt.Sprintf("Env var %s specified that client config could be at %s\n", config.OpenShiftConfigPathEnvVar, candidate)
			}
		}
		// Probe every candidate for its logging side effects, but remember only the first hit.
		if o.canOpenConfigFile(candidate, msg) && len(foundPath) == 0 {
			foundPath = candidate
		}
	}

	if len(foundPath) > 0 {
		if len(confFlagValue) > 0 && confFlagValue != foundPath {
			// A config was found, but not where --config said it would be.
			o.Logger().Error("DCli1001", fmt.Sprintf(`
The client configuration file was not found where the --%s flag indicated:
%s
A config file was found at the following location:
%s
If you wish to use this file for client configuration, you can specify it
with the --%[1]s flag, or just not specify the flag.
`, confFlagName, confFlagValue, foundPath))
		}
		return true, true
	}

	// Nothing found; check for master-generated configs to recommend.
	if len(confFlagValue) > 0 {
		o.Logger().Error("DCli1002", fmt.Sprintf("Did not find config file where --%s=%s indicated", confFlagName, confFlagValue))
	}
	adminWarningF := `
No client config file was available; however, one exists at
%[2]s
which may have been generated automatically by the master.
If you want to use this config, you should copy it to the
standard location (%[3]s),
or you can set the environment variable %[1]s:
export %[1]s=%[2]s
If not, obtain a config file and place it in the standard
location for use by the client and diagnostics.
`
	for _, autoPath := range util.AdminKubeConfigPaths {
		note := fmt.Sprintf("Looking for a possible client config at %s\n", autoPath)
		if o.canOpenConfigFile(autoPath, note) {
			o.Logger().Warn("DCli1003", fmt.Sprintf(adminWarningF, config.OpenShiftConfigPathEnvVar, autoPath, config.RecommendedHomeFile))
			break
		}
	}
	return true, false
}

// canOpenConfigFile attempts to open and parse the file at path as a client config.
// If opening fails and errmsg is set, it logs an error explaining why the file
// was expected there; otherwise failures are logged at debug level.
// Returns true only if the file was read and parsed as client config successfully.
func (o DiagnosticsOptions) canOpenConfigFile(path string, errmsg string) bool {
	if len(path) == 0 { // empty param/envvar; nothing to open
		return false
	}
	file, err := os.Open(path)
	switch {
	case err == nil:
		o.Logger().Debug("DCli1004", fmt.Sprintf("Reading client config at %s", path))
	case len(errmsg) == 0:
		// No expectation was stated for this path, so a miss is only debug-worthy.
		o.Logger().Debug("DCli1005", fmt.Sprintf("Could not read client config at %s:\n%#v", path, err))
	case os.IsNotExist(err):
		o.Logger().Debug("DCli1006", errmsg+"but that file does not exist.")
	case os.IsPermission(err):
		o.Logger().Error("DCli1007", errmsg+"but lack permission to read that file.")
	default:
		o.Logger().Error("DCli1008", fmt.Sprintf("%sbut there was an error opening it:\n%#v", errmsg, err))
	}
	if err != nil {
		return false
	}

	// file is open for reading
	defer file.Close()
	buffer, readErr := ioutil.ReadAll(file)
	if readErr != nil {
		o.Logger().Error("DCli1009", fmt.Sprintf("Unexpected error while reading client config file (%s): %v", path, readErr))
		return false
	}
	if _, loadErr := clientcmd.Load(buffer); loadErr != nil {
		o.Logger().Error("DCli1010", fmt.Sprintf(`
Error reading YAML from client config file (%s):
%v
This file may have been truncated or mis-edited.
Please fix, remove, or obtain a new client config`, file.Name(), loadErr))
		return false
	}
	o.Logger().Info("DCli1011", fmt.Sprintf("Successfully read a client config file at '%s'", path))
	/* Note, we're not going to use this config file directly.
	 * Instead, we'll defer to the openshift client code to assimilate
	 * flags, env vars, and the potential hierarchy of config files
	 * into an actual configuration that the client uses.
	 * However, for diagnostic purposes, record the files we find.
	 */
	return true
}
Loading

0 comments on commit ee846d3

Please sign in to comment.