diff --git a/Gopkg.lock b/Gopkg.lock index eb493ce3c..9d8a69f67 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -61,12 +61,12 @@ version = "v1.1.1" [[projects]] - digest = "1:32b7b054d74ab43a5fb9bac9681536821f484c8b90db7971c7d3ba024f2c1918" + branch = "master" + digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a" name = "github.com/golang/glog" packages = ["."] pruneopts = "NUT" - revision = "3c92600d7533018d216b534fe894ad60a1e6d5bf" - source = "https://github.com/openshift/golang-glog.git" + revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" [[projects]] branch = "master" @@ -213,7 +213,7 @@ [[projects]] branch = "master" - digest = "1:5f9e9064de248b2e2de27f1890007e277cc8deaa21bea00f61b74372a94c4a3f" + digest = "1:278ca049b05818a0f88289b44839a1c3f6fc5c3e04ee38e9dedda5116d4b2919" name = "github.com/openshift/api" packages = [ "config/v1", @@ -223,11 +223,11 @@ "security/v1", ] pruneopts = "NUT" - revision = "515b3561c8a150ecbd1a2dd744c7bd829b3308f2" + revision = "06fefbf8a8cfda80b930ea27e8772eb95280310b" [[projects]] branch = "master" - digest = "1:192abee7a3aa1ad1e84356e6fa4c14e9161ed107b44ff8fa80bbf1015c5ef426" + digest = "1:997fb0392baf5e229435667acaf6aabcf7fcf6a753a8e42149872ed888252e61" name = "github.com/openshift/client-go" packages = [ "config/clientset/versioned", @@ -244,7 +244,7 @@ "security/clientset/versioned/typed/security/v1", ] pruneopts = "NUT" - revision = "0255926f53935175fe90b8e7672c4c06c17d79e6" + revision = "c44a8b61b9f46cd9e802384dfeda0bc9942db68a" [[projects]] branch = "master" @@ -784,7 +784,6 @@ "github.com/blang/semver", "github.com/davecgh/go-spew/spew", "github.com/ghodss/yaml", - "github.com/golang/glog", "github.com/google/uuid", "github.com/openshift/api/config/v1", "github.com/openshift/api/image/v1", diff --git a/pkg/cvo/cvo_test.go b/pkg/cvo/cvo_test.go index 4a296c326..b503d6771 100644 --- a/pkg/cvo/cvo_test.go +++ b/pkg/cvo/cvo_test.go @@ -47,10 +47,10 @@ type clientCVLister struct { } func (c *clientCVLister) Get(name string) (*configv1.ClusterVersion, error) { - return c.client.Config().ClusterVersions().Get(name, metav1.GetOptions{}) + return c.client.ConfigV1().ClusterVersions().Get(name, metav1.GetOptions{}) } func (c *clientCVLister) List(selector labels.Selector) (ret []*configv1.ClusterVersion, err error) { - list, err := c.client.Config().ClusterVersions().List(metav1.ListOptions{LabelSelector: selector.String()}) + list, err := c.client.ConfigV1().ClusterVersions().List(metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { return nil, err } @@ -66,10 +66,10 @@ type clientCOLister struct { } func (c *clientCOLister) Get(name string) (*configv1.ClusterOperator, error) { - return c.client.Config().ClusterOperators().Get(name, metav1.GetOptions{}) + return c.client.ConfigV1().ClusterOperators().Get(name, metav1.GetOptions{}) } func (c *clientCOLister) List(selector labels.Selector) (ret []*configv1.ClusterOperator, err error) { - list, err := c.client.Config().ClusterOperators().List(metav1.ListOptions{LabelSelector: selector.String()}) + list, err := c.client.ConfigV1().ClusterOperators().List(metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { return nil, err } diff --git a/pkg/start/start_integration_test.go b/pkg/start/start_integration_test.go index 6d4b8c9df..b3723a4b2 100644 --- a/pkg/start/start_integration_test.go +++ b/pkg/start/start_integration_test.go @@ -203,7 +203,7 @@ func TestIntegrationCVO_initializeAndUpgrade(t *testing.T) { t.Fatal(err) } defer func() { - if err := 
client.Config().ClusterVersions().Delete(ns, nil); err != nil { + if err := client.ConfigV1().ClusterVersions().Delete(ns, nil); err != nil { t.Logf("failed to delete cluster version %s: %v", ns, err) } if err := kc.Core().Namespaces().Delete(ns, nil); err != nil { @@ -271,7 +271,7 @@ func TestIntegrationCVO_initializeAndUpgrade(t *testing.T) { t.Fatal(err) } time.Sleep(time.Second) - cv, err := client.Config().ClusterVersions().Get(ns, metav1.GetOptions{}) + cv, err := client.ConfigV1().ClusterVersions().Get(ns, metav1.GetOptions{}) if err != nil { t.Fatal(err) } @@ -281,7 +281,7 @@ func TestIntegrationCVO_initializeAndUpgrade(t *testing.T) { verifyReleasePayload(t, kc, ns, "0.0.1", payloadImage1) t.Logf("trigger an update to a new version") - cv, err = client.Config().ClusterVersions().Patch(ns, types.MergePatchType, []byte(fmt.Sprintf(`{"spec":{"desiredUpdate":{"image":"%s"}}}`, payloadImage2))) + cv, err = client.ConfigV1().ClusterVersions().Patch(ns, types.MergePatchType, []byte(fmt.Sprintf(`{"spec":{"desiredUpdate":{"image":"%s"}}}`, payloadImage2))) if err != nil { t.Fatal(err) } @@ -317,7 +317,7 @@ func TestIntegrationCVO_initializeAndUpgrade(t *testing.T) { t.Fatal(err) } time.Sleep(time.Second) - cv, err = client.Config().ClusterVersions().Get(ns, metav1.GetOptions{}) + cv, err = client.ConfigV1().ClusterVersions().Get(ns, metav1.GetOptions{}) if err != nil { t.Fatal(err) } @@ -354,7 +354,7 @@ func TestIntegrationCVO_initializeAndHandleError(t *testing.T) { t.Fatal(err) } defer func() { - if err := client.Config().ClusterVersions().Delete(ns, nil); err != nil { + if err := client.ConfigV1().ClusterVersions().Delete(ns, nil); err != nil { t.Logf("failed to delete cluster version %s: %v", ns, err) } if err := kc.Core().Namespaces().Delete(ns, nil); err != nil { @@ -411,7 +411,7 @@ func TestIntegrationCVO_initializeAndHandleError(t *testing.T) { verifyReleasePayload(t, kc, ns, "0.0.1", payloadImage1) t.Logf("trigger an update to a new version that should fail") - cv, err := client.Config().ClusterVersions().Patch(ns, types.MergePatchType, []byte(fmt.Sprintf(`{"spec":{"desiredUpdate":{"image":"%s"}}}`, payloadImage2))) + cv, err := client.ConfigV1().ClusterVersions().Patch(ns, types.MergePatchType, []byte(fmt.Sprintf(`{"spec":{"desiredUpdate":{"image":"%s"}}}`, payloadImage2))) if err != nil { t.Fatal(err) } @@ -440,7 +440,7 @@ func TestIntegrationCVO_initializeAndHandleError(t *testing.T) { verifyReleasePayloadConfigMap2(t, kc, ns, "0.0.1", payloadImage1) t.Logf("switch back to 0.0.1 and verify it succeeds") - cv, err = client.Config().ClusterVersions().Patch(ns, types.MergePatchType, []byte(`{"spec":{"desiredUpdate":{"image":"", "version":"0.0.1"}}}`)) + cv, err = client.ConfigV1().ClusterVersions().Patch(ns, types.MergePatchType, []byte(`{"spec":{"desiredUpdate":{"image":"", "version":"0.0.1"}}}`)) if err != nil { t.Fatal(err) } @@ -481,7 +481,7 @@ func TestIntegrationCVO_gracefulStepDown(t *testing.T) { t.Fatal(err) } defer func() { - if err := client.Config().ClusterVersions().Delete(ns, nil); err != nil { + if err := client.ConfigV1().ClusterVersions().Delete(ns, nil); err != nil { t.Logf("failed to delete cluster version %s: %v", ns, err) } if err := kc.Core().Namespaces().Delete(ns, nil); err != nil { @@ -604,7 +604,7 @@ func TestIntegrationCVO_cincinnatiRequest(t *testing.T) { t.Fatal(err) } defer func() { - if err := client.Config().ClusterVersions().Delete(ns, nil); err != nil { + if err := client.ConfigV1().ClusterVersions().Delete(ns, nil); err != nil { t.Logf("failed to 
delete cluster version %s: %v", ns, err) } if err := kc.Core().Namespaces().Delete(ns, nil); err != nil { @@ -711,7 +711,7 @@ metadata: func waitForUpdateAvailable(t *testing.T, client clientset.Interface, ns string, allowIncrementalFailure bool, versions ...string) (*configv1.ClusterVersion, error) { var lastCV *configv1.ClusterVersion return lastCV, wait.PollImmediate(200*time.Millisecond, 60*time.Second, func() (bool, error) { - cv, err := client.Config().ClusterVersions().Get(ns, metav1.GetOptions{}) + cv, err := client.ConfigV1().ClusterVersions().Get(ns, metav1.GetOptions{}) if errors.IsNotFound(err) { return false, nil } @@ -822,7 +822,7 @@ func waitForUpdateAvailable(t *testing.T, client clientset.Interface, ns string, func waitUntilUpgradeFails(t *testing.T, client clientset.Interface, ns string, failingReason, failingMessage, progressingMessage string, versions ...string) (*configv1.ClusterVersion, error) { var lastCV *configv1.ClusterVersion return lastCV, wait.PollImmediate(200*time.Millisecond, 60*time.Second, func() (bool, error) { - cv, err := client.Config().ClusterVersions().Get(ns, metav1.GetOptions{}) + cv, err := client.ConfigV1().ClusterVersions().Get(ns, metav1.GetOptions{}) if errors.IsNotFound(err) { return false, nil } diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go index f4c0c1ff2..54bd7afdc 100644 --- a/vendor/github.com/golang/glog/glog.go +++ b/vendor/github.com/golang/glog/glog.go @@ -1,4 +1,6 @@ -// Copyright 2017 Istio Authors +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,149 +14,1167 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package glog exposes an API subset of the [glog](https://github.com/golang/glog) package. -// All logging state delivered to this package is shunted to the global [klog logger](ttps://github.com/kubernetes/klog). +// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. +// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as +// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// +// Basic examples: +// +// glog.Info("Prepare to repel boarders") +// +// glog.Fatalf("Initialization failed: %s", err) +// +// See the documentation for the V function for an explanation of these examples: +// +// if glog.V(2) { +// glog.Info("Starting transaction...") +// } +// +// glog.V(2).Infoln("Processed", nItems, "elements") +// +// Log output is buffered and written periodically using Flush. Programs +// should call Flush before exiting to guarantee all log output is written. +// +// By default, all log statements write to files in a temporary directory. +// This package provides several flags that modify this behavior. +// As a result, flag.Parse must be called before any logging is done. +// +// -logtostderr=false +// Logs are written to standard error instead of to files. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. 
+// +// Other flags provide aids to debugging. +// +// -log_backtrace_at="" +// When set to a file and line number holding a logging statement, +// such as +// -log_backtrace_at=gopherflakes.go:234 +// a stack trace will be written to the Info log whenever execution +// hits that statement. (Unlike with -vmodule, the ".go" must be +// present.) +// -v=0 +// Enable V-leveled logging at the specified level. +// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". // -// We depend on some downstream components that use glog for logging. This package makes it so we can intercept -// the calls to glog and redirect them to klog and thus produce a consistent log for our processes. package glog import ( - "k8s.io/klog" + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "io" + stdLog "log" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// severity identifies the sort of log: info, warning etc. It also implements +// the flag.Value interface. The -stderrthreshold flag is of type severity and +// should be modified only through the flag.Value interface. The values match +// the corresponding constants in C++. +type severity int32 // sync/atomic int32 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + infoLog severity = iota + warningLog + errorLog + fatalLog + numSeverity = 4 ) -// Level is a shim +const severityChar = "IWEF" + +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// get returns the value of the severity. +func (s *severity) get() severity { + return severity(atomic.LoadInt32((*int32)(s))) +} + +// set sets the value of the severity. +func (s *severity) set(val severity) { + atomic.StoreInt32((*int32)(s), int32(val)) +} + +// String is part of the flag.Value interface. +func (s *severity) String() string { + return strconv.FormatInt(int64(*s), 10) +} + +// Get is part of the flag.Value interface. +func (s *severity) Get() interface{} { + return *s +} + +// Set is part of the flag.Value interface. +func (s *severity) Set(value string) error { + var threshold severity + // Is it a known name? + if v, ok := severityByName(value); ok { + threshold = v + } else { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + threshold = severity(v) + } + logging.stderrThreshold.set(threshold) + return nil +} + +func severityByName(s string) (severity, bool) { + s = strings.ToUpper(s) + for i, name := range severityName { + if name == s { + return severity(i), true + } + } + return 0, false +} + +// OutputStats tracks the number of output lines and bytes written. +type OutputStats struct { + lines int64 + bytes int64 +} + +// Lines returns the number of lines written. +func (s *OutputStats) Lines() int64 { + return atomic.LoadInt64(&s.lines) +} + +// Bytes returns the number of bytes written. +func (s *OutputStats) Bytes() int64 { + return atomic.LoadInt64(&s.bytes) +} + +// Stats tracks the number of lines of output and number of bytes +// per severity level. Values must be read with atomic.LoadInt64. 
+var Stats struct { + Info, Warning, Error OutputStats +} + +var severityStats = [numSeverity]*OutputStats{ + infoLog: &Stats.Info, + warningLog: &Stats.Warning, + errorLog: &Stats.Error, +} + +// Level is exported because it appears in the arguments to V and is +// the type of the v flag, which can be set programmatically. +// It's a distinct type because we want to discriminate it from logType. +// Variables of type level are only changed under logging.mu. +// The -v flag is read only with atomic ops, so the state of the logging +// module is consistent. + +// Level is treated as a sync/atomic int32. + +// Level specifies a level of verbosity for V logs. *Level implements +// flag.Value; the -v flag is of type Level and should be modified +// only through the flag.Value interface. type Level int32 -// Verbose is a shim -type Verbose bool +// get returns the value of the Level. +func (l *Level) get() Level { + return Level(atomic.LoadInt32((*int32)(l))) +} + +// set sets the value of the Level. +func (l *Level) set(val Level) { + atomic.StoreInt32((*int32)(l), int32(val)) +} + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(*l), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() interface{} { + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(Level(v), logging.vmodule.filter, false) + return nil +} -// Flush is a shim +// moduleSpec represents the setting of the -vmodule flag. +type moduleSpec struct { + filter []modulePat +} + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(file string) bool { + if m.literal { + return file == m.pattern + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +func (m *moduleSpec) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + var b bytes.Buffer + for i, f := range m.filter { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported. +func (m *moduleSpec) Get() interface{} { + return nil +} + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,file=1,gfs*=3 +func (m *moduleSpec) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.Atoi(patLev[1]) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + if v < 0 { + return errors.New("negative value for vmodule level") + } + if v == 0 { + continue // Ignore. 
It's harmless but no point in paying the overhead. + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. +func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// traceLocation represents the setting of the -log_backtrace_at flag. +type traceLocation struct { + file string + line int +} + +// isSet reports whether the trace location has been specified. +// logging.mu is held. +func (t *traceLocation) isSet() bool { + return t.line > 0 +} + +// match reports whether the specified file and line matches the trace location. +// The argument file name is the full path, not the basename specified in the flag. +// logging.mu is held. +func (t *traceLocation) match(file string, line int) bool { + if t.line != line { + return false + } + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + return t.file == file +} + +func (t *traceLocation) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + return fmt.Sprintf("%s:%d", t.file, t.line) +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported +func (t *traceLocation) Get() interface{} { + return nil +} + +var errTraceSyntax = errors.New("syntax error: expect file.go:234") + +// Syntax: -log_backtrace_at=gopherflakes.go:234 +// Note that unlike vmodule the file extension is included here. +func (t *traceLocation) Set(value string) error { + if value == "" { + // Unset. + t.line = 0 + t.file = "" + } + fields := strings.Split(value, ":") + if len(fields) != 2 { + return errTraceSyntax + } + file, line := fields[0], fields[1] + if !strings.Contains(file, ".") { + return errTraceSyntax + } + v, err := strconv.Atoi(line) + if err != nil { + return errTraceSyntax + } + if v <= 0 { + return errors.New("negative or zero value for level") + } + logging.mu.Lock() + defer logging.mu.Unlock() + t.line = v + t.file = file + return nil +} + +// flushSyncWriter is the interface satisfied by logging destinations. +type flushSyncWriter interface { + Flush() error + Sync() error + io.Writer +} + +func init() { + flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") + flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") + flag.Var(&logging.verbosity, "v", "log level for V logs") + flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") + flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + + // Default stderrThreshold is ERROR. + logging.stderrThreshold = errorLog + + logging.setVState(0, nil, false) + go logging.flushDaemon() +} + +// Flush flushes all pending log I/O. func Flush() { + logging.lockAndFlushAll() +} + +// loggingT collects all the global state of the logging setup. +type loggingT struct { + // Boolean flags. 
Not handled atomically because the flag.Value interface + // does not let us avoid the =true, and that shorthand is necessary for + // compatibility. TODO: does this matter enough to fix? Seems unlikely. + toStderr bool // The -logtostderr flag. + alsoToStderr bool // The -alsologtostderr flag. + + // Level flag. Handled atomically. + stderrThreshold severity // The -stderrthreshold flag. + + // freeList is a list of byte buffers, maintained under freeListMu. + freeList *buffer + // freeListMu maintains the free list. It is separate from the main mutex + // so buffers can be grabbed and printed to without holding the main lock, + // for better parallelization. + freeListMu sync.Mutex + + // mu protects the remaining elements of this structure and is + // used to synchronize logging. + mu sync.Mutex + // file holds writer for each of the log types. + file [numSeverity]flushSyncWriter + // pcs is used in V to avoid an allocation when computing the caller's PC. + pcs [1]uintptr + // vmap is a cache of the V Level for each V() call site, identified by PC. + // It is wiped whenever the vmodule flag changes state. + vmap map[uintptr]Level + // filterLength stores the length of the vmodule filter chain. If greater + // than zero, it means vmodule is enabled. It may be read safely + // using sync.LoadInt32, but is only modified under mu. + filterLength int32 + // traceLocation is the state of the -log_backtrace_at flag. + traceLocation traceLocation + // These flags are modified only under lock, although verbosity may be fetched + // safely using atomic.LoadInt32. + vmodule moduleSpec // The state of the -vmodule flag. + verbosity Level // V logging level, the value of the -v flag/ +} + +// buffer holds a byte Buffer for reuse. The zero value is ready for use. +type buffer struct { + bytes.Buffer + tmp [64]byte // temporary byte array for creating headers. + next *buffer +} + +var logging loggingT + +// setVState sets a consistent state for V logging. +// l.mu is held. +func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { + // Turn verbosity off so V will not fire while we are in transition. + logging.verbosity.set(0) + // Ditto for filter length. + atomic.StoreInt32(&logging.filterLength, 0) + + // Set the new filters and wipe the pc->Level map if the filter has changed. + if setFilter { + logging.vmodule.filter = filter + logging.vmap = make(map[uintptr]Level) + } + + // Things are consistent now, so enable filtering and verbosity. + // They are enabled in order opposite to that in V. + atomic.StoreInt32(&logging.filterLength, int32(len(filter))) + logging.verbosity.set(verbosity) +} + +// getBuffer returns a new, ready-to-use buffer. +func (l *loggingT) getBuffer() *buffer { + l.freeListMu.Lock() + b := l.freeList + if b != nil { + l.freeList = b.next + } + l.freeListMu.Unlock() + if b == nil { + b = new(buffer) + } else { + b.next = nil + b.Reset() + } + return b +} + +// putBuffer returns a buffer to the free list. +func (l *loggingT) putBuffer(b *buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death. + return + } + l.freeListMu.Lock() + b.next = l.freeList + l.freeList = b + l.freeListMu.Unlock() +} + +var timeNow = time.Now // Stubbed out for testing. + +/* +header formats a log header as defined by the C++ implementation. +It returns a buffer containing the formatted header and the user's file and line number. +The depth specifies how many stack frames above lives the source line to be identified in the log message. 
+ +Log lines have this form: + Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... +where the fields are defined as follows: + L A single character, representing the log level (eg 'I' for INFO) + mm The month (zero padded; ie May is '05') + dd The day (zero padded) + hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds + threadid The space-padded thread ID as returned by GetTID() + file The file name + line The line number + msg The user-supplied message +*/ +func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { + _, file, line, ok := runtime.Caller(3 + depth) + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + return l.formatHeader(s, file, line), file, line +} + +// formatHeader formats a log header using the provided file name and line number. +func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { + now := timeNow() + if line < 0 { + line = 0 // not a real line number, but acceptable to someDigits + } + if s > fatalLog { + s = infoLog // for safety. + } + buf := l.getBuffer() + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.tmp[0] = severityChar[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.tmp[8] = ':' + buf.twoDigits(9, minute) + buf.tmp[11] = ':' + buf.twoDigits(12, second) + buf.tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.tmp[21] = ' ' + buf.nDigits(7, 22, pid, ' ') // TODO: should be TID + buf.tmp[29] = ' ' + buf.Write(buf.tmp[:30]) + buf.WriteString(file) + buf.tmp[0] = ':' + n := buf.someDigits(1, line) + buf.tmp[n+1] = ']' + buf.tmp[n+2] = ' ' + buf.Write(buf.tmp[:n+3]) + return buf +} + +// Some custom tiny helper functions to print the log header efficiently. + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. +func (buf *buffer) twoDigits(i, d int) { + buf.tmp[i+1] = digits[d%10] + d /= 10 + buf.tmp[i] = digits[d%10] +} + +// nDigits formats an n-digit integer at buf.tmp[i], +// padding with pad on the left. +// It assumes d >= 0. +func (buf *buffer) nDigits(n, i, d int, pad byte) { + j := n - 1 + for ; j >= 0 && d > 0; j-- { + buf.tmp[i+j] = digits[d%10] + d /= 10 + } + for ; j >= 0; j-- { + buf.tmp[i+j] = pad + } } -// V is a shim +// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. +func (buf *buffer) someDigits(i, d int) int { + // Print into the top, then copy down. We know there's space for at least + // a 10-digit number. + j := len(buf.tmp) + for { + j-- + buf.tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.tmp[i:], buf.tmp[j:]) +} + +func (l *loggingT) println(s severity, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintln(buf, args...) + l.output(s, buf, file, line, false) +} + +func (l *loggingT) print(s severity, args ...interface{}) { + l.printDepth(s, 1, args...) +} + +func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { + buf, file, line := l.header(s, depth) + fmt.Fprint(buf, args...) 
+ if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +func (l *loggingT) printf(s severity, format string, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintf(buf, format, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +// printWithFileLine behaves like print but uses the provided file and line number. If +// alsoLogToStderr is true, the log message always appears on standard error; it +// will also appear in the log file unless --logtostderr is set. +func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { + buf := l.formatHeader(s, file, line) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, alsoToStderr) +} + +// output writes the data to the log files and releases the buffer. +func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { + l.mu.Lock() + if l.traceLocation.isSet() { + if l.traceLocation.match(file, line) { + buf.Write(stacks(false)) + } + } + data := buf.Bytes() + if !flag.Parsed() { + os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) + os.Stderr.Write(data) + } else if l.toStderr { + os.Stderr.Write(data) + } else { + if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } + } + if s == fatalLog { + // If we got here via Exit rather than Fatal, print no stacks. + if atomic.LoadUint32(&fatalNoStacks) > 0 { + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(1) + } + // Dump all goroutine stacks before exiting. + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(stacks(false)) + } + // Write the stack trace for all goroutines to the files. + trace := stacks(true) + logExitFunc = func(error) {} // If we get a write error, we'll still exit below. + for log := fatalLog; log >= infoLog; log-- { + if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. + f.Write(trace) + } + } + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + } + l.putBuffer(buf) + l.mu.Unlock() + if stats := severityStats[s]; stats != nil { + atomic.AddInt64(&stats.lines, 1) + atomic.AddInt64(&stats.bytes, int64(len(data))) + } +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. This is needed because the hooks invoked +// by Flush may deadlock when glog.Fatal is called from a hook that holds +// a lock. 
+func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + } +} + +// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. +func stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. + n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} + +// logExitFunc provides a simple mechanism to override the default behavior +// of exiting on error. Used in testing and to guarantee we reach a required exit +// for fatal logs. Instead, exit could be a function rather than a method but that +// would make its use clumsier. +var logExitFunc func(error) + +// exit is called if there is trouble creating or writing log files. +// It flushes the logs and exits the program; there's no point in hanging around. +// l.mu is held. +func (l *loggingT) exit(err error) { + fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) + // If logExitFunc is set, we do that instead of exiting. + if logExitFunc != nil { + logExitFunc(err) + return + } + l.flushAll() + os.Exit(2) +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. +// l.mu is held for all its methods. +type syncBuffer struct { + logger *loggingT + *bufio.Writer + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= MaxSize { + if err := sb.rotateFile(time.Now()); err != nil { + sb.logger.exit(err) + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + if err != nil { + sb.logger.exit(err) + } + return +} + +// rotateFile closes the syncBuffer's file and starts a new one. +func (sb *syncBuffer) rotateFile(now time.Time) error { + if sb.file != nil { + sb.Flush() + sb.file.Close() + } + var err error + sb.file, _, err = create(severityName[sb.sev], now) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + // Write header. + var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createFiles creates all the log files for severity from sev down to infoLog. +// l.mu is held. 
+func (l *loggingT) createFiles(sev severity) error { + now := time.Now() + // Files are created in decreasing severity order, so as soon as we find one + // has already been created, we can stop. + for s := sev; s >= infoLog && l.file[s] == nil; s-- { + sb := &syncBuffer{ + logger: l, + sev: s, + } + if err := sb.rotateFile(now); err != nil { + return err + } + l.file[s] = sb + } + return nil +} + +const flushInterval = 30 * time.Second + +// flushDaemon periodically flushes the log file buffers. +func (l *loggingT) flushDaemon() { + for _ = range time.NewTicker(flushInterval).C { + l.lockAndFlushAll() + } +} + +// lockAndFlushAll is like flushAll but locks l.mu first. +func (l *loggingT) lockAndFlushAll() { + l.mu.Lock() + l.flushAll() + l.mu.Unlock() +} + +// flushAll flushes all the logs and attempts to "sync" their data to disk. +// l.mu is held. +func (l *loggingT) flushAll() { + // Flush from fatal down, in case there's trouble flushing. + for s := fatalLog; s >= infoLog; s-- { + file := l.file[s] + if file != nil { + file.Flush() // ignore error + file.Sync() // ignore error + } + } +} + +// CopyStandardLogTo arranges for messages written to the Go "log" package's +// default logs to also appear in the Google logs for the named and lower +// severities. Subsequent changes to the standard log's default output location +// or format may break this behavior. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, CopyStandardLogTo panics. +func CopyStandardLogTo(name string) { + sev, ok := severityByName(name) + if !ok { + panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) + } + // Set a log format that captures the user's file and line: + // d.go:23: message + stdLog.SetFlags(stdLog.Lshortfile) + stdLog.SetOutput(logBridge(sev)) +} + +// logBridge provides the Write method that enables CopyStandardLogTo to connect +// Go's standard logs to the logs provided by this package. +type logBridge severity + +// Write parses the standard logging line and passes its components to the +// logger for severity(lb). +func (lb logBridge) Write(b []byte) (n int, err error) { + var ( + file = "???" + line = 1 + text string + ) + // Split "d.go:23: message" into "d.go", "23", and "message". + if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { + text = fmt.Sprintf("bad log format: %s", b) + } else { + file = string(parts[0]) + text = string(parts[2][1:]) // skip leading space + line, err = strconv.Atoi(string(parts[1])) + if err != nil { + text = fmt.Sprintf("bad line number: %s", b) + line = 1 + } + } + // printWithFileLine with alsoToStderr=true, so standard log messages + // always appear on standard error. + logging.printWithFileLine(severity(lb), file, line, true, text) + return len(b), nil +} + +// setV computes and remembers the V level for a given PC +// when vmodule is enabled. +// File pattern matching takes the basename of the file, stripped +// of its .go suffix, and uses filepath.Match, which is a little more +// general than the *? matching used in C++. +// l.mu is held. +func (l *loggingT) setV(pc uintptr) Level { + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d. 
+ if strings.HasSuffix(file, ".go") { + file = file[:len(file)-3] + } + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range l.vmodule.filter { + if filter.match(file) { + l.vmap[pc] = filter.level + return filter.level + } + } + l.vmap[pc] = 0 + return 0 +} + +// Verbose is a boolean type that implements Infof (like Printf) etc. +// See the documentation of V for more information. +type Verbose bool + +// V reports whether verbosity at the call site is at least the requested level. +// The returned value is a boolean of type Verbose, which implements Info, Infoln +// and Infof. These methods will write to the Info log if called. +// Thus, one may write either +// if glog.V(2) { glog.Info("log this") } +// or +// glog.V(2).Info("log this") +// The second form is shorter but the first is cheaper if logging is off because it does +// not evaluate its arguments. +// +// Whether an individual call to V generates a log record depends on the setting of +// the -v and --vmodule flags; both are off by default. If the level in the call to +// V is at least the value of -v, or of -vmodule for the source file containing the +// call, the V call will log. func V(level Level) Verbose { - return Verbose(bool(klog.V(klog.Level(int32(level))))) + // This function tries hard to be cheap unless there's work to do. + // The fast path is two atomic loads and compares. + + // Here is a cheap but safe test to see if V logging is enabled globally. + if logging.verbosity.get() >= level { + return Verbose(true) + } + + // It's off globally but it vmodule may still be set. + // Here is another cheap but safe test to see if vmodule is enabled. + if atomic.LoadInt32(&logging.filterLength) > 0 { + // Now we need a proper lock to use the logging structure. The pcs field + // is shared so we must lock before accessing it. This is fairly expensive, + // but if V logging is enabled we're slow anyway. + logging.mu.Lock() + defer logging.mu.Unlock() + if runtime.Callers(2, logging.pcs[:]) == 0 { + return Verbose(false) + } + v, ok := logging.vmap[logging.pcs[0]] + if !ok { + v = logging.setV(logging.pcs[0]) + } + return Verbose(v >= level) + } + return Verbose(false) } -// Info is a shim +// Info is equivalent to the global Info function, guarded by the value of v. +// See the documentation of V for usage. func (v Verbose) Info(args ...interface{}) { if v { - klog.Info(args...) + logging.print(infoLog, args...) } } -// Infoln is a shim +// Infoln is equivalent to the global Infoln function, guarded by the value of v. +// See the documentation of V for usage. func (v Verbose) Infoln(args ...interface{}) { if v { - klog.Infoln(args...) + logging.println(infoLog, args...) } } -// Infof is a shim +// Infof is equivalent to the global Infof function, guarded by the value of v. +// See the documentation of V for usage. func (v Verbose) Infof(format string, args ...interface{}) { if v { - klog.Infof(format, args...) + logging.printf(infoLog, format, args...) } } -// Info is a shim +// Info logs to the INFO log. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Info(args ...interface{}) { - klog.Info(args...) + logging.print(infoLog, args...) } -// InfoDepth is a shim +// InfoDepth acts as Info but uses depth to determine which call frame to log. +// InfoDepth(0, "msg") is the same as Info("msg"). func InfoDepth(depth int, args ...interface{}) { - klog.InfoDepth(depth, args...) + logging.printDepth(infoLog, depth, args...) 
} -// Infoln is a shim +// Infoln logs to the INFO log. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. func Infoln(args ...interface{}) { - klog.Infoln(args...) + logging.println(infoLog, args...) } -// Infof is a shim +// Infof logs to the INFO log. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Infof(format string, args ...interface{}) { - klog.Infof(format, args...) + logging.printf(infoLog, format, args...) } -// Warning is a shim +// Warning logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Warning(args ...interface{}) { - klog.Warning(args...) + logging.print(warningLog, args...) } -// WarningDepth is a shim +// WarningDepth acts as Warning but uses depth to determine which call frame to log. +// WarningDepth(0, "msg") is the same as Warning("msg"). func WarningDepth(depth int, args ...interface{}) { - klog.WarningDepth(depth, args...) + logging.printDepth(warningLog, depth, args...) } -// Warningln is a shim +// Warningln logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. func Warningln(args ...interface{}) { - klog.Warningln(args...) + logging.println(warningLog, args...) } -// Warningf is a shim +// Warningf logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Warningf(format string, args ...interface{}) { - klog.Warningf(format, args...) + logging.printf(warningLog, format, args...) } -// Error is a shim +// Error logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Error(args ...interface{}) { - klog.Error(args...) + logging.print(errorLog, args...) } -// ErrorDepth is a shim +// ErrorDepth acts as Error but uses depth to determine which call frame to log. +// ErrorDepth(0, "msg") is the same as Error("msg"). func ErrorDepth(depth int, args ...interface{}) { - klog.ErrorDepth(depth, args...) + logging.printDepth(errorLog, depth, args...) } -// Errorln is a shim +// Errorln logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. func Errorln(args ...interface{}) { - klog.Errorln(args...) + logging.println(errorLog, args...) } -// Errorf is a shim +// Errorf logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Errorf(format string, args ...interface{}) { - klog.Errorf(format, args...) + logging.printf(errorLog, format, args...) } -// Fatal is a shim +// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Fatal(args ...interface{}) { - klog.Fatal(args...) + logging.print(fatalLog, args...) } -// FatalDepth is a shim +// FatalDepth acts as Fatal but uses depth to determine which call frame to log. +// FatalDepth(0, "msg") is the same as Fatal("msg"). func FatalDepth(depth int, args ...interface{}) { - klog.FatalDepth(depth, args...) + logging.printDepth(fatalLog, depth, args...) } -// Fatalln is a shim +// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). 
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. func Fatalln(args ...interface{}) { - klog.Fatalln(args...) + logging.println(fatalLog, args...) } -// Fatalf is a shim +// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Fatalf(format string, args ...interface{}) { - klog.Fatalf(format, args...) + logging.printf(fatalLog, format, args...) } -// Exit is a shim +// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. +// It allows Exit and relatives to use the Fatal logs. +var fatalNoStacks uint32 + +// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Exit(args ...interface{}) { - klog.Exit(args...) + atomic.StoreUint32(&fatalNoStacks, 1) + logging.print(fatalLog, args...) } -// ExitDepth is a shim +// ExitDepth acts as Exit but uses depth to determine which call frame to log. +// ExitDepth(0, "msg") is the same as Exit("msg"). func ExitDepth(depth int, args ...interface{}) { - klog.ExitDepth(depth, args...) + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printDepth(fatalLog, depth, args...) } -// Exitln is a shim +// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). func Exitln(args ...interface{}) { - klog.Exitln(args...) + atomic.StoreUint32(&fatalNoStacks, 1) + logging.println(fatalLog, args...) } -// Exitf is a shim +// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Exitf(format string, args ...interface{}) { - klog.Exitf(format, args...) + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printf(fatalLog, format, args...) } diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go new file mode 100644 index 000000000..65075d281 --- /dev/null +++ b/vendor/github.com/golang/glog/glog_file.go @@ -0,0 +1,124 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// File I/O for logs. + +package glog + +import ( + "errors" + "flag" + "fmt" + "os" + "os/user" + "path/filepath" + "strings" + "sync" + "time" +) + +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 + +// logDirs lists the candidate directories for new log files. +var logDirs []string + +// If non-empty, overrides the choice of directory in which to write logs. +// See createLogDirs for the full list of possible destinations. 
+var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") + +func createLogDirs() { + if *logDir != "" { + logDirs = append(logDirs, *logDir) + } + logDirs = append(logDirs, os.TempDir()) +} + +var ( + pid = os.Getpid() + program = filepath.Base(os.Args[0]) + host = "unknownhost" + userName = "unknownuser" +) + +func init() { + h, err := os.Hostname() + if err == nil { + host = shortHostname(h) + } + + current, err := user.Current() + if err == nil { + userName = current.Username + } + + // Sanitize userName since it may contain filepath separators on Windows. + userName = strings.Replace(userName, `\`, "_", -1) +} + +// shortHostname returns its argument, truncating at the first period. +// For instance, given "www.google.com" it returns "www". +func shortHostname(hostname string) string { + if i := strings.Index(hostname, "."); i >= 0 { + return hostname[:i] + } + return hostname +} + +// logName returns a new log file name containing tag, with start time t, and +// the name for the symlink for tag. +func logName(tag string, t time.Time) (name, link string) { + name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", + program, + host, + userName, + tag, + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + pid) + return name, program + "." + tag +} + +var onceLogDirs sync.Once + +// create creates a new log file and returns the file and its filename, which +// contains tag ("INFO", "FATAL", etc.) and t. If the file is created +// successfully, create also attempts to update the symlink for that tag, ignoring +// errors. +func create(tag string, t time.Time) (f *os.File, filename string, err error) { + onceLogDirs.Do(createLogDirs) + if len(logDirs) == 0 { + return nil, "", errors.New("log: no log dirs") + } + name, link := logName(tag, t) + var lastErr error + for _, dir := range logDirs { + fname := filepath.Join(dir, name) + f, err := os.Create(fname) + if err == nil { + symlink := filepath.Join(dir, link) + os.Remove(symlink) // ignore err + os.Symlink(name, symlink) // ignore err + return f, fname, nil + } + lastErr = err + } + return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) +} diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index 9a13b8527..e0b669adb 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -30,6 +30,12 @@ type APIServerSpec struct { // - ConfigMap.Data["ca-bundle.crt"] - CA bundle. // +optional ClientCA ConfigMapNameReference `json:"clientCA"` + // additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the + // API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth + // server from JavaScript applications. + // The values are regular expressions that correspond to the Golang regular expression language. 
+ // +optional + AdditionalCORSAllowedOrigins []string `json:"additionalCORSAllowedOrigins,omitempty"` } type APIServerServingCerts struct { diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go index c8b5b482f..40ad00151 100644 --- a/vendor/github.com/openshift/api/config/v1/types_console.go +++ b/vendor/github.com/openshift/api/config/v1/types_console.go @@ -6,7 +6,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Console holds cluster-wide information about Console. The canonical name is `cluster` +// Console holds cluster-wide information about Console. The canonical name is `cluster`. type Console struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -20,11 +20,13 @@ type Console struct { Status ConsoleStatus `json:"status"` } +// ConsoleSpec is the specification of the desired behavior of the Console. type ConsoleSpec struct { // +optional Authentication ConsoleAuthentication `json:"authentication"` } +// ConsoleStatus defines the observed status of the Console. type ConsoleStatus struct { // The URL for the console. This will be derived from the host for the route that // is created for the console. @@ -40,6 +42,7 @@ type ConsoleList struct { Items []Console `json:"items"` } +// ConsoleAuthentication defines a list of optional configuration for console authentication. type ConsoleAuthentication struct { // An optional, absolute URL to redirect web browsers to after logging out of // the console. If not specified, it will redirect to the default login page. diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go index ca51f1637..d9e54b633 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image.go +++ b/vendor/github.com/openshift/api/config/v1/types_image.go @@ -94,7 +94,7 @@ type RegistryLocation struct { // RegistrySources holds cluster-wide information about how to handle the registries config. type RegistrySources struct { - // InsecureRegistries are registries which do not have a valid SSL certificate or only support HTTP connections. + // InsecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections. // +optional InsecureRegistries []string `json:"insecureRegistries,omitempty"` // BlockedRegistries are blacklisted from image pull/push. All other registries are allowed. diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index 40e3f2c27..06960091b 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -37,25 +37,31 @@ type InfrastructureStatus struct { // alphanumeric or hyphen characters. InfrastructureName string `json:"infrastructureName"` - // platform is the underlying infrastructure provider for the cluster. This - // value controls whether infrastructure automation such as service load - // balancers, dynamic volume provisioning, machine creation and deletion, and - // other integrations are enabled. If None, no infrastructure automation is - // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", - // "OpenStack", "VSphere", and "None". 
Individual components may not support - // all platforms, and must handle unrecognized platforms as None if they do - // not support that platform. + // platform is the underlying infrastructure provider for the cluster. + // + // Deprecated: Use platformStatus.type instead. Platform PlatformType `json:"platform,omitempty"` + // platformStatus holds status information specific to the underlying + // infrastructure provider. + // +optional + PlatformStatus *PlatformStatus `json:"platformStatus,omitempty"` + // etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering // etcd servers and clients. // For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery EtcdDiscoveryDomain string `json:"etcdDiscoveryDomain"` - // apiServerURL is a valid URL with scheme(http/https), address and port. - // apiServerURL can be used by components like kubelet on machines, to contact the `apisever` - // using the infrastructure provider rather than the kubernetes networking. + // apiServerURL is a valid URI with scheme(http/https), address and + // port. apiServerURL can be used by components like the web console + // to tell users where to find the Kubernetes API. APIServerURL string `json:"apiServerURL"` + + // apiServerInternalURL is a valid URI with scheme(http/https), + // address and port. apiServerInternalURL can be used by components + // like kubelets, to contact the Kubernetes API server using the + // infrastructure provider rather than Kubernetes networking. + APIServerInternalURL string `json:"apiServerInternalURI"` } // PlatformType is a specific supported infrastructure provider. @@ -85,8 +91,59 @@ const ( // VSpherePlatformType represents VMWare vSphere infrastructure. VSpherePlatformType PlatformType = "VSphere" + + // OvirtPlatformType represents oVirt/RHV infrastructure. + OvirtPlatformType PlatformType = "oVirt" ) +// PlatformStatus holds the current status specific to the underlying infrastructure provider +// of the current cluster. Since these are used at status-level for the underlying cluster, it +// is supposed that only one of the status structs is set. +type PlatformStatus struct { + // type is the underlying infrastructure provider for the cluster. This + // value controls whether infrastructure automation such as service load + // balancers, dynamic volume provisioning, machine creation and deletion, and + // other integrations are enabled. If None, no infrastructure automation is + // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", + // "OpenStack", "VSphere", "oVirt", and "None". Individual components may not support + // all platforms, and must handle unrecognized platforms as None if they do + // not support that platform. + Type PlatformType `json:"type"` + + // AWS contains settings specific to the Amazon Web Services infrastructure provider. + // +optional + AWS *AWSPlatformStatus `json:"aws,omitempty"` + + // Azure contains settings specific to the Azure infrastructure provider. + // +optional + Azure *AzurePlatformStatus `json:"azure,omitempty"` + + // GCP contains settings specific to the Google Cloud Platform infrastructure provider. + // +optional + GCP *GCPPlatformStatus `json:"gcp,omitempty"` +} + +// AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider. +type AWSPlatformStatus struct { + // region holds the default AWS region for new AWS resources created by the cluster. 
diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go
index 5b5ab1929..40ba5e493 100644
--- a/vendor/github.com/openshift/api/config/v1/types_network.go
+++ b/vendor/github.com/openshift/api/config/v1/types_network.go
@@ -24,7 +24,8 @@ type Network struct {
// NetworkSpec is the desired network configuration.
// As a general rule, this SHOULD NOT be read directly. Instead, you should
// consume the NetworkStatus, as it indicates the currently deployed configuration.
-// Currently, none of these fields may be changed after installation.
+// Currently, changing ClusterNetwork, ServiceNetwork, or NetworkType after
+// installation is not supported.
type NetworkSpec struct {
	// IP address pool to use for pod IPs.
	ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"`
@@ -39,6 +40,11 @@ type NetworkSpec struct {
	// Currently supported values are:
	// - OpenShiftSDN
	NetworkType string `json:"networkType"`
+
+	// externalIP defines configuration for controllers that
+	// affect Service.ExternalIP
+	// +optional
+	ExternalIP *ExternalIPConfig `json:"externalIP,omitempty"`
}

// NetworkStatus is the current network configuration.
@@ -67,6 +73,39 @@ type ClusterNetworkEntry struct {
	HostPrefix uint32 `json:"hostPrefix"`
}

+// ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field
+// of a Service resource.
+type ExternalIPConfig struct {
+	// policy is a set of restrictions applied to the ExternalIP field.
+	// If nil, any value is allowed for an ExternalIP. If the empty/zero
+	// policy is supplied, then ExternalIP is not allowed to be set.
+	// +optional
+	Policy *ExternalIPPolicy `json:"policy,omitempty"`
+
+	// autoAssignCIDRs is a list of CIDRs from which to automatically assign
+	// Service.ExternalIP. These are assigned when the service is of type
+	// LoadBalancer. In general, this is only useful for bare-metal clusters.
+	// In OpenShift 3.x, this was misleadingly called "IngressIPs".
+	// Automatically assigned External IPs are not affected by any
+	// ExternalIPPolicy rules.
+	// Currently, only one entry may be provided.
+	// +optional
+	AutoAssignCIDRs []string `json:"autoAssignCIDRs,omitempty"`
+}
+
+// ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP
+// field in a Service. If the zero struct is supplied, then none are permitted.
+// The policy controller always allows automatically assigned external IPs.
+type ExternalIPPolicy struct {
+	// allowedCIDRs is the list of allowed CIDRs.
+	AllowedCIDRs []string `json:"allowedCIDRs,omitempty"`
+
+	// rejectedCIDRs is the list of disallowed CIDRs. These take precedence
+	// over allowedCIDRs.
+	// +optional
+	RejectedCIDRs []string `json:"rejectedCIDRs,omitempty"`
+}
+
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

type NetworkList struct {
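The two new types combine as follows. A minimal sketch of a Network config that hands out ExternalIPs automatically and restricts explicit ones (all CIDRs below are documentation ranges picked for illustration; this snippet is not part of the vendored change):

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	net := &configv1.Network{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: configv1.NetworkSpec{
			NetworkType: "OpenShiftSDN",
			ExternalIP: &configv1.ExternalIPConfig{
				// Explicit Service.ExternalIP values must fall inside
				// 192.0.2.0/24 but outside 192.0.2.0/25, because
				// rejectedCIDRs take precedence over allowedCIDRs.
				Policy: &configv1.ExternalIPPolicy{
					AllowedCIDRs:  []string{"192.0.2.0/24"},
					RejectedCIDRs: []string{"192.0.2.0/25"},
				},
				// LoadBalancer Services draw an ExternalIP from this pool;
				// auto-assigned IPs bypass the policy above.
				AutoAssignCIDRs: []string{"198.51.100.0/28"},
			},
		},
	}
	fmt.Println(net.Spec.ExternalIP.Policy.AllowedCIDRs)
}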
diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go
index 4004ace8b..b869785f0 100644
--- a/vendor/github.com/openshift/api/config/v1/types_proxy.go
+++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go
@@ -15,8 +15,12 @@ type Proxy struct {
	// Spec holds user-settable values for the proxy configuration
	// +required
	Spec ProxySpec `json:"spec"`
+	// status holds observed values from the cluster. They may not be overridden.
+	// +optional
+	Status ProxyStatus `json:"status"`
}

+// ProxySpec contains cluster proxy creation configuration.
type ProxySpec struct {
	// httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.
	// +optional
@@ -26,7 +30,23 @@ type ProxySpec struct {
	// +optional
	HTTPSProxy string `json:"httpsProxy,omitempty"`

-	// noProxy is the list of domains for which the proxy should not be used. Empty means unset and will not result in an env var.
+	// noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.
+	// Empty means unset and will not result in an env var.
+	// +optional
+	NoProxy string `json:"noProxy,omitempty"`
+}
+
+// ProxyStatus shows current known state of the cluster proxy.
+type ProxyStatus struct {
+	// httpProxy is the URL of the proxy for HTTP requests.
+	// +optional
+	HTTPProxy string `json:"httpProxy,omitempty"`
+
+	// httpsProxy is the URL of the proxy for HTTPS requests.
+	// +optional
+	HTTPSProxy string `json:"httpsProxy,omitempty"`
+
+	// noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.
	// +optional
	NoProxy string `json:"noProxy,omitempty"`
}
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
index 3ad614c5e..4a0714b5e 100644
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
@@ -121,6 +121,11 @@ func (in *APIServerSpec) DeepCopyInto(out *APIServerSpec) {
	*out = *in
	in.ServingCerts.DeepCopyInto(&out.ServingCerts)
	out.ClientCA = in.ClientCA
+	if in.AdditionalCORSAllowedOrigins != nil {
+		in, out := &in.AdditionalCORSAllowedOrigins, &out.AdditionalCORSAllowedOrigins
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
	return
}

@@ -150,6 +155,22 @@ func (in *APIServerStatus) DeepCopy() *APIServerStatus {
	return out
}

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSPlatformStatus) DeepCopyInto(out *AWSPlatformStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformStatus.
+func (in *AWSPlatformStatus) DeepCopy() *AWSPlatformStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSPlatformStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdmissionConfig) DeepCopyInto(out *AdmissionConfig) {
	*out = *in
@@ -317,6 +338,22 @@ func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus {
	return out
}

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzurePlatformStatus) DeepCopyInto(out *AzurePlatformStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformStatus.
+func (in *AzurePlatformStatus) DeepCopy() *AzurePlatformStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(AzurePlatformStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BasicAuthIdentityProvider) DeepCopyInto(out *BasicAuthIdentityProvider) {
	*out = *in
@@ -1141,6 +1178,58 @@ func (in *EtcdStorageConfig) DeepCopy() *EtcdStorageConfig {
	return out
}

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalIPConfig) DeepCopyInto(out *ExternalIPConfig) {
+	*out = *in
+	if in.Policy != nil {
+		in, out := &in.Policy, &out.Policy
+		*out = new(ExternalIPPolicy)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AutoAssignCIDRs != nil {
+		in, out := &in.AutoAssignCIDRs, &out.AutoAssignCIDRs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPConfig.
+func (in *ExternalIPConfig) DeepCopy() *ExternalIPConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalIPConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalIPPolicy) DeepCopyInto(out *ExternalIPPolicy) {
+	*out = *in
+	if in.AllowedCIDRs != nil {
+		in, out := &in.AllowedCIDRs, &out.AllowedCIDRs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.RejectedCIDRs != nil {
+		in, out := &in.RejectedCIDRs, &out.RejectedCIDRs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPPolicy.
+func (in *ExternalIPPolicy) DeepCopy() *ExternalIPPolicy {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalIPPolicy)
+	in.DeepCopyInto(out)
+	return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FeatureGate) DeepCopyInto(out *FeatureGate) {
	*out = *in
@@ -1260,6 +1349,22 @@ func (in *FeatureGateStatus) DeepCopy() *FeatureGateStatus {
	return out
}

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCPPlatformStatus) DeepCopyInto(out *GCPPlatformStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPlatformStatus.
+func (in *GCPPlatformStatus) DeepCopy() *GCPPlatformStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(GCPPlatformStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GenericAPIServerConfig) DeepCopyInto(out *GenericAPIServerConfig) {
	*out = *in
@@ -1613,7 +1718,7 @@ func (in *Infrastructure) DeepCopyInto(out *Infrastructure) {
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.Spec = in.Spec
-	out.Status = in.Status
+	in.Status.DeepCopyInto(&out.Status)
	return
}

@@ -1688,6 +1793,11 @@ func (in *InfrastructureSpec) DeepCopy() *InfrastructureSpec {

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InfrastructureStatus) DeepCopyInto(out *InfrastructureStatus) {
	*out = *in
+	if in.PlatformStatus != nil {
+		in, out := &in.PlatformStatus, &out.PlatformStatus
+		*out = new(PlatformStatus)
+		(*in).DeepCopyInto(*out)
+	}
	return
}

@@ -1998,6 +2108,11 @@ func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
+	if in.ExternalIP != nil {
+		in, out := &in.ExternalIP, &out.ExternalIP
+		*out = new(ExternalIPConfig)
+		(*in).DeepCopyInto(*out)
+	}
	return
}

@@ -2271,6 +2386,37 @@ func (in *OperandVersion) DeepCopy() *OperandVersion {
	return out
}

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) {
+	*out = *in
+	if in.AWS != nil {
+		in, out := &in.AWS, &out.AWS
+		*out = new(AWSPlatformStatus)
+		**out = **in
+	}
+	if in.Azure != nil {
+		in, out := &in.Azure, &out.Azure
+		*out = new(AzurePlatformStatus)
+		**out = **in
+	}
+	if in.GCP != nil {
+		in, out := &in.GCP, &out.GCP
+		*out = new(GCPPlatformStatus)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformStatus.
+func (in *PlatformStatus) DeepCopy() *PlatformStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PlatformStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Project) DeepCopyInto(out *Project) {
	*out = *in
@@ -2371,6 +2517,7 @@ func (in *Proxy) DeepCopyInto(out *Proxy) {
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.Spec = in.Spec
+	out.Status = in.Status
	return
}

@@ -2441,6 +2588,22 @@ func (in *ProxySpec) DeepCopy() *ProxySpec {
	return out
}

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProxyStatus) DeepCopyInto(out *ProxyStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyStatus.
+func (in *ProxyStatus) DeepCopy() *ProxyStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ProxyStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) {
	*out = *in
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
index 661970072..5b213d117 100644
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
@@ -271,8 +271,9 @@ func (APIServerServingCerts) SwaggerDoc() map[string]string {
}

var map_APIServerSpec = map[string]string{
-	"servingCerts": "servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic.",
-	"clientCA":     "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.",
+	"servingCerts":                 "servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic.",
+	"clientCA":                     "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.",
+	"additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.",
}

func (APIServerSpec) SwaggerDoc() map[string]string {
@@ -539,7 +540,7 @@ func (UpdateHistory) SwaggerDoc() map[string]string {
}

var map_Console = map[string]string{
-	"":         "Console holds cluster-wide information about Console. The canonical name is `cluster`",
+	"":         "Console holds cluster-wide information about Console. The canonical name is `cluster`.",
	"metadata": "Standard object's metadata.",
	"spec":     "spec holds user settable values for configuration",
	"status":   "status holds observed values from the cluster. They may not be overridden.",
}

@@ -550,6 +551,7 @@ func (Console) SwaggerDoc() map[string]string {
}

var map_ConsoleAuthentication = map[string]string{
+	"":               "ConsoleAuthentication defines optional configuration for console authentication.",
	"logoutRedirect": "An optional, absolute URL to redirect web browsers to after logging out of the console. If not specified, it will redirect to the default login page. This is required when using an identity provider that supports single sign-on (SSO) such as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console will destroy the user's token. The logoutRedirect provides the user the option to perform single logout (SLO) through the identity provider to destroy their single sign-on session.",
}

@@ -565,7 +567,16 @@ func (ConsoleList) SwaggerDoc() map[string]string {
	return map_ConsoleList
}

+var map_ConsoleSpec = map[string]string{
+	"": "ConsoleSpec is the specification of the desired behavior of the Console.",
+}
+
+func (ConsoleSpec) SwaggerDoc() map[string]string {
+	return map_ConsoleSpec
+}
+
var map_ConsoleStatus = map[string]string{
+	"":           "ConsoleStatus defines the observed status of the Console.",
	"consoleURL": "The URL for the console. This will be derived from the host for the route that is created for the console.",
}

@@ -690,7 +701,7 @@ func (RegistryLocation) SwaggerDoc() map[string]string {

var map_RegistrySources = map[string]string{
	"":                   "RegistrySources holds cluster-wide information about how to handle the registries config.",
-	"insecureRegistries": "InsecureRegistries are registries which do not have a valid SSL certificate or only support HTTP connections.",
+	"insecureRegistries": "InsecureRegistries are registries which do not have a valid TLS certificate or only support HTTP connections.",
	"blockedRegistries":  "BlockedRegistries are blacklisted from image pull/push. All other registries are allowed.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.",
	"allowedRegistries":  "AllowedRegistries are whitelisted for image pull/push. All other registries are blocked.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.",
}

@@ -699,6 +710,34 @@ func (RegistrySources) SwaggerDoc() map[string]string {
	return map_RegistrySources
}

+var map_AWSPlatformStatus = map[string]string{
+	"":       "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.",
+	"region": "region holds the default AWS region for new AWS resources created by the cluster.",
+}
+
+func (AWSPlatformStatus) SwaggerDoc() map[string]string {
+	return map_AWSPlatformStatus
+}
+
+var map_AzurePlatformStatus = map[string]string{
+	"":                  "AzurePlatformStatus holds the current status of the Azure infrastructure provider.",
+	"resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.",
+}
+
+func (AzurePlatformStatus) SwaggerDoc() map[string]string {
+	return map_AzurePlatformStatus
+}
+
+var map_GCPPlatformStatus = map[string]string{
+	"":          "GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.",
+	"projectID": "projectID is the Project ID for new GCP resources created for the cluster.",
+	"region":    "region holds the region for new GCP resources created for the cluster.",
+}
+
+func (GCPPlatformStatus) SwaggerDoc() map[string]string {
+	return map_GCPPlatformStatus
+}
+
var map_Infrastructure = map[string]string{
	"":         "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`",
	"metadata": "Standard object's metadata.",
@@ -729,17 +768,31 @@ func (InfrastructureSpec) SwaggerDoc() map[string]string {
}

var map_InfrastructureStatus = map[string]string{
-	"":                    "InfrastructureStatus describes the infrastructure the cluster is leveraging.",
-	"infrastructureName":  "infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters.",
-	"platform":            "platform is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.",
-	"etcdDiscoveryDomain": "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery",
-	"apiServerURL":        "apiServerURL is a valid URL with scheme(http/https), address and port. apiServerURL can be used by components like kubelet on machines, to contact the `apisever` using the infrastructure provider rather than the kubernetes networking.",
+	"":                     "InfrastructureStatus describes the infrastructure the cluster is leveraging.",
+	"infrastructureName":   "infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters.",
+	"platform":             "platform is the underlying infrastructure provider for the cluster.\n\nDeprecated: Use platformStatus.type instead.",
+	"platformStatus":       "platformStatus holds status information specific to the underlying infrastructure provider.",
+	"etcdDiscoveryDomain":  "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery",
+	"apiServerURL":         "apiServerURL is a valid URI with scheme(http/https), address and port. apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API.",
+	"apiServerInternalURI": "apiServerInternalURL is a valid URI with scheme(http/https), address and port. apiServerInternalURL can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking.",
}

func (InfrastructureStatus) SwaggerDoc() map[string]string {
	return map_InfrastructureStatus
}

+var map_PlatformStatus = map[string]string{
+	"":      "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is expected that only one of the status structs is set.",
+	"type":  "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.",
+	"aws":   "AWS contains settings specific to the Amazon Web Services infrastructure provider.",
+	"azure": "Azure contains settings specific to the Azure infrastructure provider.",
+	"gcp":   "GCP contains settings specific to the Google Cloud Platform infrastructure provider.",
+}
+
+func (PlatformStatus) SwaggerDoc() map[string]string {
+	return map_PlatformStatus
+}
+
var map_Ingress = map[string]string{
	"":         "Ingress holds cluster-wide information about Ingress. The canonical name is `cluster`",
	"metadata": "Standard object's metadata.",
@@ -777,6 +830,26 @@ func (ClusterNetworkEntry) SwaggerDoc() map[string]string {
	return map_ClusterNetworkEntry
}

+var map_ExternalIPConfig = map[string]string{
+	"":                "ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field of a Service resource.",
+	"policy":          "policy is a set of restrictions applied to the ExternalIP field. If nil, any value is allowed for an ExternalIP. If the empty/zero policy is supplied, then ExternalIP is not allowed to be set.",
+	"autoAssignCIDRs": "autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In OpenShift 3.x, this was misleadingly called \"IngressIPs\". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided.",
+}
+
+func (ExternalIPConfig) SwaggerDoc() map[string]string {
+	return map_ExternalIPConfig
+}
+
+var map_ExternalIPPolicy = map[string]string{
+	"":              "ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP field in a Service. If the zero struct is supplied, then none are permitted. The policy controller always allows automatically assigned external IPs.",
+	"allowedCIDRs":  "allowedCIDRs is the list of allowed CIDRs.",
+	"rejectedCIDRs": "rejectedCIDRs is the list of disallowed CIDRs. These take precedence over allowedCIDRs.",
+}
+
+func (ExternalIPPolicy) SwaggerDoc() map[string]string {
+	return map_ExternalIPPolicy
+}
+
var map_Network = map[string]string{
	"":         "Network holds cluster-wide information about Network. The canonical name is `cluster`",
	"metadata": "Standard object's metadata.",
@@ -797,10 +870,11 @@ func (NetworkList) SwaggerDoc() map[string]string {
}

var map_NetworkSpec = map[string]string{
-	"":               "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, none of these fields may be changed after installation.",
+	"":               "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, changing ClusterNetwork, ServiceNetwork, or NetworkType after installation is not supported.",
	"clusterNetwork": "IP address pool to use for pod IPs.",
	"serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.",
	"networkType":    "NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN",
+	"externalIP":     "externalIP defines configuration for controllers that affect Service.ExternalIP",
}

func (NetworkSpec) SwaggerDoc() map[string]string {
@@ -1077,8 +1151,9 @@ func (TemplateReference) SwaggerDoc() map[string]string {
}

var map_Proxy = map[string]string{
-	"":     "Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`",
-	"spec": "Spec holds user-settable values for the proxy configuration",
+	"":       "Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`",
+	"spec":   "Spec holds user-settable values for the proxy configuration",
+	"status": "status holds observed values from the cluster. They may not be overridden.",
}

func (Proxy) SwaggerDoc() map[string]string {
@@ -1094,15 +1169,27 @@ func (ProxyList) SwaggerDoc() map[string]string {
}

var map_ProxySpec = map[string]string{
+	"":           "ProxySpec contains cluster proxy creation configuration.",
	"httpProxy":  "httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.",
	"httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.",
-	"noProxy":    "noProxy is the list of domains for which the proxy should not be used. Empty means unset and will not result in an env var.",
+	"noProxy":    "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in an env var.",
}

func (ProxySpec) SwaggerDoc() map[string]string {
	return map_ProxySpec
}

+var map_ProxyStatus = map[string]string{
+	"":           "ProxyStatus shows current known state of the cluster proxy.",
+	"httpProxy":  "httpProxy is the URL of the proxy for HTTP requests.",
+	"httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests.",
+	"noProxy":    "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.",
+}
+
+func (ProxyStatus) SwaggerDoc() map[string]string {
+	return map_ProxyStatus
+}
+
var map_Scheduler = map[string]string{
	"":         "Scheduler holds cluster-wide information about Scheduler. The canonical name is `cluster`",
	"metadata": "Standard object's metadata.",
diff --git a/vendor/github.com/openshift/api/image/v1/consts.go b/vendor/github.com/openshift/api/image/v1/consts.go
new file mode 100644
index 000000000..11f57a44a
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/v1/consts.go
@@ -0,0 +1,69 @@
+package v1
+
+import corev1 "k8s.io/api/core/v1"
+
+const (
+	// ManagedByOpenShiftAnnotation indicates that an image is managed by OpenShift's registry.
+	ManagedByOpenShiftAnnotation = "openshift.io/image.managed"
+
+	// DockerImageRepositoryCheckAnnotation indicates that OpenShift has
+	// attempted to import tag and image information from an external Docker
+	// image repository.
+	DockerImageRepositoryCheckAnnotation = "openshift.io/image.dockerRepositoryCheck"
+
+	// InsecureRepositoryAnnotation may be set true on an image stream to allow insecure access to pull content.
+	InsecureRepositoryAnnotation = "openshift.io/image.insecureRepository"
+
+	// ExcludeImageSecretAnnotation indicates that a secret should not be returned by imagestream/secrets.
+	ExcludeImageSecretAnnotation = "openshift.io/image.excludeSecret"
+
+	// DockerImageLayersOrderAnnotation describes layers order in the docker image.
+	DockerImageLayersOrderAnnotation = "image.openshift.io/dockerLayersOrder"
+
+	// DockerImageLayersOrderAscending indicates that image layers are sorted in
+	// the order of their addition (from oldest to latest).
+	DockerImageLayersOrderAscending = "ascending"
+
+	// DockerImageLayersOrderDescending indicates that layers are sorted in
+	// reversed order of their addition (from newest to oldest).
+	DockerImageLayersOrderDescending = "descending"
+
+	// ImporterPreferArchAnnotation represents an architecture that should be
+	// selected if an image uses a manifest list and it should be
+	// downconverted.
+	ImporterPreferArchAnnotation = "importer.image.openshift.io/prefer-arch"
+
+	// ImporterPreferOSAnnotation represents an operating system that should
+	// be selected if an image uses a manifest list and it should be
+	// downconverted.
+	ImporterPreferOSAnnotation = "importer.image.openshift.io/prefer-os"
+
+	// ImageManifestBlobStoredAnnotation indicates that the manifest and config blobs of an image are stored in
+	// the storage of the integrated Docker registry.
+	ImageManifestBlobStoredAnnotation = "image.openshift.io/manifestBlobStored"
+
+	// DefaultImageTag is used when an image tag is needed and the configuration does not specify a tag to use.
+	DefaultImageTag = "latest"
+
+	// ResourceImageStreams represents a number of image streams in a project.
+	ResourceImageStreams corev1.ResourceName = "openshift.io/imagestreams"
+
+	// ResourceImageStreamImages represents a number of unique references to images in all image stream
+	// statuses of a project.
+	ResourceImageStreamImages corev1.ResourceName = "openshift.io/images"
+
+	// ResourceImageStreamTags represents a number of unique references to images in all image stream specs
+	// of a project.
+	ResourceImageStreamTags corev1.ResourceName = "openshift.io/image-tags"
+
+	// Limit that applies to images. Used with a max["storage"] LimitRangeItem to set
+	// the maximum size of an image.
+	LimitTypeImage corev1.LimitType = "openshift.io/Image"
+
+	// Limit that applies to image streams. Used with a max[resource] LimitRangeItem to set the maximum number
+	// of a resource, where the resource is one of "openshift.io/images" and "openshift.io/image-tags".
+	LimitTypeImageStream corev1.LimitType = "openshift.io/ImageStream"
+
+	// The supported type of image signature.
+	ImageSignatureTypeAtomicImageV1 string = "AtomicImageV1"
+)
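The importer annotations above are ordinary object annotations. A short sketch of pinning a manifest-list import to a single platform (the image stream name is invented; this snippet is not part of the vendored change):

package main

import (
	"fmt"

	imagev1 "github.com/openshift/api/image/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical image stream asking the importer to pick the
	// linux/amd64 entry whenever a tag points at a manifest list.
	is := &imagev1.ImageStream{
		ObjectMeta: metav1.ObjectMeta{
			Name: "tools",
			Annotations: map[string]string{
				imagev1.ImporterPreferArchAnnotation: "amd64",
				imagev1.ImporterPreferOSAnnotation:   "linux",
			},
		},
	}
	fmt.Println(is.Annotations[imagev1.ImporterPreferArchAnnotation])
}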
diff --git a/vendor/github.com/openshift/api/image/v1/types.go b/vendor/github.com/openshift/api/image/v1/types.go
index 90b70f467..652494250 100644
--- a/vendor/github.com/openshift/api/image/v1/types.go
+++ b/vendor/github.com/openshift/api/image/v1/types.go
@@ -6,27 +6,6 @@ import (
	"k8s.io/apimachinery/pkg/runtime"
)

-const (
-	// ResourceImageStreams represents a number of image streams in a project.
-	ResourceImageStreams corev1.ResourceName = "openshift.io/imagestreams"
-
-	// ResourceImageStreamImages represents a number of unique references to images in all image stream
-	// statuses of a project.
-	ResourceImageStreamImages corev1.ResourceName = "openshift.io/images"
-
-	// ResourceImageStreamTags represents a number of unique references to images in all image stream specs
-	// of a project.
-	ResourceImageStreamTags corev1.ResourceName = "openshift.io/image-tags"
-
-	// Limit that applies to images. Used with a max["storage"] LimitRangeItem to set
-	// the maximum size of an image.
-	LimitTypeImage corev1.LimitType = "openshift.io/Image"
-
-	// Limit that applies to image streams. Used with a max[resource] LimitRangeItem to set the maximum number
-	// of resource. Where the resource is one of "openshift.io/images" and "openshift.io/image-tags".
-	LimitTypeImageStream corev1.LimitType = "openshift.io/ImageStream"
-)
-
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ImageList is a list of Image objects.
diff --git a/vendor/github.com/openshift/api/security/v1/consts.go b/vendor/github.com/openshift/api/security/v1/consts.go
new file mode 100644
index 000000000..28f8e5ae6
--- /dev/null
+++ b/vendor/github.com/openshift/api/security/v1/consts.go
@@ -0,0 +1,10 @@
+package v1
+
+const (
+	UIDRangeAnnotation = "openshift.io/sa.scc.uid-range"
+	// SupplementalGroupsAnnotation contains a comma delimited list of allocated supplemental groups
+	// for the namespace. Groups are in the form of a Block which supports {start}/{length} or {start}-{end}
+	SupplementalGroupsAnnotation = "openshift.io/sa.scc.supplemental-groups"
+	MCSAnnotation = "openshift.io/sa.scc.mcs"
+	ValidatedSCCAnnotation = "openshift.io/scc"
+)
diff --git a/vendor/github.com/openshift/api/security/v1/types.go b/vendor/github.com/openshift/api/security/v1/types.go
index 06c48ce5f..2232a83dc 100644
--- a/vendor/github.com/openshift/api/security/v1/types.go
+++ b/vendor/github.com/openshift/api/security/v1/types.go
@@ -20,6 +20,7 @@ var AllowAllCapabilities corev1.Capability = "*"
// That exposure is deprecated and will be removed in a future release - users
// should instead use the security.openshift.io group to manage
// SecurityContextConstraints.
+// +kubebuilder:singular=securitycontextconstraint
type SecurityContextConstraints struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
@@ -32,6 +33,7 @@ type SecurityContextConstraints struct {
	// for multiple SCCs are equal they will be sorted from most restrictive to
	// least restrictive. If both priorities and restrictions are equal the
	// SCCs will be sorted by name.
	// +nullable
	Priority *int32 `json:"priority" protobuf:"varint,2,opt,name=priority"`

	// AllowPrivilegedContainer determines if a container can request to be run as privileged.
@@ -39,14 +41,17 @@ type SecurityContextConstraints struct {
	// DefaultAddCapabilities is the default set of capabilities that will be added to the container
	// unless the pod spec specifically drops the capability. You may not list a capability in both
	// DefaultAddCapabilities and RequiredDropCapabilities.
+	// +nullable
	DefaultAddCapabilities []corev1.Capability `json:"defaultAddCapabilities" protobuf:"bytes,4,rep,name=defaultAddCapabilities,casttype=Capability"`
	// RequiredDropCapabilities are the capabilities that will be dropped from the container. These
	// are required to be dropped and cannot be added.
+	// +nullable
	RequiredDropCapabilities []corev1.Capability `json:"requiredDropCapabilities" protobuf:"bytes,5,rep,name=requiredDropCapabilities,casttype=Capability"`
	// AllowedCapabilities is a list of capabilities that can be requested to add to the container.
	// Capabilities in this field may be added at the pod author's discretion.
	// You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.
	// To allow all capabilities you may use '*'.
+	// +nullable
	AllowedCapabilities []corev1.Capability `json:"allowedCapabilities" protobuf:"bytes,6,rep,name=allowedCapabilities,casttype=Capability"`
	// AllowHostDirVolumePlugin determines if the policy allows containers to use the HostDir volume plugin
	// +k8s:conversion-gen=false
@@ -54,6 +59,7 @@ type SecurityContextConstraints struct {
	// Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names
	// of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*".
	// To allow no volumes, set to ["none"].
+	// +nullable
	Volumes []FSType `json:"volumes" protobuf:"bytes,8,rep,name=volumes,casttype=FSType"`
	// AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all
	// Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes
@@ -80,12 +86,16 @@ type SecurityContextConstraints struct {
	// +nullable
	AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,23,rep,name=allowPrivilegeEscalation"`
	// SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.
+	// +nullable
	SELinuxContext SELinuxContextStrategyOptions `json:"seLinuxContext,omitempty" protobuf:"bytes,13,opt,name=seLinuxContext"`
	// RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext.
+	// +nullable
	RunAsUser RunAsUserStrategyOptions `json:"runAsUser,omitempty" protobuf:"bytes,14,opt,name=runAsUser"`
	// SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
+	// +nullable
	SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups,omitempty" protobuf:"bytes,15,opt,name=supplementalGroups"`
	// FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.
+	// +nullable
	FSGroup FSGroupStrategyOptions `json:"fsGroup,omitempty" protobuf:"bytes,16,opt,name=fsGroup"`
	// ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file
	// system. If the container specifically requests to run with a non-read only root file system
@@ -108,6 +118,7 @@ type SecurityContextConstraints struct {
	// be specified by the pod or container. The wildcard '*' may be used to allow all profiles. When
	// used to generate a value for a pod the first non-wildcard profile will be used as
	// the default.
+	// +nullable
	SeccompProfiles []string `json:"seccompProfiles,omitempty" protobuf:"bytes,20,opt,name=seccompProfiles"`

	// AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none.
@@ -164,6 +175,7 @@ var (
	FSPortworxVolume FSType = "portworxVolume"
	FSScaleIO        FSType = "scaleIO"
	FSStorageOS      FSType = "storageOS"
+	FSTypeCSI        FSType = "csi"
	FSTypeAll        FSType = "*"
	FSTypeNone       FSType = "none"
)
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go
index 6dbad4fc2..d711e3a37 100644
--- a/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go
@@ -12,8 +12,6 @@ import (
type Interface interface {
	Discovery() discovery.DiscoveryInterface
	ConfigV1() configv1.ConfigV1Interface
-	// Deprecated: please explicitly pick a version if possible.
-	Config() configv1.ConfigV1Interface
}

// Clientset contains the clients for groups. Each group has exactly one
@@ -28,12 +26,6 @@ func (c *Clientset) ConfigV1() configv1.ConfigV1Interface {
	return c.configV1
}

-// Deprecated: Config retrieves the default version of ConfigClient.
-// Please explicitly pick a version.
-func (c *Clientset) Config() configv1.ConfigV1Interface {
-	return c.configV1
-}
-
// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
	if c == nil {
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/clientset_generated.go
index 8e64843f8..e7f280f61 100644
--- a/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/clientset_generated.go
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/clientset_generated.go
@@ -59,8 +59,3 @@ var _ clientset.Interface = &Clientset{}
func (c *Clientset) ConfigV1() configv1.ConfigV1Interface {
	return &fakeconfigv1.FakeConfigV1{Fake: &c.Fake}
}
-
-// Config retrieves the ConfigV1Client
-func (c *Clientset) Config() configv1.ConfigV1Interface {
-	return &fakeconfigv1.FakeConfigV1{Fake: &c.Fake}
-}
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_proxy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_proxy.go
index 9cd56e5ab..7ae1b98b1 100644
--- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_proxy.go
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_proxy.go
@@ -78,6 +78,17 @@ func (c *FakeProxies) Update(proxy *configv1.Proxy) (result *configv1.Proxy, err
	return obj.(*configv1.Proxy), err
}

+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeProxies) UpdateStatus(proxy *configv1.Proxy) (*configv1.Proxy, error) {
+	obj, err := c.Fake.
+		Invokes(testing.NewRootUpdateSubresourceAction(proxiesResource, "status", proxy), &configv1.Proxy{})
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*configv1.Proxy), err
+}
+
// Delete takes name of the proxy and deletes it. Returns an error if one occurs.
func (c *FakeProxies) Delete(name string, options *v1.DeleteOptions) error {
	_, err := c.Fake.
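With Proxy gaining a status subresource, both the fake client above and the REST-backed client below expose UpdateStatus. A sketch of exercising it through the fake clientset (the proxy URL is invented; this snippet is not part of the vendored change):

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	"github.com/openshift/client-go/config/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	client := fake.NewSimpleClientset(&configv1.Proxy{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec:       configv1.ProxySpec{HTTPSProxy: "https://proxy.example.com:3128"},
	})

	proxy, err := client.ConfigV1().Proxies().Get("cluster", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	// Publish the observed value through the new status subresource.
	proxy.Status.HTTPSProxy = proxy.Spec.HTTPSProxy
	if _, err := client.ConfigV1().Proxies().UpdateStatus(proxy); err != nil {
		panic(err)
	}
	fmt.Println(proxy.Status.HTTPSProxy)
}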
diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/proxy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/proxy.go
index 50fa6b49d..189bf0ca2 100644
--- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/proxy.go
+++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/proxy.go
@@ -23,6 +23,7 @@ type ProxiesGetter interface {
type ProxyInterface interface {
	Create(*v1.Proxy) (*v1.Proxy, error)
	Update(*v1.Proxy) (*v1.Proxy, error)
+	UpdateStatus(*v1.Proxy) (*v1.Proxy, error)
	Delete(name string, options *metav1.DeleteOptions) error
	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
	Get(name string, options metav1.GetOptions) (*v1.Proxy, error)
@@ -109,6 +110,21 @@ func (c *proxies) Update(proxy *v1.Proxy) (result *v1.Proxy, err error) {
	return
}

+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *proxies) UpdateStatus(proxy *v1.Proxy) (result *v1.Proxy, err error) {
+	result = &v1.Proxy{}
+	err = c.client.Put().
+		Resource("proxies").
+		Name(proxy.Name).
+		SubResource("status").
+		Body(proxy).
+		Do().
+		Into(result)
+	return
+}
+
// Delete takes name of the proxy and deletes it. Returns an error if one occurs.
func (c *proxies) Delete(name string, options *metav1.DeleteOptions) error {
	return c.client.Delete().