From 4df8095bf06c063ea47844e6a449285547098143 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 Jan 2026 15:06:35 +0000 Subject: [PATCH 1/6] :seedling: Bump github.com/sirupsen/logrus from 1.9.3 to 1.9.4 (#3740) Bumps [github.com/sirupsen/logrus](https://github.com/sirupsen/logrus) from 1.9.3 to 1.9.4. - [Release notes](https://github.com/sirupsen/logrus/releases) - [Changelog](https://github.com/sirupsen/logrus/blob/master/CHANGELOG.md) - [Commits](https://github.com/sirupsen/logrus/compare/v1.9.3...v1.9.4) --- updated-dependencies: - dependency-name: github.com/sirupsen/logrus dependency-version: 1.9.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Upstream-repository: operator-lifecycle-manager Upstream-commit: 74911fcd295a91d9a3f407a8340ece0d12ecdac3 --- go.mod | 2 +- go.sum | 6 +- staging/operator-lifecycle-manager/go.mod | 2 +- staging/operator-lifecycle-manager/go.sum | 6 +- .../github.com/sirupsen/logrus/.golangci.yml | 95 ++++++++----- .../github.com/sirupsen/logrus/CHANGELOG.md | 4 +- vendor/github.com/sirupsen/logrus/README.md | 126 +++++++++--------- .../github.com/sirupsen/logrus/appveyor.yml | 16 +-- vendor/github.com/sirupsen/logrus/entry.go | 25 ++-- vendor/github.com/sirupsen/logrus/hooks.go | 8 +- vendor/github.com/sirupsen/logrus/logger.go | 34 ++--- vendor/github.com/sirupsen/logrus/logrus.go | 20 +-- .../sirupsen/logrus/terminal_check_bsd.go | 2 +- .../sirupsen/logrus/terminal_check_unix.go | 2 + .../sirupsen/logrus/terminal_check_wasi.go | 8 ++ .../sirupsen/logrus/terminal_check_wasip1.go | 8 ++ .../sirupsen/logrus/text_formatter.go | 3 +- vendor/modules.txt | 4 +- 18 files changed, 211 insertions(+), 160 deletions(-) create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_wasi.go create mode 100644 
vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go diff --git a/go.mod b/go.mod index a318cb41bc..34a846cea9 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/operator-framework/api v0.37.0 github.com/operator-framework/operator-lifecycle-manager v0.0.0-00010101000000-000000000000 github.com/operator-framework/operator-registry v1.61.0 - github.com/sirupsen/logrus v1.9.3 + github.com/sirupsen/logrus v1.9.4 github.com/spf13/cobra v1.10.2 github.com/stretchr/testify v1.11.1 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 diff --git a/go.sum b/go.sum index 3f383f10ef..8444576b85 100644 --- a/go.sum +++ b/go.sum @@ -480,8 +480,8 @@ github.com/sigstore/protobuf-specs v0.4.3/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os github.com/sigstore/sigstore v1.9.5 h1:Wm1LT9yF4LhQdEMy5A2JeGRHTrAWGjT3ubE5JUSrGVU= github.com/sigstore/sigstore v1.9.5/go.mod h1:VtxgvGqCmEZN9X2zhFSOkfXxvKUjpy8RpUW39oCtoII= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= +github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= github.com/smallstep/pkcs7 v0.2.1 h1:6Kfzr/QizdIuB6LSv8y1LJdZ3aPSfTNhTLqAx9CTLfA= github.com/smallstep/pkcs7 v0.2.1/go.mod h1:RcXHsMfL+BzH8tRhmrF1NkkpebKpq3JEM66cOFxanf0= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= @@ -517,7 +517,6 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= @@ -732,7 +731,6 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/staging/operator-lifecycle-manager/go.mod b/staging/operator-lifecycle-manager/go.mod index 3b59b8403c..fc2372fba6 100644 --- a/staging/operator-lifecycle-manager/go.mod +++ b/staging/operator-lifecycle-manager/go.mod @@ -27,7 +27,7 @@ require ( github.com/prometheus/client_golang v1.23.2 github.com/prometheus/client_model v0.6.2 github.com/prometheus/common v0.67.5 - github.com/sirupsen/logrus v1.9.3 + github.com/sirupsen/logrus v1.9.4 github.com/spf13/cobra v1.10.2 github.com/spf13/pflag v1.0.10 github.com/stretchr/testify v1.11.1 diff --git a/staging/operator-lifecycle-manager/go.sum b/staging/operator-lifecycle-manager/go.sum index 8d6b407435..3a28495536 100644 --- a/staging/operator-lifecycle-manager/go.sum +++ b/staging/operator-lifecycle-manager/go.sum @@ -365,8 +365,8 @@ 
github.com/sigstore/protobuf-specs v0.4.3 h1:kRgJ+ciznipH9xhrkAbAEHuuxD3GhYnGC87 github.com/sigstore/protobuf-specs v0.4.3/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= github.com/sigstore/sigstore v1.9.5 h1:Wm1LT9yF4LhQdEMy5A2JeGRHTrAWGjT3ubE5JUSrGVU= github.com/sigstore/sigstore v1.9.5/go.mod h1:VtxgvGqCmEZN9X2zhFSOkfXxvKUjpy8RpUW39oCtoII= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= +github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= github.com/smallstep/pkcs7 v0.2.1 h1:6Kfzr/QizdIuB6LSv8y1LJdZ3aPSfTNhTLqAx9CTLfA= github.com/smallstep/pkcs7 v0.2.1/go.mod h1:RcXHsMfL+BzH8tRhmrF1NkkpebKpq3JEM66cOFxanf0= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= @@ -386,7 +386,6 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= @@ -540,7 +539,6 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml index 65dc285037..792db36181 100644 --- a/vendor/github.com/sirupsen/logrus/.golangci.yml +++ b/vendor/github.com/sirupsen/logrus/.golangci.yml @@ -1,40 +1,67 @@ +version: "2" run: - # do not run on test files yet tests: false - -# all available settings of specific linters -linters-settings: - errcheck: - # report about not checking of errors in type assetions: `a := b.(MyStruct)`; - # default is false: such cases aren't reported by default. - check-type-assertions: false - - # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; - # default is false: such cases aren't reported by default. 
- check-blank: false - - lll: - line-length: 100 - tab-width: 4 - - prealloc: - simple: false - range-loops: false - for-loops: false - - whitespace: - multi-if: false # Enforces newlines (or comments) after every multi-line if statement - multi-func: false # Enforces newlines (or comments) after every multi-line function signature - linters: enable: - - megacheck - - govet + - asasalint + - asciicheck + - bidichk + - bodyclose + - contextcheck + - durationcheck + - errchkjson + - errorlint + - exhaustive + - gocheckcompilerdirectives + - gochecksumtype + - gosec + - gosmopolitan + - loggercheck + - makezero + - musttag + - nilerr + - nilnesserr + - noctx + - protogetter + - reassign + - recvcheck + - rowserrcheck + - spancheck + - sqlclosecheck + - testifylint + - unparam + - zerologlint disable: - - maligned - prealloc - disable-all: false - presets: - - bugs - - unused - fast: false + settings: + errcheck: + check-type-assertions: false + check-blank: false + lll: + line-length: 100 + tab-width: 4 + prealloc: + simple: false + range-loops: false + for-loops: false + whitespace: + multi-if: false + multi-func: false + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md index 7567f61289..098608ff4b 100644 --- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md +++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -37,7 +37,7 @@ Features: # 1.6.0 Fixes: * end of line cleanup - * revert the entry concurrency bug fix whic leads to deadlock under some circumstances + * revert the entry concurrency bug fix which leads to deadlock under some circumstances * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14 Features: @@ -129,7 
+129,7 @@ This new release introduces: which is mostly useful for logger wrapper * a fix reverting the immutability of the entry given as parameter to the hooks a new configuration field of the json formatter in order to put all the fields - in a nested dictionnary + in a nested dictionary * a new SetOutput method in the Logger * a new configuration of the textformatter to configure the name of the default keys * a new configuration of the text formatter to disable the level truncation diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index d1d4a85fd7..cc5dab7eb7 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -1,4 +1,4 @@ -# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) +# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with the standard library logger. 
@@ -40,7 +40,7 @@ plain text): ![Colored](http://i.imgur.com/PY7qMwd.png) -With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +With `logrus.SetFormatter(&logrus.JSONFormatter{})`, for easy parsing by logstash or Splunk: ```text @@ -60,9 +60,9 @@ ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} "time":"2014-03-10 19:57:38.562543128 -0400 EDT"} ``` -With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +With the default `logrus.SetFormatter(&logrus.TextFormatter{})` when a TTY is not attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: +[logfmt](https://pkg.go.dev/github.com/kr/logfmt) format: ```text time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 @@ -75,17 +75,18 @@ time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x20822 To ensure this behaviour even if a TTY is attached, set your formatter as follows: ```go - log.SetFormatter(&log.TextFormatter{ - DisableColors: true, - FullTimestamp: true, - }) +logrus.SetFormatter(&logrus.TextFormatter{ + DisableColors: true, + FullTimestamp: true, +}) ``` #### Logging Method Name If you wish to add the calling method as a field, instruct the logger via: + ```go -log.SetReportCaller(true) +logrus.SetReportCaller(true) ``` This adds the caller as 'method' like so: @@ -100,11 +101,11 @@ time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcr Note that this does add measurable overhead - the cost will depend on the version of Go, but is between 20 and 40% in recent tests with 1.6 and 1.7. 
You can validate this in your environment via benchmarks: -``` + +```bash go test -bench=.*CallerTracing ``` - #### Case-sensitivity The organization's name was changed to lower-case--and this will not be changed @@ -118,12 +119,10 @@ The simplest way to use Logrus is simply the package-level exported logger: ```go package main -import ( - log "github.com/sirupsen/logrus" -) +import "github.com/sirupsen/logrus" func main() { - log.WithFields(log.Fields{ + logrus.WithFields(logrus.Fields{ "animal": "walrus", }).Info("A walrus appears") } @@ -139,6 +138,7 @@ package main import ( "os" + log "github.com/sirupsen/logrus" ) @@ -190,26 +190,27 @@ package main import ( "os" + "github.com/sirupsen/logrus" ) // Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() +var logger = logrus.New() func main() { // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stdout + // exported logger. See Godoc. + logger.Out = os.Stdout // You could set this to any `io.Writer` such as a file // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) // if err == nil { - // log.Out = file + // logger.Out = file // } else { - // log.Info("Failed to log to file, using default stderr") + // logger.Info("Failed to log to file, using default stderr") // } - log.WithFields(logrus.Fields{ + logger.WithFields(logrus.Fields{ "animal": "walrus", "size": 10, }).Info("A group of walrus emerges from the ocean") @@ -219,12 +220,12 @@ func main() { #### Fields Logrus encourages careful, structured logging through logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +long, unparseable error messages. 
For example, instead of: `logrus.Fatalf("Failed to send event %s to topic %s with key %d")`, you should log the much more discoverable: ```go -log.WithFields(log.Fields{ +logrus.WithFields(logrus.Fields{ "event": event, "topic": topic, "key": key, @@ -245,12 +246,12 @@ seen as a hint you should add a field, however, you can still use the Often it's helpful to have fields _always_ attached to log statements in an application or parts of one. For example, you may want to always log the `request_id` and `user_ip` in the context of a request. Instead of writing -`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +`logger.WithFields(logrus.Fields{"request_id": request_id, "user_ip": user_ip})` on every line, you can create a `logrus.Entry` to pass around instead: ```go -requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) -requestLogger.Info("something happened on that request") # will log request_id and user_ip +requestLogger := logger.WithFields(logrus.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") // will log request_id and user_ip requestLogger.Warn("something not great happened") ``` @@ -264,28 +265,31 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in `init`: ```go +package main + import ( - log "github.com/sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" - logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" "log/syslog" + + "github.com/sirupsen/logrus" + airbrake "gopkg.in/gemnasium/logrus-airbrake-hook.v2" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" ) func init() { // Use the Airbrake hook to report errors that have Error severity or above to // an exception tracker. You can create custom hooks, see the Hooks section. 
- log.AddHook(airbrake.NewHook(123, "xyz", "production")) + logrus.AddHook(airbrake.NewHook(123, "xyz", "production")) hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") if err != nil { - log.Error("Unable to connect to local syslog daemon") + logrus.Error("Unable to connect to local syslog daemon") } else { - log.AddHook(hook) + logrus.AddHook(hook) } } ``` -Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). +Note: Syslog hooks also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) @@ -295,15 +299,15 @@ A list of currently known service hooks can be found in this wiki [page](https:/ Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. ```go -log.Trace("Something very low level.") -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") +logrus.Trace("Something very low level.") +logrus.Debug("Useful debugging information.") +logrus.Info("Something noteworthy happened!") +logrus.Warn("You should probably take a look at this.") +logrus.Error("Something failed but I'm not quitting.") // Calls os.Exit(1) after logging -log.Fatal("Bye.") +logrus.Fatal("Bye.") // Calls panic() after logging -log.Panic("I'm bailing.") +logrus.Panic("I'm bailing.") ``` You can set the logging level on a `Logger`, then it will only log entries with @@ -311,13 +315,13 @@ that severity or anything above it: ```go // Will log anything that is info or above (warn, error, fatal, panic). Default. 
-log.SetLevel(log.InfoLevel) +logrus.SetLevel(logrus.InfoLevel) ``` -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +It may be useful to set `logrus.Level = logrus.DebugLevel` in a debug or verbose environment if your application has that. -Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging). +Note: If you want different log levels for global (`logrus.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging). #### Entries @@ -340,17 +344,17 @@ could do: ```go import ( - log "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus" ) func init() { // do something here to set environment depending on an environment variable // or command-line flag if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) + logrus.SetFormatter(&logrus.JSONFormatter{}) } else { // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(&log.TextFormatter{}) + logrus.SetFormatter(&logrus.TextFormatter{}) } } ``` @@ -372,11 +376,11 @@ The built-in logging formatters are: * When colors are enabled, levels are truncated to 4 characters by default. To disable truncation set the `DisableLevelTruncation` field to `true`. * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text. - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). + * All options are listed in the [generated docs](https://pkg.go.dev/github.com/sirupsen/logrus#TextFormatter). * `logrus.JSONFormatter`. Logs fields as JSON. 
- * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). + * All options are listed in the [generated docs](https://pkg.go.dev/github.com/sirupsen/logrus#JSONFormatter). -Third party logging formatters: +Third-party logging formatters: * [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. * [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). @@ -384,7 +388,7 @@ Third party logging formatters: * [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. * [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. * [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. -* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Sava log to files. +* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Save log to files. * [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added. You can define your formatter by implementing the `Formatter` interface, @@ -393,10 +397,9 @@ requiring a `Format` method. `Format` takes an `*Entry`. 
`entry.Data` is a default ones (see Entries section above): ```go -type MyJSONFormatter struct { -} +type MyJSONFormatter struct{} -log.SetFormatter(new(MyJSONFormatter)) +logrus.SetFormatter(new(MyJSONFormatter)) func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { // Note this doesn't include Time, Level and Message which are available on @@ -455,17 +458,18 @@ entries. It should not be a feature of the application-level logger. #### Testing -Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: +Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: * decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook * a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): ```go import( + "testing" + "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/assert" - "testing" ) func TestSomething(t*testing.T){ @@ -486,15 +490,15 @@ func TestSomething(t*testing.T){ Logrus can register one or more functions that will be called when any `fatal` level message is logged. The registered handlers will be executed before logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need -to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. +to gracefully shut down. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. -``` -... +```go +// ... handler := func() { - // gracefully shutdown something... + // gracefully shut down something... } logrus.RegisterExitHandler(handler) -... +// ... 
``` #### Thread safety @@ -502,7 +506,7 @@ logrus.RegisterExitHandler(handler) By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. -Situation when locking is not needed includes: +Situations when locking is not needed include: * You have no hooks registered, or hooks calling is already thread-safe. diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml index df9d65c3a5..e90f09ea68 100644 --- a/vendor/github.com/sirupsen/logrus/appveyor.yml +++ b/vendor/github.com/sirupsen/logrus/appveyor.yml @@ -1,14 +1,12 @@ -version: "{build}" +# Minimal stub to satisfy AppVeyor CI +version: 1.0.{build} platform: x64 -clone_folder: c:\gopath\src\github.com\sirupsen\logrus -environment: - GOPATH: c:\gopath +shallow_clone: true + branches: only: - master -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version + - main + build_script: - - go get -t - - go test + - echo "No-op build to satisfy AppVeyor CI" diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go index 71cdbbc35d..71d796d0b1 100644 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -34,13 +34,15 @@ func init() { minimumCallerDepth = 1 } -// Defines the key when adding errors using WithError. +// ErrorKey defines the key when adding errors using [WithError], [Logger.WithError]. var ErrorKey = "error" -// An entry is the final or intermediate Logrus logging entry. It contains all +// Entry is the final or intermediate Logrus logging entry. It contains all // the fields passed with WithField{,s}. It's finally logged when Trace, Debug, // Info, Warn, Error, Fatal or Panic is called on it. These objects can be // reused and passed around as much as you wish to avoid field duplication. 
+// +//nolint:recvcheck // the methods of "Entry" use pointer receiver and non-pointer receiver. type Entry struct { Logger *Logger @@ -86,12 +88,12 @@ func (entry *Entry) Dup() *Entry { return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} } -// Returns the bytes representation of this entry from the formatter. +// Bytes returns the bytes representation of this entry from the formatter. func (entry *Entry) Bytes() ([]byte, error) { return entry.Logger.Formatter.Format(entry) } -// Returns the string representation from the reader and ultimately the +// String returns the string representation from the reader and ultimately the // formatter. func (entry *Entry) String() (string, error) { serialized, err := entry.Bytes() @@ -102,12 +104,13 @@ func (entry *Entry) String() (string, error) { return str, nil } -// Add an error as single field (using the key defined in ErrorKey) to the Entry. +// WithError adds an error as single field (using the key defined in [ErrorKey]) +// to the Entry. func (entry *Entry) WithError(err error) *Entry { return entry.WithField(ErrorKey, err) } -// Add a context to the Entry. +// WithContext adds a context to the Entry. func (entry *Entry) WithContext(ctx context.Context) *Entry { dataCopy := make(Fields, len(entry.Data)) for k, v := range entry.Data { @@ -116,12 +119,12 @@ func (entry *Entry) WithContext(ctx context.Context) *Entry { return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} } -// Add a single field to the Entry. +// WithField adds a single field to the Entry. func (entry *Entry) WithField(key string, value interface{}) *Entry { return entry.WithFields(Fields{key: value}) } -// Add a map of fields to the Entry. +// WithFields adds a map of fields to the Entry. 
func (entry *Entry) WithFields(fields Fields) *Entry { data := make(Fields, len(entry.Data)+len(fields)) for k, v := range entry.Data { @@ -150,7 +153,7 @@ func (entry *Entry) WithFields(fields Fields) *Entry { return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} } -// Overrides the time of the Entry. +// WithTime overrides the time of the Entry. func (entry *Entry) WithTime(t time.Time) *Entry { dataCopy := make(Fields, len(entry.Data)) for k, v := range entry.Data { @@ -204,7 +207,7 @@ func getCaller() *runtime.Frame { // If the caller isn't part of this package, we're done if pkg != logrusPackage { - return &f //nolint:scopelint + return &f } } @@ -432,7 +435,7 @@ func (entry *Entry) Panicln(args ...interface{}) { entry.Logln(PanicLevel, args...) } -// Sprintlnn => Sprint no newline. This is to get the behavior of how +// sprintlnn => Sprint no newline. This is to get the behavior of how // fmt.Sprintln where spaces are always added between operands, regardless of // their type. Instead of vendoring the Sprintln implementation to spare a // string allocation, we do the simplest thing. diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go index 3f151cdc39..9ab978a457 100644 --- a/vendor/github.com/sirupsen/logrus/hooks.go +++ b/vendor/github.com/sirupsen/logrus/hooks.go @@ -1,16 +1,16 @@ package logrus -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not +// Hook describes hooks to be fired when logging on the logging levels returned from +// [Hook.Levels] on your implementation of the interface. 
Note that this is not // fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for +// functionality yourself if your call is non-blocking, and you don't wish for // the logging calls for levels returned from `Levels()` to block. type Hook interface { Levels() []Level Fire(*Entry) error } -// Internal type for storing the hooks on a logger instance. +// LevelHooks is an internal type for storing the hooks on a logger instance. type LevelHooks map[Level][]Hook // Add a hook to an instance of logger. This is called with diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go index 5ff0aef6d3..f5b8c439ee 100644 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -72,16 +72,16 @@ func (mw *MutexWrap) Disable() { mw.disabled = true } -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just +// New Creates a new logger. Configuration should be set by changing [Formatter], +// Out and Hooks directly on the default Logger instance. You can also just // instantiate your own: // -// var log = &logrus.Logger{ -// Out: os.Stderr, -// Formatter: new(logrus.TextFormatter), -// Hooks: make(logrus.LevelHooks), -// Level: logrus.DebugLevel, -// } +// var log = &logrus.Logger{ +// Out: os.Stderr, +// Formatter: new(logrus.TextFormatter), +// Hooks: make(logrus.LevelHooks), +// Level: logrus.DebugLevel, +// } // // It's recommended to make this a global instance called `log`. func New() *Logger { @@ -118,30 +118,30 @@ func (logger *Logger) WithField(key string, value interface{}) *Entry { return entry.WithField(key, value) } -// Adds a struct of fields to the log entry. All it does is call `WithField` for -// each `Field`. +// WithFields adds a struct of fields to the log entry. 
It calls [Entry.WithField] +// for each Field. func (logger *Logger) WithFields(fields Fields) *Entry { entry := logger.newEntry() defer logger.releaseEntry(entry) return entry.WithFields(fields) } -// Add an error as single field to the log entry. All it does is call -// `WithError` for the given `error`. +// WithError adds an error as single field to the log entry. It calls +// [Entry.WithError] for the given error. func (logger *Logger) WithError(err error) *Entry { entry := logger.newEntry() defer logger.releaseEntry(entry) return entry.WithError(err) } -// Add a context to the log entry. +// WithContext add a context to the log entry. func (logger *Logger) WithContext(ctx context.Context) *Entry { entry := logger.newEntry() defer logger.releaseEntry(entry) return entry.WithContext(ctx) } -// Overrides the time of the log entry. +// WithTime overrides the time of the log entry. func (logger *Logger) WithTime(t time.Time) *Entry { entry := logger.newEntry() defer logger.releaseEntry(entry) @@ -347,9 +347,9 @@ func (logger *Logger) Exit(code int) { logger.ExitFunc(code) } -//When file is opened with appending mode, it's safe to -//write concurrently to a file (within 4k message on Linux). -//In these cases user can choose to disable the lock. +// SetNoLock disables the lock for situations where a file is opened with +// appending mode, and safe for concurrent writes to the file (within 4k +// message on Linux). In these cases user can choose to disable the lock. func (logger *Logger) SetNoLock() { logger.mu.Disable() } diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go index 2f16224cb9..37fc4fef85 100644 --- a/vendor/github.com/sirupsen/logrus/logrus.go +++ b/vendor/github.com/sirupsen/logrus/logrus.go @@ -6,13 +6,15 @@ import ( "strings" ) -// Fields type, used to pass to `WithFields`. +// Fields type, used to pass to [WithFields]. 
type Fields map[string]interface{} // Level type +// +//nolint:recvcheck // the methods of "Entry" use pointer receiver and non-pointer receiver. type Level uint32 -// Convert the Level to a string. E.g. PanicLevel becomes "panic". +// Convert the Level to a string. E.g. [PanicLevel] becomes "panic". func (level Level) String() string { if b, err := level.MarshalText(); err == nil { return string(b) @@ -77,7 +79,7 @@ func (level Level) MarshalText() ([]byte, error) { return nil, fmt.Errorf("not a valid logrus level %d", level) } -// A constant exposing all logging levels +// AllLevels exposing all logging levels. var AllLevels = []Level{ PanicLevel, FatalLevel, @@ -119,8 +121,8 @@ var ( ) // StdLogger is what your logrus-enabled library should take, that way -// it'll accept a stdlib logger and a logrus logger. There's no standard -// interface, this is the closest we get, unfortunately. +// it'll accept a stdlib logger ([log.Logger]) and a logrus logger. +// There's no standard interface, so this is the closest we get, unfortunately. type StdLogger interface { Print(...interface{}) Printf(string, ...interface{}) @@ -135,7 +137,8 @@ type StdLogger interface { Panicln(...interface{}) } -// The FieldLogger interface generalizes the Entry and Logger types +// FieldLogger extends the [StdLogger] interface, generalizing +// the [Entry] and [Logger] types. type FieldLogger interface { WithField(key string, value interface{}) *Entry WithFields(fields Fields) *Entry @@ -176,8 +179,9 @@ type FieldLogger interface { // IsPanicEnabled() bool } -// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is -// here for consistancy. Do not use. Use Logger or Entry instead. +// Ext1FieldLogger (the first extension to [FieldLogger]) is superfluous, it is +// here for consistency. Do not use. Use [FieldLogger], [Logger] or [Entry] +// instead. 
type Ext1FieldLogger interface { FieldLogger Tracef(format string, args ...interface{}) diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go index 499789984d..69956b425a 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -1,4 +1,4 @@ -// +build darwin dragonfly freebsd netbsd openbsd +// +build darwin dragonfly freebsd netbsd openbsd hurd // +build !js package logrus diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go index 04748b8515..c9aed267a4 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -1,5 +1,7 @@ +//go:build (linux || aix || zos) && !js && !wasi // +build linux aix zos // +build !js +// +build !wasi package logrus diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_wasi.go b/vendor/github.com/sirupsen/logrus/terminal_check_wasi.go new file mode 100644 index 0000000000..2822b212fb --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_wasi.go @@ -0,0 +1,8 @@ +//go:build wasi +// +build wasi + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go b/vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go new file mode 100644 index 0000000000..108a6be12b --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_wasip1.go @@ -0,0 +1,8 @@ +//go:build wasip1 +// +build wasip1 + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go index be2c6efe5e..6dfeb18b10 100644 --- a/vendor/github.com/sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -306,6 
+306,7 @@ func (f *TextFormatter) needsQuoting(text string) bool { return false } for _, ch := range text { + //nolint:staticcheck // QF1001: could apply De Morgan's law if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') || @@ -334,6 +335,6 @@ func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { if !f.needsQuoting(stringVal) { b.WriteString(stringVal) } else { - b.WriteString(fmt.Sprintf("%q", stringVal)) + fmt.Fprintf(b, "%q", stringVal) } } diff --git a/vendor/modules.txt b/vendor/modules.txt index 7081ea8f3d..942da8a821 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -856,8 +856,8 @@ github.com/sigstore/sigstore/pkg/cryptoutils github.com/sigstore/sigstore/pkg/signature github.com/sigstore/sigstore/pkg/signature/options github.com/sigstore/sigstore/pkg/signature/payload -# github.com/sirupsen/logrus v1.9.3 -## explicit; go 1.13 +# github.com/sirupsen/logrus v1.9.4 +## explicit; go 1.17 github.com/sirupsen/logrus # github.com/smallstep/pkcs7 v0.2.1 ## explicit; go 1.14 From dcd5e215f3885d59c53cff0a5df91c2a9ed2665d Mon Sep 17 00:00:00 2001 From: Jian Zhang Date: Wed, 21 Jan 2026 00:08:14 +0800 Subject: [PATCH 2/6] Fix nil pointer dereference panic in event recorder (#3727) Upstream-repository: operator-lifecycle-manager Upstream-commit: 34ea6ce507501c025f7cfe42f628b131238b4b09 --- .../pkg/lib/event/event.go | 99 +++++- .../pkg/lib/event/event_test.go | 312 ++++++++++++++++++ .../pkg/lib/event/event.go | 99 +++++- 3 files changed, 488 insertions(+), 22 deletions(-) create mode 100644 staging/operator-lifecycle-manager/pkg/lib/event/event_test.go diff --git a/staging/operator-lifecycle-manager/pkg/lib/event/event.go b/staging/operator-lifecycle-manager/pkg/lib/event/event.go index 34382d35f8..6eefcfbc5c 100644 --- a/staging/operator-lifecycle-manager/pkg/lib/event/event.go +++ b/staging/operator-lifecycle-manager/pkg/lib/event/event.go @@ -4,6 +4,8 @@ import ( "fmt" v1 "k8s.io/api/core/v1" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" kscheme "k8s.io/client-go/kubernetes/scheme" typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/record" @@ -22,24 +24,99 @@ func init() { } } +// safeSpamKeyFunc builds a spam key from event fields with nil checks to prevent panics. +// This protects against nil pointer dereferences when event.InvolvedObject fields are empty. +func safeSpamKeyFunc(event *v1.Event) string { + if event == nil { + return "unknown/unknown/unknown/unknown" + } + + kind := event.InvolvedObject.Kind + namespace := event.InvolvedObject.Namespace + name := event.InvolvedObject.Name + reason := event.Reason + + // Provide defaults for empty fields to avoid issues + if kind == "" { + kind = "Unknown" + } + if name == "" { + name = "unknown" + } + + return fmt.Sprintf("%s/%s/%s/%s", kind, namespace, name, reason) +} + +// SafeEventRecorder wraps record.EventRecorder with nil checks to prevent panics +// when recording events for objects with nil or invalid metadata. +type SafeEventRecorder struct { + recorder record.EventRecorder +} + +// isValidObject checks if the object has valid metadata required for event recording. +func isValidObject(object runtime.Object) bool { + if object == nil { + return false + } + + // Handle ObjectReference type (used for events with FieldPath) + if ref, ok := object.(*v1.ObjectReference); ok { + return ref.Name != "" + } + + // Check if object implements metav1.Object interface + accessor, ok := object.(metav1.Object) + if !ok { + return false + } + + // Ensure the object has a valid name (required for event recording) + if accessor.GetName() == "" { + return false + } + + return true +} + +// Event records an event for the given object, with nil checks. 
+func (s *SafeEventRecorder) Event(object runtime.Object, eventtype, reason, message string) { + if !isValidObject(object) { + klog.V(4).Infof("Skipping event recording: invalid object (nil or missing name), reason=%s, message=%s", reason, message) + return + } + s.recorder.Event(object, eventtype, reason, message) +} + +// Eventf records a formatted event for the given object, with nil checks. +func (s *SafeEventRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + if !isValidObject(object) { + klog.V(4).Infof("Skipping event recording: invalid object (nil or missing name), reason=%s, messageFmt=%s", reason, messageFmt) + return + } + s.recorder.Eventf(object, eventtype, reason, messageFmt, args...) +} + +// AnnotatedEventf records a formatted event with annotations for the given object, with nil checks. +func (s *SafeEventRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + if !isValidObject(object) { + klog.V(4).Infof("Skipping event recording: invalid object (nil or missing name), reason=%s, messageFmt=%s", reason, messageFmt) + return + } + s.recorder.AnnotatedEventf(object, annotations, eventtype, reason, messageFmt, args...) +} + // NewRecorder returns an EventRecorder type that can be // used to post Events to different object's lifecycles. +// The returned recorder includes nil checks to prevent panics from invalid objects. 
func NewRecorder(event typedcorev1.EventInterface) (record.EventRecorder, error) { eventBroadcaster := record.NewBroadcasterWithCorrelatorOptions(record.CorrelatorOptions{ - BurstSize: 10, - SpamKeyFunc: func(event *v1.Event) string { - return fmt.Sprintf( - "%s/%s/%s/%s", - event.InvolvedObject.Kind, - event.InvolvedObject.Namespace, - event.InvolvedObject.Name, - event.Reason, - ) - }, + BurstSize: 10, + SpamKeyFunc: safeSpamKeyFunc, }) eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: event}) recorder := eventBroadcaster.NewRecorder(s, v1.EventSource{Component: component}) - return recorder, nil + // Wrap the recorder with SafeEventRecorder for nil protection + return &SafeEventRecorder{recorder: recorder}, nil } diff --git a/staging/operator-lifecycle-manager/pkg/lib/event/event_test.go b/staging/operator-lifecycle-manager/pkg/lib/event/event_test.go new file mode 100644 index 0000000000..67306c6c2a --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/lib/event/event_test.go @@ -0,0 +1,312 @@ +package event + +import ( + "testing" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" +) + +func TestSafeSpamKeyFunc(t *testing.T) { + tests := []struct { + name string + event *v1.Event + expected string + }{ + { + name: "nil event", + event: nil, + expected: "unknown/unknown/unknown/unknown", + }, + { + name: "empty event", + event: &v1.Event{ + InvolvedObject: v1.ObjectReference{}, + }, + expected: "Unknown//unknown/", + }, + { + name: "valid event", + event: &v1.Event{ + InvolvedObject: v1.ObjectReference{ + Kind: "Pod", + Namespace: "default", + Name: "test-pod", + }, + Reason: "Created", + }, + expected: "Pod/default/test-pod/Created", + }, + { + name: "event with empty kind", + event: &v1.Event{ + InvolvedObject: v1.ObjectReference{ + Namespace: "default", + Name: "test-pod", + }, + Reason: 
"Created", + }, + expected: "Unknown/default/test-pod/Created", + }, + { + name: "event with empty name", + event: &v1.Event{ + InvolvedObject: v1.ObjectReference{ + Kind: "Pod", + Namespace: "default", + }, + Reason: "Created", + }, + expected: "Pod/default/unknown/Created", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := safeSpamKeyFunc(tt.event) + if result != tt.expected { + t.Errorf("safeSpamKeyFunc() = %q, expected %q", result, tt.expected) + } + }) + } +} + +func TestIsValidObject(t *testing.T) { + tests := []struct { + name string + object runtime.Object + expected bool + }{ + { + name: "nil object", + object: nil, + expected: false, + }, + { + name: "valid pod", + object: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + }, + expected: true, + }, + { + name: "pod with empty name", + object: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + }, + expected: false, + }, + { + name: "valid namespace", + object: &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ns", + }, + }, + expected: true, + }, + { + name: "valid ObjectReference", + object: &v1.ObjectReference{ + Kind: "InstallPlan", + Namespace: "default", + Name: "test-plan", + FieldPath: "status.plan[0]", + }, + expected: true, + }, + { + name: "ObjectReference with empty name", + object: &v1.ObjectReference{ + Kind: "InstallPlan", + Namespace: "default", + FieldPath: "status.plan[0]", + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := isValidObject(tt.object) + if result != tt.expected { + t.Errorf("isValidObject() = %v, expected %v", result, tt.expected) + } + }) + } +} + +// mockEventRecorder is a mock implementation of record.EventRecorder for testing +type mockEventRecorder struct { + events []string +} + +func (m *mockEventRecorder) Event(object runtime.Object, eventtype, reason, message string) { + m.events = 
append(m.events, reason+":"+message) +} + +func (m *mockEventRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + m.events = append(m.events, reason+":"+messageFmt) +} + +func (m *mockEventRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + m.events = append(m.events, reason+":"+messageFmt) +} + +// Ensure mockEventRecorder implements record.EventRecorder +var _ record.EventRecorder = &mockEventRecorder{} + +func TestSafeEventRecorder_Event(t *testing.T) { + tests := []struct { + name string + object runtime.Object + expectRecorded bool + }{ + { + name: "nil object - should not record", + object: nil, + expectRecorded: false, + }, + { + name: "valid object - should record", + object: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + }, + expectRecorded: true, + }, + { + name: "object with empty name - should not record", + object: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + }, + expectRecorded: false, + }, + { + name: "valid ObjectReference - should record", + object: &v1.ObjectReference{ + Kind: "InstallPlan", + Namespace: "default", + Name: "test-plan", + FieldPath: "status.plan[0]", + }, + expectRecorded: true, + }, + { + name: "ObjectReference with empty name - should not record", + object: &v1.ObjectReference{ + Kind: "InstallPlan", + Namespace: "default", + }, + expectRecorded: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mock := &mockEventRecorder{} + safe := &SafeEventRecorder{recorder: mock} + + safe.Event(tt.object, v1.EventTypeNormal, "TestReason", "Test message") + + if tt.expectRecorded && len(mock.events) != 1 { + t.Errorf("Expected event to be recorded, but got %d events", len(mock.events)) + } + if !tt.expectRecorded && len(mock.events) != 0 { + t.Errorf("Expected no events to be recorded, but got 
%d events", len(mock.events)) + } + }) + } +} + +func TestSafeEventRecorder_Eventf(t *testing.T) { + tests := []struct { + name string + object runtime.Object + expectRecorded bool + }{ + { + name: "nil object - should not record", + object: nil, + expectRecorded: false, + }, + { + name: "valid object - should record", + object: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + }, + expectRecorded: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mock := &mockEventRecorder{} + safe := &SafeEventRecorder{recorder: mock} + + safe.Eventf(tt.object, v1.EventTypeNormal, "TestReason", "Test message %s", "arg") + + if tt.expectRecorded && len(mock.events) != 1 { + t.Errorf("Expected event to be recorded, but got %d events", len(mock.events)) + } + if !tt.expectRecorded && len(mock.events) != 0 { + t.Errorf("Expected no events to be recorded, but got %d events", len(mock.events)) + } + }) + } +} + +func TestSafeEventRecorder_AnnotatedEventf(t *testing.T) { + tests := []struct { + name string + object runtime.Object + expectRecorded bool + }{ + { + name: "nil object - should not record", + object: nil, + expectRecorded: false, + }, + { + name: "valid object - should record", + object: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + }, + expectRecorded: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mock := &mockEventRecorder{} + safe := &SafeEventRecorder{recorder: mock} + + annotations := map[string]string{"key": "value"} + safe.AnnotatedEventf(tt.object, annotations, v1.EventTypeNormal, "TestReason", "Test message %s", "arg") + + if tt.expectRecorded && len(mock.events) != 1 { + t.Errorf("Expected event to be recorded, but got %d events", len(mock.events)) + } + if !tt.expectRecorded && len(mock.events) != 0 { + t.Errorf("Expected no events to be recorded, but got %d events", len(mock.events)) + } + }) + } +} 
diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/event/event.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/event/event.go index 34382d35f8..6eefcfbc5c 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/event/event.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/event/event.go @@ -4,6 +4,8 @@ import ( "fmt" v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" kscheme "k8s.io/client-go/kubernetes/scheme" typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/record" @@ -22,24 +24,99 @@ func init() { } } +// safeSpamKeyFunc builds a spam key from event fields with nil checks to prevent panics. +// This protects against nil pointer dereferences when event.InvolvedObject fields are empty. +func safeSpamKeyFunc(event *v1.Event) string { + if event == nil { + return "unknown/unknown/unknown/unknown" + } + + kind := event.InvolvedObject.Kind + namespace := event.InvolvedObject.Namespace + name := event.InvolvedObject.Name + reason := event.Reason + + // Provide defaults for empty fields to avoid issues + if kind == "" { + kind = "Unknown" + } + if name == "" { + name = "unknown" + } + + return fmt.Sprintf("%s/%s/%s/%s", kind, namespace, name, reason) +} + +// SafeEventRecorder wraps record.EventRecorder with nil checks to prevent panics +// when recording events for objects with nil or invalid metadata. +type SafeEventRecorder struct { + recorder record.EventRecorder +} + +// isValidObject checks if the object has valid metadata required for event recording. 
+func isValidObject(object runtime.Object) bool { + if object == nil { + return false + } + + // Handle ObjectReference type (used for events with FieldPath) + if ref, ok := object.(*v1.ObjectReference); ok { + return ref.Name != "" + } + + // Check if object implements metav1.Object interface + accessor, ok := object.(metav1.Object) + if !ok { + return false + } + + // Ensure the object has a valid name (required for event recording) + if accessor.GetName() == "" { + return false + } + + return true +} + +// Event records an event for the given object, with nil checks. +func (s *SafeEventRecorder) Event(object runtime.Object, eventtype, reason, message string) { + if !isValidObject(object) { + klog.V(4).Infof("Skipping event recording: invalid object (nil or missing name), reason=%s, message=%s", reason, message) + return + } + s.recorder.Event(object, eventtype, reason, message) +} + +// Eventf records a formatted event for the given object, with nil checks. +func (s *SafeEventRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + if !isValidObject(object) { + klog.V(4).Infof("Skipping event recording: invalid object (nil or missing name), reason=%s, messageFmt=%s", reason, messageFmt) + return + } + s.recorder.Eventf(object, eventtype, reason, messageFmt, args...) +} + +// AnnotatedEventf records a formatted event with annotations for the given object, with nil checks. +func (s *SafeEventRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + if !isValidObject(object) { + klog.V(4).Infof("Skipping event recording: invalid object (nil or missing name), reason=%s, messageFmt=%s", reason, messageFmt) + return + } + s.recorder.AnnotatedEventf(object, annotations, eventtype, reason, messageFmt, args...) +} + // NewRecorder returns an EventRecorder type that can be // used to post Events to different object's lifecycles. 
+// The returned recorder includes nil checks to prevent panics from invalid objects. func NewRecorder(event typedcorev1.EventInterface) (record.EventRecorder, error) { eventBroadcaster := record.NewBroadcasterWithCorrelatorOptions(record.CorrelatorOptions{ - BurstSize: 10, - SpamKeyFunc: func(event *v1.Event) string { - return fmt.Sprintf( - "%s/%s/%s/%s", - event.InvolvedObject.Kind, - event.InvolvedObject.Namespace, - event.InvolvedObject.Name, - event.Reason, - ) - }, + BurstSize: 10, + SpamKeyFunc: safeSpamKeyFunc, }) eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: event}) recorder := eventBroadcaster.NewRecorder(s, v1.EventSource{Component: component}) - return recorder, nil + // Wrap the recorder with SafeEventRecorder for nil protection + return &SafeEventRecorder{recorder: recorder}, nil } From da02662decc220807c1c790eb34716d06b55cde1 Mon Sep 17 00:00:00 2001 From: Camila Macedo <7708031+camilamacedo86@users.noreply.github.com> Date: Tue, 20 Jan 2026 20:00:49 +0000 Subject: [PATCH 3/6] Add e2e test for operator workload persistence after catalog deletion (#3737) Assisted-by: Cursor/CLAUDE Upstream-repository: operator-lifecycle-manager Upstream-commit: afb9e80a6649342469da7fd1acb1e37291ef68e9 --- .../test/e2e/catalog_e2e_test.go | 403 +++++++++++++++++- 1 file changed, 401 insertions(+), 2 deletions(-) diff --git a/staging/operator-lifecycle-manager/test/e2e/catalog_e2e_test.go b/staging/operator-lifecycle-manager/test/e2e/catalog_e2e_test.go index 5ed3181460..f003611e92 100644 --- a/staging/operator-lifecycle-manager/test/e2e/catalog_e2e_test.go +++ b/staging/operator-lifecycle-manager/test/e2e/catalog_e2e_test.go @@ -25,6 +25,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -98,8 
+99,8 @@ var _ = Describe("Starting CatalogSource e2e tests", Label("CatalogSource"), fun defer func() { Eventually(func() error { - return ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), crd.GetName(), metav1.DeleteOptions{}) - }).Should(Or(Succeed(), WithTransform(k8serror.IsNotFound, BeTrue()))) + return client.IgnoreNotFound(ctx.Ctx().Client().Delete(context.Background(), &crd)) + }).Should(Succeed()) Eventually(func() error { return client.IgnoreNotFound(ctx.Ctx().Client().Delete(context.Background(), &csv)) }).Should(Succeed()) @@ -1783,6 +1784,404 @@ var _ = Describe("Starting CatalogSource e2e tests", Label("CatalogSource"), fun }) }) }) + + It("operator workload continues running after catalog source is deleted", func() { + By("Create CRD and CSV for operator") + packageName := genName("nginx-") + stableChannel := "stable" + packageStable := packageName + "-stable" + + crd := newCRD(genName("ins-")) + + // Create install strategy with permissions so OLM creates ServiceAccount, Role, and RoleBinding + serviceAccountName := genName("test-sa-") + permissions := []v1alpha1.StrategyDeploymentPermissions{ + { + ServiceAccountName: serviceAccountName, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + Verbs: []string{"get", "list", "watch"}, + }, + }, + }, + } + strategy := newNginxInstallStrategy(genName("dep-"), permissions, nil) + csv := newCSV(packageStable, generatedNamespace.GetName(), "", semver.MustParse("0.1.0"), []apiextensionsv1.CustomResourceDefinition{crd}, nil, &strategy) + + defer func() { + Eventually(func() error { + return ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), crd.GetName(), metav1.DeleteOptions{}) + }).Should(Or(Succeed(), WithTransform(k8serror.IsNotFound, BeTrue()))) + Eventually(func() error { + return 
client.IgnoreNotFound(ctx.Ctx().Client().Delete(context.Background(), &csv)) + }).Should(Succeed()) + }() + + manifests := []registry.PackageManifest{ + { + PackageName: packageName, + Channels: []registry.PackageChannel{ + {Name: stableChannel, CurrentCSVName: packageStable}, + }, + DefaultChannelName: stableChannel, + }, + } + + By("Create catalog source") + catalogSourceName := genName("test-catalog-") + catalogSource, cleanupCatalogSource := createInternalCatalogSource(c, crc, catalogSourceName, generatedNamespace.GetName(), manifests, []apiextensionsv1.CustomResourceDefinition{crd}, []v1alpha1.ClusterServiceVersion{csv}) + defer cleanupCatalogSource() + + By("Wait for catalog source to be ready") + _, err := fetchCatalogSourceOnStatus(crc, catalogSourceName, generatedNamespace.GetName(), catalogSourceRegistryPodSynced()) + Expect(err).ShouldNot(HaveOccurred()) + + By("Create subscription") + subscriptionName := genName("test-subscription-") + cleanupSubscription := createSubscriptionForCatalog(crc, generatedNamespace.GetName(), subscriptionName, catalogSourceName, packageName, stableChannel, "", v1alpha1.ApprovalAutomatic) + defer cleanupSubscription() + + By("Wait for subscription to be at latest version") + subscription, err := fetchSubscription(crc, generatedNamespace.GetName(), subscriptionName, subscriptionStateAtLatestChecker()) + Expect(err).ShouldNot(HaveOccurred()) + Expect(subscription).ShouldNot(BeNil()) + Expect(subscription.Status.InstalledCSV).To(Equal(packageStable)) + + By("Wait for CSV to succeed") + installedCSV, err := fetchCSV(crc, generatedNamespace.GetName(), subscription.Status.CurrentCSV, csvSucceededChecker) + Expect(err).ShouldNot(HaveOccurred()) + Expect(installedCSV).ShouldNot(BeNil()) + Expect(installedCSV.Status.Phase).To(Equal(v1alpha1.CSVPhaseSucceeded)) + + By("Get deployment name from CSV") + var deploymentName string + Expect(installedCSV.Spec.InstallStrategy.StrategyName).To(Equal(v1alpha1.InstallStrategyNameDeployment)) + 
strategyDetailsDeployment := installedCSV.Spec.InstallStrategy.StrategySpec + Expect(strategyDetailsDeployment.DeploymentSpecs).ToNot(BeEmpty()) + deploymentName = strategyDetailsDeployment.DeploymentSpecs[0].Name + + By("Wait for operator deployment to be ready") + var operatorDeployment *appsv1.Deployment + Eventually(func() error { + operatorDeployment, err = c.GetDeployment(generatedNamespace.GetName(), deploymentName) + if err != nil { + return err + } + if operatorDeployment.Spec.Replicas == nil || *operatorDeployment.Spec.Replicas == 0 { + return fmt.Errorf("deployment replicas is not set") + } + if operatorDeployment.Status.AvailableReplicas != *operatorDeployment.Spec.Replicas { + return fmt.Errorf("deployment %s not ready: %d/%d replicas available", + deploymentName, + operatorDeployment.Status.AvailableReplicas, + *operatorDeployment.Spec.Replicas) + } + if operatorDeployment.Status.ReadyReplicas != *operatorDeployment.Spec.Replicas { + return fmt.Errorf("deployment %s not ready: %d/%d replicas ready", + deploymentName, + operatorDeployment.Status.ReadyReplicas, + *operatorDeployment.Spec.Replicas) + } + return nil + }, pollDuration, pollInterval).Should(Succeed()) + + By("Record deployment state before catalog deletion") + deploymentUID := operatorDeployment.UID + expectedReplicas := *operatorDeployment.Spec.Replicas + + By("Verify ServiceAccount, Role, and RoleBinding created by OLM") + var serviceAccount *corev1.ServiceAccount + Eventually(func() error { + serviceAccount, err = c.KubernetesInterface().CoreV1().ServiceAccounts(generatedNamespace.GetName()).Get( + context.Background(), + serviceAccountName, + metav1.GetOptions{}, + ) + return err + }, pollDuration, pollInterval).Should(Succeed()) + serviceAccountUID := serviceAccount.UID + + // Roles and RoleBindings are owned by the CSV with generated names, so we list them by owner + ownerSelector := labels.SelectorFromSet(map[string]string{ + "olm.owner": installedCSV.GetName(), + "olm.owner.kind": 
"ClusterServiceVersion", + "olm.owner.namespace": generatedNamespace.GetName(), + }) + + var roleList *rbacv1.RoleList + Eventually(func() error { + roleList, err = c.KubernetesInterface().RbacV1().Roles(generatedNamespace.GetName()).List( + context.Background(), + metav1.ListOptions{LabelSelector: ownerSelector.String()}, + ) + if err != nil { + return err + } + if len(roleList.Items) == 0 { + return fmt.Errorf("no roles found owned by CSV") + } + return nil + }, pollDuration, pollInterval).Should(Succeed()) + roleUID := roleList.Items[0].UID + + var roleBindingList *rbacv1.RoleBindingList + Eventually(func() error { + roleBindingList, err = c.KubernetesInterface().RbacV1().RoleBindings(generatedNamespace.GetName()).List( + context.Background(), + metav1.ListOptions{LabelSelector: ownerSelector.String()}, + ) + if err != nil { + return err + } + if len(roleBindingList.Items) == 0 { + return fmt.Errorf("no rolebindings found owned by CSV") + } + return nil + }, pollDuration, pollInterval).Should(Succeed()) + roleBindingUID := roleBindingList.Items[0].UID + + By("Delete catalog source") + err = crc.OperatorsV1alpha1().CatalogSources(catalogSource.GetNamespace()).Delete(context.Background(), catalogSource.GetName(), metav1.DeleteOptions{}) + Expect(err).ShouldNot(HaveOccurred()) + + By("Wait for catalog source to be removed") + Eventually(func() error { + _, err := crc.OperatorsV1alpha1().CatalogSources(catalogSource.GetNamespace()).Get(context.Background(), catalogSource.GetName(), metav1.GetOptions{}) + if err == nil { + return fmt.Errorf("catalog source still exists") + } + if !k8serror.IsNotFound(err) { + return err + } + return nil + }, pollDuration, pollInterval).Should(Succeed()) + + By("Wait for catalog source pod to be deleted") + Eventually(func() error { + listOpts := metav1.ListOptions{ + LabelSelector: "olm.catalogSource=" + catalogSourceName, + } + pods, err := 
c.KubernetesInterface().CoreV1().Pods(catalogSource.GetNamespace()).List(context.Background(), listOpts) + if err != nil { + return err + } + if len(pods.Items) > 0 { + return fmt.Errorf("catalog source pod still exists: %d pods found", len(pods.Items)) + } + return nil + }, pollDuration, pollInterval).Should(Succeed()) + + By("Verify subscription behavior after catalog deletion") + Eventually(func() error { + sub, err := crc.OperatorsV1alpha1().Subscriptions(generatedNamespace.GetName()).Get( + context.Background(), + subscriptionName, + metav1.GetOptions{}, + ) + if err != nil { + return fmt.Errorf("failed to get subscription: %w", err) + } + + // Subscription should still track the installed CSV + if sub.Status.InstalledCSV != packageStable { + return fmt.Errorf("subscription InstalledCSV changed from %s to %s", packageStable, sub.Status.InstalledCSV) + } + + // Verify catalog health behavior: if the deleted catalog is still in the health list, + // it should be marked as unhealthy. If it's been removed from the list, that's also acceptable. 
+ for _, health := range sub.Status.CatalogHealth { + if health.CatalogSourceRef != nil && health.CatalogSourceRef.Name == catalogSourceName { + if health.Healthy { + return fmt.Errorf("subscription still reports deleted catalog %s as healthy", catalogSourceName) + } + } + } + + return nil + }, pollDuration, pollInterval).Should(Succeed()) + + By("Verify CSV remains in succeeded state after catalog deletion") + Consistently(func() error { + fetchedCSV, err := crc.OperatorsV1alpha1().ClusterServiceVersions(generatedNamespace.GetName()).Get( + context.Background(), + installedCSV.GetName(), + metav1.GetOptions{}, + ) + if err != nil { + return fmt.Errorf("failed to get CSV: %w", err) + } + if fetchedCSV.Status.Phase != v1alpha1.CSVPhaseSucceeded { + return fmt.Errorf("CSV phase is %s, expected Succeeded", fetchedCSV.Status.Phase) + } + return nil + }, 3*time.Minute, pollInterval).Should(Succeed()) + + By("Verify deployment remains healthy and unchanged") + Consistently(func() error { + deployment, err := c.GetDeployment(generatedNamespace.GetName(), deploymentName) + if err != nil { + return fmt.Errorf("failed to get deployment: %w", err) + } + if deployment.UID != deploymentUID { + return fmt.Errorf("deployment was recreated") + } + if deployment.Spec.Replicas == nil { + return fmt.Errorf("deployment replicas is nil") + } + if deployment.Status.AvailableReplicas != expectedReplicas { + return fmt.Errorf("available replicas: got %d, want %d", deployment.Status.AvailableReplicas, expectedReplicas) + } + if deployment.Status.ReadyReplicas != expectedReplicas { + return fmt.Errorf("ready replicas: got %d, want %d", deployment.Status.ReadyReplicas, expectedReplicas) + } + return nil + }, 3*time.Minute, pollInterval).Should(Succeed()) + + By("Test OLM config management - add environment variable via subscription") + Eventually(func() error { + sub, err := crc.OperatorsV1alpha1().Subscriptions(generatedNamespace.GetName()).Get( + context.Background(), + subscriptionName, + 
metav1.GetOptions{}, + ) + if err != nil { + return err + } + + if sub.Spec.Config == nil { + sub.Spec.Config = &v1alpha1.SubscriptionConfig{} + } + sub.Spec.Config.Env = []corev1.EnvVar{ + {Name: "TEST_ENV_VAR", Value: "test-value"}, + } + + _, err = crc.OperatorsV1alpha1().Subscriptions(generatedNamespace.GetName()).Update( + context.Background(), + sub, + metav1.UpdateOptions{}, + ) + return err + }, pollDuration, pollInterval).Should(Succeed()) + + By("Wait for deployment to have the environment variable") + Eventually(func() error { + deployment, err := c.GetDeployment(generatedNamespace.GetName(), deploymentName) + if err != nil { + return err + } + if len(deployment.Spec.Template.Spec.Containers) == 0 { + return fmt.Errorf("no containers in deployment") + } + container := deployment.Spec.Template.Spec.Containers[0] + for _, env := range container.Env { + if env.Name == "TEST_ENV_VAR" && env.Value == "test-value" { + return nil + } + } + return fmt.Errorf("TEST_ENV_VAR not found in deployment") + }, pollDuration, pollInterval).Should(Succeed()) + + By("Delete the operator deployment to test OLM reconciliation") + err = c.KubernetesInterface().AppsV1().Deployments(generatedNamespace.GetName()).Delete( + context.Background(), + deploymentName, + metav1.DeleteOptions{}, + ) + Expect(err).ShouldNot(HaveOccurred()) + + By("Wait for deployment to be deleted") + Eventually(func() error { + _, err := c.GetDeployment(generatedNamespace.GetName(), deploymentName) + if err == nil { + return fmt.Errorf("deployment still exists") + } + if !k8serror.IsNotFound(err) { + return err + } + return nil + }, pollDuration, pollInterval).Should(Succeed()) + + By("Wait for OLM to recreate the deployment") + Eventually(func() error { + deployment, err := c.GetDeployment(generatedNamespace.GetName(), deploymentName) + if err != nil { + return fmt.Errorf("deployment not recreated yet: %w", err) + } + if deployment.UID == deploymentUID { + return fmt.Errorf("deployment UID unchanged, 
not recreated") + } + if deployment.Spec.Replicas == nil { + return fmt.Errorf("deployment replicas is nil") + } + if deployment.Status.AvailableReplicas != expectedReplicas { + return fmt.Errorf("available replicas: got %d, want %d", deployment.Status.AvailableReplicas, expectedReplicas) + } + if deployment.Status.ReadyReplicas != expectedReplicas { + return fmt.Errorf("ready replicas: got %d, want %d", deployment.Status.ReadyReplicas, expectedReplicas) + } + return nil + }, pollDuration, pollInterval).Should(Succeed()) + + By("Verify all resources were recreated by OLM with correct configuration") + recreatedDeployment, err := c.GetDeployment(generatedNamespace.GetName(), deploymentName) + Expect(err).ShouldNot(HaveOccurred()) + Expect(recreatedDeployment.UID).ToNot(Equal(deploymentUID), "deployment should have been recreated with new UID") + + recreatedServiceAccount, err := c.KubernetesInterface().CoreV1().ServiceAccounts(generatedNamespace.GetName()).Get( + context.Background(), + serviceAccountName, + metav1.GetOptions{}, + ) + Expect(err).ShouldNot(HaveOccurred()) + Expect(recreatedServiceAccount.UID).To(Equal(serviceAccountUID), "serviceaccount should not have been recreated (same UID)") + + recreatedRoleList, err := c.KubernetesInterface().RbacV1().Roles(generatedNamespace.GetName()).List( + context.Background(), + metav1.ListOptions{LabelSelector: ownerSelector.String()}, + ) + Expect(err).ShouldNot(HaveOccurred()) + Expect(len(recreatedRoleList.Items)).To(BeNumerically(">", 0), "at least one role should exist") + Expect(recreatedRoleList.Items[0].UID).To(Equal(roleUID), "role should not have been recreated (same UID)") + + recreatedRoleBindingList, err := c.KubernetesInterface().RbacV1().RoleBindings(generatedNamespace.GetName()).List( + context.Background(), + metav1.ListOptions{LabelSelector: ownerSelector.String()}, + ) + Expect(err).ShouldNot(HaveOccurred()) + Expect(len(recreatedRoleBindingList.Items)).To(BeNumerically(">", 0), "at least one 
rolebinding should exist") + Expect(recreatedRoleBindingList.Items[0].UID).To(Equal(roleBindingUID), "rolebinding should not have been recreated (same UID)") + + // Verify the environment variable from subscription config is still present + Expect(len(recreatedDeployment.Spec.Template.Spec.Containers)).To(BeNumerically(">", 0)) + container := recreatedDeployment.Spec.Template.Spec.Containers[0] + envVarFound := false + for _, env := range container.Env { + if env.Name == "TEST_ENV_VAR" && env.Value == "test-value" { + envVarFound = true + break + } + } + Expect(envVarFound).To(BeTrue(), "TEST_ENV_VAR should be present in recreated deployment") + + By("Verify subscription still tracks installed CSV") + fetchedSubscription, err := crc.OperatorsV1alpha1().Subscriptions(generatedNamespace.GetName()).Get( + context.Background(), + subscriptionName, + metav1.GetOptions{}, + ) + Expect(err).ShouldNot(HaveOccurred()) + Expect(fetchedSubscription.Status.InstalledCSV).To(Equal(packageStable)) + + By("Verify CRD still exists and is functional") + _, err = c.ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Get( + context.Background(), + crd.GetName(), + metav1.GetOptions{}, + ) + Expect(err).ShouldNot(HaveOccurred()) + }) }) func getOperatorDeployment(c operatorclient.ClientInterface, namespace string, operatorLabels labels.Set) (*appsv1.Deployment, error) { From c71393da9c9823a2bd12f312d125e32908b69864 Mon Sep 17 00:00:00 2001 From: Jian Zhang Date: Wed, 21 Jan 2026 23:34:42 +0800 Subject: [PATCH 4/6] Fix server startup failure when client-ca is empty or not provided (#3742) Upstream-repository: operator-lifecycle-manager Upstream-commit: 01449fd033f5e4597c971d2ef14832c109d878d9 --- .../pkg/lib/server/server.go | 46 ++++++-- .../pkg/lib/server/server_test.go | 109 ++++++++++++++++++ .../pkg/lib/server/server.go | 46 ++++++-- 3 files changed, 177 insertions(+), 24 deletions(-) diff --git a/staging/operator-lifecycle-manager/pkg/lib/server/server.go 
b/staging/operator-lifecycle-manager/pkg/lib/server/server.go index 3fab132997..90608987ea 100644 --- a/staging/operator-lifecycle-manager/pkg/lib/server/server.go +++ b/staging/operator-lifecycle-manager/pkg/lib/server/server.go @@ -3,6 +3,7 @@ package server import ( "context" "crypto/tls" + "crypto/x509" "fmt" "net/http" "path/filepath" @@ -16,6 +17,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/metrics/filters" ) +// certPoolGetter is an interface for getting a certificate pool +type certPoolGetter interface { + GetCertPool() *x509.CertPool +} + // Option applies a configuration option to the given config. type Option func(s *serverConfig) @@ -94,6 +100,10 @@ func (sc *serverConfig) getAddress(tlsEnabled bool) string { return ":8080" } +func (sc *serverConfig) clientCAEnabled() bool { + return sc.clientCAPath != nil && *sc.clientCAPath != "" +} + func (sc serverConfig) getListenAndServeFunc() (func() error, error) { tlsEnabled, err := sc.tlsEnabled() if err != nil { @@ -168,15 +178,23 @@ func (sc serverConfig) getListenAndServeFunc() (func() error, error) { return nil, fmt.Errorf("error creating cert file watcher: %v", err) } csw.Run(context.Background()) - certPoolStore, err := filemonitor.NewCertPoolStore(*sc.clientCAPath) - if err != nil { - return nil, fmt.Errorf("certificate monitoring for client-ca failed: %v", err) - } - cpsw, err := filemonitor.NewWatch(sc.logger, []string{filepath.Dir(*sc.clientCAPath)}, certPoolStore.HandleCABundleUpdate) - if err != nil { - return nil, fmt.Errorf("error creating cert file watcher: %v", err) + + // Only setup client CA monitoring if clientCAPath is provided + var certPoolStore certPoolGetter + if sc.clientCAEnabled() { + cps, err := filemonitor.NewCertPoolStore(*sc.clientCAPath) + if err != nil { + return nil, fmt.Errorf("certificate monitoring for client-ca failed: %v", err) + } + cpsw, err := filemonitor.NewWatch(sc.logger, []string{filepath.Dir(*sc.clientCAPath)}, cps.HandleCABundleUpdate) + if err != nil { + 
return nil, fmt.Errorf("error creating cert file watcher: %v", err) + } + cpsw.Run(context.Background()) + certPoolStore = cps + } else { + sc.logger.Info("No client CA provided, client certificate verification disabled") } - cpsw.Run(context.Background()) s.TLSConfig = &tls.Config{ GetCertificate: func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { @@ -187,11 +205,15 @@ func (sc serverConfig) getListenAndServeFunc() (func() error, error) { if cert := certStore.GetCertificate(); cert != nil { certs = append(certs, *cert) } - return &tls.Config{ + tlsCfg := &tls.Config{ Certificates: certs, - ClientCAs: certPoolStore.GetCertPool(), - ClientAuth: tls.VerifyClientCertIfGiven, - }, nil + } + // Only configure client CA verification if certPoolStore is available + if certPoolStore != nil { + tlsCfg.ClientCAs = certPoolStore.GetCertPool() + tlsCfg.ClientAuth = tls.VerifyClientCertIfGiven + } + return tlsCfg, nil }, NextProtos: []string{"http/1.1"}, // Disable HTTP/2 for security } diff --git a/staging/operator-lifecycle-manager/pkg/lib/server/server_test.go b/staging/operator-lifecycle-manager/pkg/lib/server/server_test.go index f94f63fadc..f8391def0a 100644 --- a/staging/operator-lifecycle-manager/pkg/lib/server/server_test.go +++ b/staging/operator-lifecycle-manager/pkg/lib/server/server_test.go @@ -111,6 +111,115 @@ func TestGetListenAndServeFunc_WithoutKubeConfig(t *testing.T) { assert.NoError(t, err, "GetListenAndServeFunc should succeed without kubeConfig") } +// TestGetListenAndServeFunc_WithEmptyClientCA tests that the server +// starts successfully when TLS is enabled but client-ca is empty +func TestGetListenAndServeFunc_WithEmptyClientCA(t *testing.T) { + // Generate test certificates dynamically + caCert, caKey, err := generateCA() + require.NoError(t, err) + + serverCert, serverKey, err := generateServerCert(caCert, caKey, "localhost") + require.NoError(t, err) + + tmpDir, err := os.MkdirTemp("", "server-test-*") + require.NoError(t, err) + defer 
os.RemoveAll(tmpDir) + + tlsCertPath := filepath.Join(tmpDir, "tls.crt") + tlsKeyPath := filepath.Join(tmpDir, "tls.key") + emptyClientCAPath := "" // Empty client CA path + + err = os.WriteFile(tlsCertPath, serverCert, 0644) + require.NoError(t, err) + err = os.WriteFile(tlsKeyPath, serverKey, 0600) + require.NoError(t, err) + + logger := logrus.New() + logger.SetOutput(io.Discard) + + // Test with TLS enabled but empty client CA - should succeed + _, err = GetListenAndServeFunc( + WithLogger(logger), + WithTLS(&tlsCertPath, &tlsKeyPath, &emptyClientCAPath), + WithDebug(false), + ) + + assert.NoError(t, err, "GetListenAndServeFunc should succeed with empty client-ca") +} + +// TestGetListenAndServeFunc_WithNilClientCA tests that the server +// starts successfully when TLS is enabled but client-ca pointer is nil +func TestGetListenAndServeFunc_WithNilClientCA(t *testing.T) { + // Generate test certificates dynamically + caCert, caKey, err := generateCA() + require.NoError(t, err) + + serverCert, serverKey, err := generateServerCert(caCert, caKey, "localhost") + require.NoError(t, err) + + tmpDir, err := os.MkdirTemp("", "server-test-*") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + + tlsCertPath := filepath.Join(tmpDir, "tls.crt") + tlsKeyPath := filepath.Join(tmpDir, "tls.key") + + err = os.WriteFile(tlsCertPath, serverCert, 0644) + require.NoError(t, err) + err = os.WriteFile(tlsKeyPath, serverKey, 0600) + require.NoError(t, err) + + logger := logrus.New() + logger.SetOutput(io.Discard) + + // Test with TLS enabled but nil client CA pointer - should succeed + _, err = GetListenAndServeFunc( + WithLogger(logger), + WithTLS(&tlsCertPath, &tlsKeyPath, nil), + WithDebug(false), + ) + + assert.NoError(t, err, "GetListenAndServeFunc should succeed with nil client-ca pointer") +} + +// TestClientCAEnabled tests the clientCAEnabled helper function +func TestClientCAEnabled(t *testing.T) { + tests := []struct { + name string + clientCAPath *string + expected 
bool + }{ + { + name: "nil pointer", + clientCAPath: nil, + expected: false, + }, + { + name: "empty string", + clientCAPath: strPtr(""), + expected: false, + }, + { + name: "valid path", + clientCAPath: strPtr("/path/to/ca.crt"), + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sc := &serverConfig{ + clientCAPath: tt.clientCAPath, + } + assert.Equal(t, tt.expected, sc.clientCAEnabled(), "clientCAEnabled result should match expected") + }) + } +} + +func strPtr(s string) *string { + return &s +} + // TestHTTPClientHasTLSConfig verifies that rest.HTTPClientFor creates a client // with proper TLS configuration including CA certificates func TestHTTPClientHasTLSConfig(t *testing.T) { diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/server/server.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/server/server.go index 3fab132997..90608987ea 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/server/server.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/server/server.go @@ -3,6 +3,7 @@ package server import ( "context" "crypto/tls" + "crypto/x509" "fmt" "net/http" "path/filepath" @@ -16,6 +17,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/metrics/filters" ) +// certPoolGetter is an interface for getting a certificate pool +type certPoolGetter interface { + GetCertPool() *x509.CertPool +} + // Option applies a configuration option to the given config. 
type Option func(s *serverConfig) @@ -94,6 +100,10 @@ func (sc *serverConfig) getAddress(tlsEnabled bool) string { return ":8080" } +func (sc *serverConfig) clientCAEnabled() bool { + return sc.clientCAPath != nil && *sc.clientCAPath != "" +} + func (sc serverConfig) getListenAndServeFunc() (func() error, error) { tlsEnabled, err := sc.tlsEnabled() if err != nil { @@ -168,15 +178,23 @@ func (sc serverConfig) getListenAndServeFunc() (func() error, error) { return nil, fmt.Errorf("error creating cert file watcher: %v", err) } csw.Run(context.Background()) - certPoolStore, err := filemonitor.NewCertPoolStore(*sc.clientCAPath) - if err != nil { - return nil, fmt.Errorf("certificate monitoring for client-ca failed: %v", err) - } - cpsw, err := filemonitor.NewWatch(sc.logger, []string{filepath.Dir(*sc.clientCAPath)}, certPoolStore.HandleCABundleUpdate) - if err != nil { - return nil, fmt.Errorf("error creating cert file watcher: %v", err) + + // Only setup client CA monitoring if clientCAPath is provided + var certPoolStore certPoolGetter + if sc.clientCAEnabled() { + cps, err := filemonitor.NewCertPoolStore(*sc.clientCAPath) + if err != nil { + return nil, fmt.Errorf("certificate monitoring for client-ca failed: %v", err) + } + cpsw, err := filemonitor.NewWatch(sc.logger, []string{filepath.Dir(*sc.clientCAPath)}, cps.HandleCABundleUpdate) + if err != nil { + return nil, fmt.Errorf("error creating cert file watcher: %v", err) + } + cpsw.Run(context.Background()) + certPoolStore = cps + } else { + sc.logger.Info("No client CA provided, client certificate verification disabled") } - cpsw.Run(context.Background()) s.TLSConfig = &tls.Config{ GetCertificate: func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { @@ -187,11 +205,15 @@ func (sc serverConfig) getListenAndServeFunc() (func() error, error) { if cert := certStore.GetCertificate(); cert != nil { certs = append(certs, *cert) } - return &tls.Config{ + tlsCfg := &tls.Config{ Certificates: certs, - ClientCAs: 
certPoolStore.GetCertPool(), - ClientAuth: tls.VerifyClientCertIfGiven, - }, nil + } + // Only configure client CA verification if certPoolStore is available + if certPoolStore != nil { + tlsCfg.ClientCAs = certPoolStore.GetCertPool() + tlsCfg.ClientAuth = tls.VerifyClientCertIfGiven + } + return tlsCfg, nil }, NextProtos: []string{"http/1.1"}, // Disable HTTP/2 for security } From d4162cc12eb34d84549943db54c4cda56126e918 Mon Sep 17 00:00:00 2001 From: Todd Short Date: Thu, 22 Jan 2026 10:25:12 -0500 Subject: [PATCH 5/6] Add APIServer TLS controller for OpenShift cluster-wide TLS configuration (#3739) Implements a controller that watches the OpenShift APIServer resource and provides thread-safe access to TLS configuration for HTTPS servers/clients. The controller is modeled after the existing proxy controller. This is intended to be used by the OLM operator, Catalog operator and marketplace operator (but it will need to be copied). It will also be used by the downstream PSM component. Assisted-By: Claude Signed-off-by: Todd Short Upstream-repository: operator-lifecycle-manager Upstream-commit: 6fc19e0f6397a572e6bd4e9984aa213960553691 --- go.mod | 1 + go.sum | 3 + staging/operator-lifecycle-manager/go.mod | 1 + staging/operator-lifecycle-manager/go.sum | 3 + .../pkg/controller/operators/olm/operator.go | 12 +- .../pkg/lib/apiserver/querier.go | 35 + .../pkg/lib/apiserver/querier_test.go | 86 ++ .../pkg/lib/apiserver/syncer.go | 212 +++ .../pkg/lib/apiserver/syncer_test.go | 232 ++++ .../pkg/lib/apiserver/tlsconfig.go | 105 ++ .../pkg/lib/apiserver/tlsconfig_test.go | 190 +++ .../{proxy => openshiftconfig}/available.go | 17 +- .../pkg/lib/proxy/syncer.go | 3 +- .../pkg/lib/server/server.go | 41 +- .../github.com/openshift/library-go/LICENSE | 201 +++ .../openshift/library-go/pkg/crypto/OWNERS | 4 + .../openshift/library-go/pkg/crypto/crypto.go | 1214 +++++++++++++++++ .../library-go/pkg/crypto/rotation.go | 20 + .../pkg/controller/operators/olm/operator.go | 12 +- 
.../pkg/lib/apiserver/querier.go | 35 + .../pkg/lib/apiserver/syncer.go | 212 +++ .../pkg/lib/apiserver/tlsconfig.go | 105 ++ .../{proxy => openshiftconfig}/available.go | 17 +- .../pkg/lib/proxy/syncer.go | 3 +- .../pkg/lib/server/server.go | 41 +- vendor/modules.txt | 5 + 26 files changed, 2752 insertions(+), 58 deletions(-) create mode 100644 staging/operator-lifecycle-manager/pkg/lib/apiserver/querier.go create mode 100644 staging/operator-lifecycle-manager/pkg/lib/apiserver/querier_test.go create mode 100644 staging/operator-lifecycle-manager/pkg/lib/apiserver/syncer.go create mode 100644 staging/operator-lifecycle-manager/pkg/lib/apiserver/syncer_test.go create mode 100644 staging/operator-lifecycle-manager/pkg/lib/apiserver/tlsconfig.go create mode 100644 staging/operator-lifecycle-manager/pkg/lib/apiserver/tlsconfig_test.go rename staging/operator-lifecycle-manager/pkg/lib/{proxy => openshiftconfig}/available.go (73%) create mode 100644 vendor/github.com/openshift/library-go/LICENSE create mode 100644 vendor/github.com/openshift/library-go/pkg/crypto/OWNERS create mode 100644 vendor/github.com/openshift/library-go/pkg/crypto/crypto.go create mode 100644 vendor/github.com/openshift/library-go/pkg/crypto/rotation.go create mode 100644 vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver/querier.go create mode 100644 vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver/syncer.go create mode 100644 vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver/tlsconfig.go rename vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/{proxy => openshiftconfig}/available.go (73%) diff --git a/go.mod b/go.mod index 34a846cea9..724a44f3f3 100644 --- a/go.mod +++ b/go.mod @@ -151,6 +151,7 @@ require ( github.com/opencontainers/image-spec v1.1.1 // indirect github.com/opencontainers/runtime-spec v1.2.1 // indirect github.com/openshift/client-go 
v0.0.0-20251015124057-db0dee36e235 // indirect + github.com/openshift/library-go v0.0.0-20260108135436-db8dbd64c462 // indirect github.com/otiai10/copy v1.14.1 // indirect github.com/otiai10/mint v1.6.3 // indirect github.com/pkg/errors v0.9.1 // indirect diff --git a/go.sum b/go.sum index 8444576b85..72c2799d8c 100644 --- a/go.sum +++ b/go.sum @@ -280,6 +280,7 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ= github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -420,6 +421,8 @@ github.com/openshift/api v0.0.0-20251111193948-50e2ece149d7 h1:MemawsK6SpxEaE5y0 github.com/openshift/api v0.0.0-20251111193948-50e2ece149d7/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= github.com/openshift/client-go v0.0.0-20251015124057-db0dee36e235 h1:9JBeIXmnHlpXTQPi7LPmu1jdxznBhAE7bb1K+3D8gxY= github.com/openshift/client-go v0.0.0-20251015124057-db0dee36e235/go.mod h1:L49W6pfrZkfOE5iC1PqEkuLkXG4W0BX4w8b+L2Bv7fM= +github.com/openshift/library-go v0.0.0-20260108135436-db8dbd64c462 h1:zX9Od4Jg8sVmwQLwk6Vd+BX7tcyC/462FVvDdzHEPPk= +github.com/openshift/library-go v0.0.0-20260108135436-db8dbd64c462/go.mod h1:nIzWQQE49XbiKizVnVOip9CEB7HJ0hoJwNi3g3YKnKc= github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= github.com/otiai10/mint v1.6.3 
h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= diff --git a/staging/operator-lifecycle-manager/go.mod b/staging/operator-lifecycle-manager/go.mod index fc2372fba6..3256e28e06 100644 --- a/staging/operator-lifecycle-manager/go.mod +++ b/staging/operator-lifecycle-manager/go.mod @@ -20,6 +20,7 @@ require ( github.com/onsi/gomega v1.39.0 github.com/openshift/api v0.0.0-20251111193948-50e2ece149d7 github.com/openshift/client-go v0.0.0-20251015124057-db0dee36e235 + github.com/openshift/library-go v0.0.0-20260108135436-db8dbd64c462 github.com/operator-framework/api v0.37.0 github.com/operator-framework/operator-registry v1.61.0 github.com/otiai10/copy v1.14.1 diff --git a/staging/operator-lifecycle-manager/go.sum b/staging/operator-lifecycle-manager/go.sum index 3a28495536..c60ba85135 100644 --- a/staging/operator-lifecycle-manager/go.sum +++ b/staging/operator-lifecycle-manager/go.sum @@ -223,6 +223,7 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ= github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -322,6 +323,8 @@ github.com/openshift/api v0.0.0-20251111193948-50e2ece149d7 h1:MemawsK6SpxEaE5y0 github.com/openshift/api v0.0.0-20251111193948-50e2ece149d7/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= github.com/openshift/client-go v0.0.0-20251015124057-db0dee36e235 h1:9JBeIXmnHlpXTQPi7LPmu1jdxznBhAE7bb1K+3D8gxY= 
github.com/openshift/client-go v0.0.0-20251015124057-db0dee36e235/go.mod h1:L49W6pfrZkfOE5iC1PqEkuLkXG4W0BX4w8b+L2Bv7fM= +github.com/openshift/library-go v0.0.0-20260108135436-db8dbd64c462 h1:zX9Od4Jg8sVmwQLwk6Vd+BX7tcyC/462FVvDdzHEPPk= +github.com/openshift/library-go v0.0.0-20260108135436-db8dbd64c462/go.mod h1:nIzWQQE49XbiKizVnVOip9CEB7HJ0hoJwNi3g3YKnKc= github.com/operator-framework/api v0.37.0 h1:2XCMWitBnumtJTqzip6LQKUwpM2pXVlt3gkpdlkbaCE= github.com/operator-framework/api v0.37.0/go.mod h1:NZs4vB+Jiamyv3pdPDjZtuC4U7KX0eq4z2r5hKY5fUA= github.com/operator-framework/operator-registry v1.61.0 h1:LgX6lP5hUHfpMTMygsnySc7PKxibzqIoqWUm6NPWl2M= diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/olm/operator.go b/staging/operator-lifecycle-manager/pkg/controller/operators/olm/operator.go index 838010d190..cd9bb45e9c 100644 --- a/staging/operator-lifecycle-manager/pkg/controller/operators/olm/operator.go +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/olm/operator.go @@ -57,6 +57,7 @@ import ( "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/event" index "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/index" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/labeler" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/openshiftconfig" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorlister" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil" @@ -852,19 +853,20 @@ func newOperatorWithConfig(ctx context.Context, config *operatorConfig) (*Operat return nil, err } - // setup proxy env var injection policies + // Check if OpenShift config API is available (used by proxy and apiserver controllers) discovery := config.operatorClient.KubernetesInterface().Discovery() - proxyAPIExists, err := proxy.IsAPIAvailable(discovery) + 
openshiftConfigAPIExists, err := openshiftconfig.IsAPIAvailable(discovery) if err != nil { - op.logger.Errorf("error happened while probing for Proxy API support - %v", err) + op.logger.Errorf("error happened while probing for OpenShift config API support - %v", err) return nil, err } + // setup proxy env var injection policies proxyQuerierInUse := proxy.NoopQuerier() - if proxyAPIExists { + if openshiftConfigAPIExists { op.logger.Info("OpenShift Proxy API available - setting up watch for Proxy type") - proxyInformer, proxySyncer, proxyQuerier, err := proxy.NewSyncer(op.logger, config.configClient, discovery) + proxyInformer, proxySyncer, proxyQuerier, err := proxy.NewSyncer(op.logger, config.configClient) if err != nil { err = fmt.Errorf("failed to initialize syncer for Proxy type - %v", err) return nil, err diff --git a/staging/operator-lifecycle-manager/pkg/lib/apiserver/querier.go b/staging/operator-lifecycle-manager/pkg/lib/apiserver/querier.go new file mode 100644 index 0000000000..94963890e1 --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/lib/apiserver/querier.go @@ -0,0 +1,35 @@ +package apiserver + +import ( + "crypto/tls" + "fmt" +) + +// NoopQuerier returns an instance of noopQuerier. It's used for upstream where +// we don't have any apiserver.config.openshift.io/cluster resource. +func NoopQuerier() Querier { + return &noopQuerier{} +} + +// Querier is an interface that wraps the QueryTLSConfig method. +// +// QueryTLSConfig updates the provided TLS configuration with cluster-wide +// TLS security profile settings (MinVersion, CipherSuites, PreferServerCipherSuites). +type Querier interface { + QueryTLSConfig(config *tls.Config) error +} + +type noopQuerier struct { +} + +// QueryTLSConfig applies secure default TLS settings to the provided config. +// This is used on non-OpenShift clusters where there is no apiserver.config.openshift.io/cluster resource, +// but we still want to ensure secure TLS configuration. 
+func (*noopQuerier) QueryTLSConfig(config *tls.Config) error { + if config == nil { + return fmt.Errorf("tls.Config cannot be nil") + } + + // Apply secure defaults for non-OpenShift clusters + return ApplySecureDefaults(config) +} diff --git a/staging/operator-lifecycle-manager/pkg/lib/apiserver/querier_test.go b/staging/operator-lifecycle-manager/pkg/lib/apiserver/querier_test.go new file mode 100644 index 0000000000..7b51fa1a9a --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/lib/apiserver/querier_test.go @@ -0,0 +1,86 @@ +package apiserver_test + +import ( + "crypto/tls" + "testing" + + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNoopQuerier_QueryTLSConfig(t *testing.T) { + tests := []struct { + name string + config *tls.Config + expectError bool + errorMsg string + }{ + { + name: "WithNilConfig", + config: nil, + expectError: true, + errorMsg: "tls.Config cannot be nil", + }, + { + name: "WithEmptyConfig", + config: &tls.Config{}, + expectError: false, + }, + { + name: "WithPartialConfig", + config: &tls.Config{ + MinVersion: tls.VersionTLS12, + }, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + querier := apiserver.NoopQuerier() + err := querier.QueryTLSConfig(tt.config) + + if tt.expectError { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.errorMsg) + } else { + require.NoError(t, err) + // Verify secure defaults are applied + assert.NotZero(t, tt.config.MinVersion, "MinVersion should be set to a default") + assert.NotEmpty(t, tt.config.CipherSuites, "CipherSuites should be set to defaults") + assert.True(t, tt.config.PreferServerCipherSuites, "PreferServerCipherSuites should be true") + } + }) + } +} + +func TestNoopQuerier_AppliesSecureDefaults(t *testing.T) { + querier := apiserver.NoopQuerier() + config := &tls.Config{} + + err := 
querier.QueryTLSConfig(config) + require.NoError(t, err) + + // Verify secure defaults + assert.GreaterOrEqual(t, config.MinVersion, uint16(tls.VersionTLS12), "Should use at least TLS 1.2") + assert.NotEmpty(t, config.CipherSuites, "Should have cipher suites configured") + + // Verify cipher suites are valid + for _, cipher := range config.CipherSuites { + assert.NotZero(t, cipher, "Cipher suite should not be zero") + } +} + +func TestNoopQuerier_DoesNotOverwriteNonZeroMinVersion(t *testing.T) { + querier := apiserver.NoopQuerier() + config := &tls.Config{ + MinVersion: tls.VersionTLS13, + } + + err := querier.QueryTLSConfig(config) + require.NoError(t, err) + + // MinVersion should be preserved if already set + assert.Equal(t, uint16(tls.VersionTLS13), config.MinVersion, "Should preserve existing MinVersion") +} diff --git a/staging/operator-lifecycle-manager/pkg/lib/apiserver/syncer.go b/staging/operator-lifecycle-manager/pkg/lib/apiserver/syncer.go new file mode 100644 index 0000000000..4b3793cd62 --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/lib/apiserver/syncer.go @@ -0,0 +1,212 @@ +package apiserver + +import ( + "crypto/tls" + "fmt" + "sync" + "time" + + "github.com/openshift/client-go/config/informers/externalversions" + + apiconfigv1 "github.com/openshift/api/config/v1" + configv1client "github.com/openshift/client-go/config/clientset/versioned" + configv1 "github.com/openshift/client-go/config/informers/externalversions/config/v1" + "github.com/sirupsen/logrus" + "k8s.io/client-go/tools/cache" +) + +const ( + // This is the cluster level global apiserver.config.openshift.io/cluster object name. + globalAPIServerName = "cluster" + + // default sync interval + defaultSyncInterval = 30 * time.Minute +) + +// NewSyncer returns informer and sync functions to enable watch of the apiserver.config.openshift.io/cluster resource. 
+func NewSyncer(logger *logrus.Logger, client configv1client.Interface) (apiServerInformer configv1.APIServerInformer, syncer *Syncer, querier Querier, factory externalversions.SharedInformerFactory, err error) { + factory = externalversions.NewSharedInformerFactoryWithOptions(client, defaultSyncInterval) + apiServerInformer = factory.Config().V1().APIServers() + s := &Syncer{ + logger: logger, + currentConfig: newTLSConfigHolder(), + } + + syncer = s + querier = s + return +} + +// RegisterEventHandlers registers event handlers for apiserver.config.openshift.io/cluster resource changes. +// This is a convenience function to set up Add/Update/Delete handlers that call +// the syncer's SyncAPIServer and HandleAPIServerDelete methods. +func RegisterEventHandlers(informer configv1.APIServerInformer, syncer *Syncer) { + informer.Informer().AddEventHandler(&cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + if err := syncer.SyncAPIServer(obj); err != nil { + syncer.logger.WithError(err).Error("error syncing APIServer on add") + } + }, + UpdateFunc: func(_, newObj interface{}) { + if err := syncer.SyncAPIServer(newObj); err != nil { + syncer.logger.WithError(err).Error("error syncing APIServer on update") + } + }, + DeleteFunc: func(obj interface{}) { + syncer.HandleAPIServerDelete(obj) + }, + }) +} + +// Syncer deals with watching APIServer type(s) on the cluster and let the caller +// query for cluster scoped APIServer TLS configuration. +type Syncer struct { + logger *logrus.Logger + currentConfig *tlsConfigHolder +} + +// tlsConfigHolder holds TLS configuration in a thread-safe manner. +// It always contains a valid configuration with secure defaults. +type tlsConfigHolder struct { + mu sync.RWMutex + config tls.Config +} + +// newTLSConfigHolder creates a new holder initialized with secure defaults. 
+func newTLSConfigHolder() *tlsConfigHolder { + h := &tlsConfigHolder{} + // Initialize with secure defaults + _ = ApplySecureDefaults(&h.config) + return h +} + +// update atomically updates the stored TLS configuration. +func (h *tlsConfigHolder) update(minVersion uint16, cipherSuites []uint16) { + h.mu.Lock() + defer h.mu.Unlock() + + h.config.MinVersion = minVersion + // Make a defensive copy of the slice + h.config.CipherSuites = make([]uint16, len(cipherSuites)) + copy(h.config.CipherSuites, cipherSuites) + h.config.PreferServerCipherSuites = true +} + +// copyTo atomically copies the cached TLS settings to the provided config. +// All reading and copying happens under the read lock, ensuring thread safety. +func (h *tlsConfigHolder) copyTo(config *tls.Config) { + h.mu.RLock() + defer h.mu.RUnlock() + + // Copy all fields while holding the lock + config.MinVersion = h.config.MinVersion + config.CipherSuites = make([]uint16, len(h.config.CipherSuites)) + copy(config.CipherSuites, h.config.CipherSuites) + config.PreferServerCipherSuites = h.config.PreferServerCipherSuites +} + +// QueryTLSConfig queries the global cluster level APIServer object and updates +// the provided TLS configuration with the cluster-wide security profile settings. +func (w *Syncer) QueryTLSConfig(config *tls.Config) error { + if config == nil { + return fmt.Errorf("tls.Config cannot be nil") + } + + // Copy the current cached config atomically + // This always succeeds because currentConfig always has a valid value + w.currentConfig.copyTo(config) + return nil +} + +// SyncAPIServer is invoked when a cluster scoped APIServer object is added or modified. 
+func (w *Syncer) SyncAPIServer(object interface{}) error { + apiserver, ok := object.(*apiconfigv1.APIServer) + if !ok { + w.logger.Error("wrong type in APIServer syncer") + return nil + } + + // Convert the TLS security profile to get new settings + minVersion, cipherSuites := GetSecurityProfileConfig(apiserver.Spec.TLSSecurityProfile) + + // Check if configuration changed (before updating) + changed := w.hasConfigChanged(minVersion, cipherSuites) + + // Update the stored configuration atomically + w.currentConfig.update(minVersion, cipherSuites) + + // Log if configuration changed + if changed { + profileName := getProfileName(apiserver.Spec.TLSSecurityProfile) + w.logger.Infof("APIServer TLS configuration changed: profile=%s, minVersion=%s, cipherCount=%d", + profileName, + tlsVersionToString(minVersion), + len(cipherSuites)) + } + + return nil +} + +// HandleAPIServerDelete is invoked when a cluster scoped APIServer object is deleted. +func (w *Syncer) HandleAPIServerDelete(object interface{}) { + _, ok := object.(*apiconfigv1.APIServer) + if !ok { + w.logger.Error("wrong type in APIServer delete syncer") + return + } + + // Reset to secure defaults (Intermediate profile) + w.currentConfig.update(GetSecurityProfileConfig(nil)) + + w.logger.Info("APIServer TLS configuration deleted, reverted to secure defaults") + return +} + +// hasConfigChanged checks if the new TLS settings differ from the current cached settings. +func (w *Syncer) hasConfigChanged(minVersion uint16, cipherSuites []uint16) bool { + w.currentConfig.mu.RLock() + defer w.currentConfig.mu.RUnlock() + + if w.currentConfig.config.MinVersion != minVersion { + return true + } + if len(w.currentConfig.config.CipherSuites) != len(cipherSuites) { + return true + } + for i := range cipherSuites { + if w.currentConfig.config.CipherSuites[i] != cipherSuites[i] { + return true + } + } + return false +} + +// getProfileName returns the TLS security profile name for logging. 
+func getProfileName(profile *apiconfigv1.TLSSecurityProfile) string { + if profile == nil { + return "Intermediate (default)" + } + + profileType := string(profile.Type) + if profileType == "" { + return "Intermediate (default)" + } + + return profileType +} + +// tlsVersionToString converts a TLS version number to a string +func tlsVersionToString(version uint16) string { + switch version { + case tls.VersionTLS10: + return "TLS 1.0" + case tls.VersionTLS11: + return "TLS 1.1" + case tls.VersionTLS12: + return "TLS 1.2" + case tls.VersionTLS13: + return "TLS 1.3" + default: + return "unknown" + } +} diff --git a/staging/operator-lifecycle-manager/pkg/lib/apiserver/syncer_test.go b/staging/operator-lifecycle-manager/pkg/lib/apiserver/syncer_test.go new file mode 100644 index 0000000000..282eaad3e8 --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/lib/apiserver/syncer_test.go @@ -0,0 +1,232 @@ +package apiserver_test + +import ( + "crypto/tls" + "sync" + "testing" + + apiconfigv1 "github.com/openshift/api/config/v1" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestSyncer_QueryTLSConfig_NilConfig(t *testing.T) { + logger := logrus.New() + logger.SetOutput(logrus.StandardLogger().Out) + + syncer := &apiserver.Syncer{} + + err := syncer.QueryTLSConfig(nil) + require.Error(t, err) + assert.Contains(t, err.Error(), "tls.Config cannot be nil") +} + +func TestSyncer_QueryTLSConfig_ReturnsDefaults(t *testing.T) { + logger := logrus.New() + logger.SetOutput(logrus.StandardLogger().Out) + + // Note: We can't easily create a Syncer directly because it requires + // a lister and currentConfig that are internal. Instead, we'll test + // that the Querier interface works as expected with NoopQuerier, + // which has similar behavior for testing purposes. 
+ querier := apiserver.NoopQuerier() + config := &tls.Config{} + + err := querier.QueryTLSConfig(config) + require.NoError(t, err) + + // Verify defaults are applied + assert.NotZero(t, config.MinVersion, "MinVersion should be set") + assert.NotEmpty(t, config.CipherSuites, "CipherSuites should be set") + assert.True(t, config.PreferServerCipherSuites, "PreferServerCipherSuites should be true") +} + +func TestSyncer_SyncAPIServer_IntermediateProfile(t *testing.T) { + // Create a mock APIServer object with Intermediate profile + server := &apiconfigv1.APIServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: apiconfigv1.APIServerSpec{ + TLSSecurityProfile: &apiconfigv1.TLSSecurityProfile{ + Type: apiconfigv1.TLSProfileIntermediateType, + }, + }, + } + + // Test that GetSecurityProfileConfig returns expected values for Intermediate + minVersion, cipherSuites := apiserver.GetSecurityProfileConfig(server.Spec.TLSSecurityProfile) + + assert.Equal(t, uint16(tls.VersionTLS12), minVersion, "Intermediate should use TLS 1.2") + assert.NotEmpty(t, cipherSuites, "Should have cipher suites") + assert.Greater(t, len(cipherSuites), 5, "Intermediate should have multiple ciphers") +} + +func TestSyncer_SyncAPIServer_ModernProfile(t *testing.T) { + // Create a mock APIServer object with Modern profile + server := &apiconfigv1.APIServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: apiconfigv1.APIServerSpec{ + TLSSecurityProfile: &apiconfigv1.TLSSecurityProfile{ + Type: apiconfigv1.TLSProfileModernType, + }, + }, + } + + // Test that GetSecurityProfileConfig returns expected values for Modern + minVersion, cipherSuites := apiserver.GetSecurityProfileConfig(server.Spec.TLSSecurityProfile) + + assert.Equal(t, uint16(tls.VersionTLS13), minVersion, "Modern should use TLS 1.3") + assert.NotEmpty(t, cipherSuites, "Should have cipher suites") +} + +func TestSyncer_SyncAPIServer_CustomProfile(t *testing.T) { + // Create a mock APIServer object with 
Custom profile + server := &apiconfigv1.APIServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: apiconfigv1.APIServerSpec{ + TLSSecurityProfile: &apiconfigv1.TLSSecurityProfile{ + Type: apiconfigv1.TLSProfileCustomType, + Custom: &apiconfigv1.CustomTLSProfile{ + TLSProfileSpec: apiconfigv1.TLSProfileSpec{ + MinTLSVersion: apiconfigv1.VersionTLS13, + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + }, + }, + }, + }, + }, + } + + // Test that GetSecurityProfileConfig returns expected values for Custom + minVersion, cipherSuites := apiserver.GetSecurityProfileConfig(server.Spec.TLSSecurityProfile) + + assert.Equal(t, uint16(tls.VersionTLS13), minVersion, "Custom should respect MinTLSVersion") + assert.NotEmpty(t, cipherSuites, "Should have cipher suites") +} + +// TestConcurrentQueryTLSConfig tests thread safety of QueryTLSConfig. +// This simulates multiple goroutines reading the TLS config concurrently. +func TestConcurrentQueryTLSConfig(t *testing.T) { + querier := apiserver.NoopQuerier() + + // Run multiple goroutines concurrently + const numGoroutines = 50 + var wg sync.WaitGroup + wg.Add(numGoroutines) + + // Channel to collect errors + errors := make(chan error, numGoroutines) + + for i := 0; i < numGoroutines; i++ { + go func() { + defer wg.Done() + + config := &tls.Config{} + if err := querier.QueryTLSConfig(config); err != nil { + errors <- err + return + } + + // Verify the config was populated correctly + if config.MinVersion == 0 { + errors <- assert.AnError + return + } + if len(config.CipherSuites) == 0 { + errors <- assert.AnError + return + } + }() + } + + wg.Wait() + close(errors) + + // Check for any errors + for err := range errors { + t.Errorf("Concurrent query failed: %v", err) + } +} + +// TestConfigIsolation verifies that modifications to a returned config +// don't affect cached values or other callers. 
+func TestConfigIsolation(t *testing.T) { + querier := apiserver.NoopQuerier() + + // Get first config + config1 := &tls.Config{} + err := querier.QueryTLSConfig(config1) + require.NoError(t, err) + + originalMinVersion := config1.MinVersion + originalCipherCount := len(config1.CipherSuites) + + // Modify the first config + config1.MinVersion = tls.VersionTLS10 + config1.CipherSuites = []uint16{tls.TLS_RSA_WITH_RC4_128_SHA} + + // Get second config + config2 := &tls.Config{} + err = querier.QueryTLSConfig(config2) + require.NoError(t, err) + + // Verify the second config has the original values, not the modified ones + assert.Equal(t, originalMinVersion, config2.MinVersion, "MinVersion should not be affected by modifications to other config") + assert.Equal(t, originalCipherCount, len(config2.CipherSuites), "CipherSuites should not be affected by modifications to other config") + assert.NotEqual(t, config1.MinVersion, config2.MinVersion, "Configs should be isolated") +} + +// TestApplySecureDefaults_PreservesExistingValues tests that +// ApplySecureDefaults only sets values that are zero/empty. +func TestApplySecureDefaults_PreservesExistingValues(t *testing.T) { + config := &tls.Config{ + MinVersion: tls.VersionTLS13, + CipherSuites: []uint16{tls.TLS_AES_256_GCM_SHA384}, + } + + err := apiserver.ApplySecureDefaults(config) + require.NoError(t, err) + + // MinVersion and CipherSuites should be preserved + assert.Equal(t, uint16(tls.VersionTLS13), config.MinVersion, "Should preserve existing MinVersion") + assert.Len(t, config.CipherSuites, 1, "Should preserve existing CipherSuites") + assert.Equal(t, uint16(tls.TLS_AES_256_GCM_SHA384), config.CipherSuites[0]) + + // PreferServerCipherSuites should still be set + assert.True(t, config.PreferServerCipherSuites, "Should set PreferServerCipherSuites") +} + +// TestGetConfigForClient_CreatesFreshConfig tests that the callback +// returns a properly configured TLS config for each connection. 
+func TestGetConfigForClient_CreatesFreshConfig(t *testing.T) { + querier := apiserver.NoopQuerier() + callback := apiserver.GetConfigForClient(querier) + require.NotNil(t, callback) + + // Call the callback multiple times + config1, err1 := callback(nil) + require.NoError(t, err1) + require.NotNil(t, config1) + + config2, err2 := callback(nil) + require.NoError(t, err2) + require.NotNil(t, config2) + + // Each call should return a different config object + assert.NotSame(t, config1, config2, "Should return fresh config for each connection") + + // But they should have the same values + assert.Equal(t, config1.MinVersion, config2.MinVersion) + assert.Equal(t, config1.CipherSuites, config2.CipherSuites) + assert.Equal(t, config1.PreferServerCipherSuites, config2.PreferServerCipherSuites) +} diff --git a/staging/operator-lifecycle-manager/pkg/lib/apiserver/tlsconfig.go b/staging/operator-lifecycle-manager/pkg/lib/apiserver/tlsconfig.go new file mode 100644 index 0000000000..29a240b179 --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/lib/apiserver/tlsconfig.go @@ -0,0 +1,105 @@ +package apiserver + +import ( + "crypto/tls" + + apiconfigv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/crypto" +) + +// GetSecurityProfileConfig extracts the minimum TLS version and cipher suites +// from a TLSSecurityProfile object. Converts OpenSSL cipher names to Go TLS cipher IDs. +// If profile is nil, returns config defined by the Intermediate TLS Profile. 
+func GetSecurityProfileConfig(profile *apiconfigv1.TLSSecurityProfile) (uint16, []uint16) { + var profileType apiconfigv1.TLSProfileType + if profile == nil { + profileType = apiconfigv1.TLSProfileIntermediateType + } else { + profileType = profile.Type + } + + var profileSpec *apiconfigv1.TLSProfileSpec + if profileType == apiconfigv1.TLSProfileCustomType { + if profile.Custom != nil { + profileSpec = &profile.Custom.TLSProfileSpec + } + } else { + profileSpec = apiconfigv1.TLSProfiles[profileType] + } + + // nothing found / custom type set but no actual custom spec + if profileSpec == nil { + profileSpec = apiconfigv1.TLSProfiles[apiconfigv1.TLSProfileIntermediateType] + } + + // Convert the TLS version string to the Go constant + minTLSVersion, err := crypto.TLSVersion(string(profileSpec.MinTLSVersion)) + if err != nil { + // Fallback to default if conversion fails + minTLSVersion = crypto.DefaultTLSVersion() + } + + // Convert OpenSSL cipher names to IANA names, then to Go cipher suite IDs + ianaCipherNames := crypto.OpenSSLToIANACipherSuites(profileSpec.Ciphers) + cipherSuites := CipherNamesToIDs(ianaCipherNames) + + return minTLSVersion, cipherSuites +} + +// CipherNamesToIDs converts IANA cipher suite names to Go TLS cipher suite IDs +func CipherNamesToIDs(cipherNames []string) []uint16 { + var cipherIDs []uint16 + + for _, name := range cipherNames { + if id, err := crypto.CipherSuite(name); err == nil { + cipherIDs = append(cipherIDs, id) + } + } + + // If no valid ciphers were found, use defaults + if len(cipherIDs) == 0 { + cipherIDs = crypto.DefaultCiphers() + } + + return cipherIDs +} + +// ApplySecureDefaults applies secure default TLS settings to the provided config. +// This ensures a minimum security baseline even when no cluster-wide profile is configured. 
+func ApplySecureDefaults(config *tls.Config) error { + if config.MinVersion == 0 { + config.MinVersion = crypto.DefaultTLSVersion() + } + if len(config.CipherSuites) == 0 { + config.CipherSuites = crypto.DefaultCiphers() + } + config.PreferServerCipherSuites = true + return nil +} + +// GetConfigForClient returns a GetConfigForClient callback function that can be used +// with tls.Config to provide per-connection dynamic TLS configuration updates. +// This allows the TLS settings to be updated without restarting the server. +// +// Example usage: +// +// server := &http.Server{ +// Addr: ":8443", +// TLSConfig: &tls.Config{ +// GetConfigForClient: apiserver.GetConfigForClient(querier), +// // Other settings like Certificates, ClientCAs, etc. +// }, +// } +func GetConfigForClient(querier Querier) func(*tls.ClientHelloInfo) (*tls.Config, error) { + return func(hello *tls.ClientHelloInfo) (*tls.Config, error) { + // Create a new config for this connection + config := &tls.Config{} + + // Apply cluster-wide TLS profile settings + if err := querier.QueryTLSConfig(config); err != nil { + return nil, err + } + + return config, nil + } +} diff --git a/staging/operator-lifecycle-manager/pkg/lib/apiserver/tlsconfig_test.go b/staging/operator-lifecycle-manager/pkg/lib/apiserver/tlsconfig_test.go new file mode 100644 index 0000000000..1af7cbbf97 --- /dev/null +++ b/staging/operator-lifecycle-manager/pkg/lib/apiserver/tlsconfig_test.go @@ -0,0 +1,190 @@ +package apiserver_test + +import ( + "crypto/tls" + "testing" + + apiconfigv1 "github.com/openshift/api/config/v1" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGetSecurityProfileConfig_NilProfile(t *testing.T) { + // When profile is nil, should use Intermediate defaults + minVersion, cipherSuites := apiserver.GetSecurityProfileConfig(nil) + + assert.Equal(t, uint16(tls.VersionTLS12), minVersion, 
"Intermediate profile should use TLS 1.2") + assert.NotEmpty(t, cipherSuites, "Should have cipher suites") +} + +func TestGetSecurityProfileConfig_IntermediateProfile(t *testing.T) { + profile := &apiconfigv1.TLSSecurityProfile{ + Type: apiconfigv1.TLSProfileIntermediateType, + } + + minVersion, cipherSuites := apiserver.GetSecurityProfileConfig(profile) + + assert.Equal(t, uint16(tls.VersionTLS12), minVersion, "Intermediate profile should use TLS 1.2") + assert.NotEmpty(t, cipherSuites, "Should have cipher suites") + assert.Greater(t, len(cipherSuites), 5, "Intermediate should have multiple cipher suites") +} + +func TestGetSecurityProfileConfig_ModernProfile(t *testing.T) { + profile := &apiconfigv1.TLSSecurityProfile{ + Type: apiconfigv1.TLSProfileModernType, + } + + minVersion, cipherSuites := apiserver.GetSecurityProfileConfig(profile) + + assert.Equal(t, uint16(tls.VersionTLS13), minVersion, "Modern profile should use TLS 1.3") + assert.NotEmpty(t, cipherSuites, "Should have cipher suites") +} + +func TestGetSecurityProfileConfig_OldProfile(t *testing.T) { + profile := &apiconfigv1.TLSSecurityProfile{ + Type: apiconfigv1.TLSProfileOldType, + } + + minVersion, cipherSuites := apiserver.GetSecurityProfileConfig(profile) + + assert.Equal(t, uint16(tls.VersionTLS10), minVersion, "Old profile should use TLS 1.0") + assert.NotEmpty(t, cipherSuites, "Should have cipher suites") + assert.Greater(t, len(cipherSuites), 10, "Old profile should have many cipher suites for compatibility") +} + +func TestGetSecurityProfileConfig_CustomProfile(t *testing.T) { + profile := &apiconfigv1.TLSSecurityProfile{ + Type: apiconfigv1.TLSProfileCustomType, + Custom: &apiconfigv1.CustomTLSProfile{ + TLSProfileSpec: apiconfigv1.TLSProfileSpec{ + MinTLSVersion: apiconfigv1.VersionTLS13, + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + }, + }, + }, + } + + minVersion, cipherSuites := apiserver.GetSecurityProfileConfig(profile) + + assert.Equal(t, 
uint16(tls.VersionTLS13), minVersion, "Custom profile should respect MinTLSVersion") + assert.NotEmpty(t, cipherSuites, "Should have cipher suites") +} + +func TestGetSecurityProfileConfig_CustomProfileWithoutSpec(t *testing.T) { + // Custom type but no actual custom spec should fall back to Intermediate + profile := &apiconfigv1.TLSSecurityProfile{ + Type: apiconfigv1.TLSProfileCustomType, + Custom: nil, + } + + minVersion, cipherSuites := apiserver.GetSecurityProfileConfig(profile) + + assert.Equal(t, uint16(tls.VersionTLS12), minVersion, "Should fall back to Intermediate") + assert.NotEmpty(t, cipherSuites, "Should have cipher suites") +} + +func TestApplySecureDefaults(t *testing.T) { + tests := []struct { + name string + config *tls.Config + }{ + { + name: "EmptyConfig", + config: &tls.Config{}, + }, + { + name: "ConfigWithMinVersionOnly", + config: &tls.Config{ + MinVersion: tls.VersionTLS13, + }, + }, + { + name: "ConfigWithCiphersOnly", + config: &tls.Config{ + CipherSuites: []uint16{tls.TLS_AES_256_GCM_SHA384}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := apiserver.ApplySecureDefaults(tt.config) + require.NoError(t, err) + + // Verify defaults are applied + if tt.name == "EmptyConfig" { + assert.NotZero(t, tt.config.MinVersion, "MinVersion should be set") + assert.NotEmpty(t, tt.config.CipherSuites, "CipherSuites should be set") + } + assert.True(t, tt.config.PreferServerCipherSuites, "PreferServerCipherSuites should be true") + }) + } +} + +func TestGetConfigForClient(t *testing.T) { + // Create a mock querier + querier := apiserver.NoopQuerier() + + // Get the callback function + callback := apiserver.GetConfigForClient(querier) + require.NotNil(t, callback) + + // Call the callback + config, err := callback(nil) + require.NoError(t, err) + require.NotNil(t, config) + + // Verify the config has secure defaults + assert.NotZero(t, config.MinVersion) + assert.NotEmpty(t, config.CipherSuites) + assert.True(t, 
config.PreferServerCipherSuites) +} + +func TestCipherNamesToIDs(t *testing.T) { + tests := []struct { + name string + cipherNames []string + expectEmpty bool + }{ + { + name: "ValidCiphers", + cipherNames: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + }, + expectEmpty: false, + }, + { + name: "EmptyCiphers", + cipherNames: []string{}, + expectEmpty: false, // Should fall back to defaults + }, + { + name: "InvalidCiphers", + cipherNames: []string{ + "INVALID_CIPHER_NAME", + }, + expectEmpty: false, // Should fall back to defaults + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cipherIDs := apiserver.CipherNamesToIDs(tt.cipherNames) + + if tt.expectEmpty { + assert.Empty(t, cipherIDs) + } else { + assert.NotEmpty(t, cipherIDs, "Should have cipher IDs (either valid or defaults)") + } + + // Verify all cipher IDs are non-zero + for _, id := range cipherIDs { + assert.NotZero(t, id, "Cipher ID should not be zero") + } + }) + } +} diff --git a/staging/operator-lifecycle-manager/pkg/lib/proxy/available.go b/staging/operator-lifecycle-manager/pkg/lib/openshiftconfig/available.go similarity index 73% rename from staging/operator-lifecycle-manager/pkg/lib/proxy/available.go rename to staging/operator-lifecycle-manager/pkg/lib/openshiftconfig/available.go index 03eff9850a..debf483a26 100644 --- a/staging/operator-lifecycle-manager/pkg/lib/proxy/available.go +++ b/staging/operator-lifecycle-manager/pkg/lib/openshiftconfig/available.go @@ -1,4 +1,4 @@ -package proxy +package openshiftconfig import ( "errors" @@ -14,12 +14,11 @@ const ( notSupportedErrorMessage = "server does not support API version" ) -// IsAPIAvailable return true if OpenShift config API is present on the cluster. +// IsAPIAvailable returns true if OpenShift config API is present on the cluster. // Otherwise, supported is set to false. 
-func IsAPIAvailable(discovery apidiscovery.DiscoveryInterface) (supported bool, err error) { +func IsAPIAvailable(discovery apidiscovery.DiscoveryInterface) (bool, error) { if discovery == nil { - err = errors.New("discovery interface can not be ") - return + return false, errors.New("discovery interface cannot be nil") } opStatusGV := schema.GroupVersion{ @@ -28,13 +27,11 @@ func IsAPIAvailable(discovery apidiscovery.DiscoveryInterface) (supported bool, } if discoveryErr := apidiscovery.ServerSupportsVersion(discovery, opStatusGV); discoveryErr != nil { if strings.Contains(discoveryErr.Error(), notSupportedErrorMessage) { - return + return false, nil } - err = discoveryErr - return + return false, discoveryErr } - supported = true - return + return true, nil } diff --git a/staging/operator-lifecycle-manager/pkg/lib/proxy/syncer.go b/staging/operator-lifecycle-manager/pkg/lib/proxy/syncer.go index b31df18bc9..6647481390 100644 --- a/staging/operator-lifecycle-manager/pkg/lib/proxy/syncer.go +++ b/staging/operator-lifecycle-manager/pkg/lib/proxy/syncer.go @@ -12,7 +12,6 @@ import ( "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/client-go/discovery" ) const ( @@ -24,7 +23,7 @@ const ( ) // NewSyncer returns informer and sync functions to enable watch of Proxy type.
-func NewSyncer(logger *logrus.Logger, client configv1client.Interface, discovery discovery.DiscoveryInterface) (proxyInformer configv1.ProxyInformer, syncer *Syncer, querier Querier, err error) { +func NewSyncer(logger *logrus.Logger, client configv1client.Interface) (proxyInformer configv1.ProxyInformer, syncer *Syncer, querier Querier, err error) { factory := externalversions.NewSharedInformerFactoryWithOptions(client, defaultSyncInterval) proxyInformer = factory.Config().V1().Proxies() s := &Syncer{ diff --git a/staging/operator-lifecycle-manager/pkg/lib/server/server.go b/staging/operator-lifecycle-manager/pkg/lib/server/server.go index 90608987ea..f56622ec52 100644 --- a/staging/operator-lifecycle-manager/pkg/lib/server/server.go +++ b/staging/operator-lifecycle-manager/pkg/lib/server/server.go @@ -8,6 +8,7 @@ import ( "net/http" "path/filepath" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/filemonitor" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/profile" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -58,13 +59,20 @@ func WithKubeConfig(config *rest.Config) Option { } } +func WithAPIServerTLSQuerier(querier apiserver.Querier) Option { + return func(sc *serverConfig) { + sc.apiServerTLSQuerier = querier + } +} + type serverConfig struct { - logger *logrus.Logger - tlsCertPath *string - tlsKeyPath *string - clientCAPath *string - kubeConfig *rest.Config - debug bool + logger *logrus.Logger + tlsCertPath *string + tlsKeyPath *string + clientCAPath *string + kubeConfig *rest.Config + apiServerTLSQuerier apiserver.Querier + debug bool } func (sc *serverConfig) apply(options []Option) { @@ -75,12 +83,13 @@ func (sc *serverConfig) apply(options []Option) { func defaultServerConfig() serverConfig { return serverConfig{ - tlsCertPath: nil, - tlsKeyPath: nil, - clientCAPath: nil, - kubeConfig: nil, - logger: nil, - debug: false, + 
tlsCertPath: nil, + tlsKeyPath: nil, + clientCAPath: nil, + kubeConfig: nil, + logger: nil, + apiServerTLSQuerier: nil, + debug: false, } } func (sc *serverConfig) tlsEnabled() (bool, error) { @@ -213,6 +222,14 @@ func (sc serverConfig) getListenAndServeFunc() (func() error, error) { tlsCfg.ClientCAs = certPoolStore.GetCertPool() tlsCfg.ClientAuth = tls.VerifyClientCertIfGiven } + + // Overlay cluster-wide TLS security profile settings if available + if sc.apiServerTLSQuerier != nil { + if err := sc.apiServerTLSQuerier.QueryTLSConfig(tlsCfg); err != nil { + sc.logger.WithError(err).Warn("Failed to query APIServer TLS config, using defaults") + } + } + return tlsCfg, nil }, NextProtos: []string{"http/1.1"}, // Disable HTTP/2 for security diff --git a/vendor/github.com/openshift/library-go/LICENSE b/vendor/github.com/openshift/library-go/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/openshift/library-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/OWNERS b/vendor/github.com/openshift/library-go/pkg/crypto/OWNERS new file mode 100644 index 0000000000..4d4ce5ab9e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/OWNERS @@ -0,0 +1,4 @@ +reviewers: + - stlaz +approvers: + - stlaz diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go new file mode 100644 index 0000000000..bff6155c2f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go @@ -0,0 +1,1214 @@ +package crypto + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + mathrand "math/rand" + "net" + "os" + "path/filepath" + "reflect" + "sort" + "strconv" + "sync" + "time" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/util/cert" +) + +// TLS versions that are known to golang. Go 1.13 adds support for +// TLS 1.3 that's opt-out with a build flag. +var versions = map[string]uint16{ + "VersionTLS10": tls.VersionTLS10, + "VersionTLS11": tls.VersionTLS11, + "VersionTLS12": tls.VersionTLS12, + "VersionTLS13": tls.VersionTLS13, +} + +// TLS versions that are enabled. 
+var supportedVersions = map[string]uint16{ + "VersionTLS10": tls.VersionTLS10, + "VersionTLS11": tls.VersionTLS11, + "VersionTLS12": tls.VersionTLS12, + "VersionTLS13": tls.VersionTLS13, +} + +// TLSVersionToNameOrDie given a tls version as an int, return its readable name +func TLSVersionToNameOrDie(intVal uint16) string { + matches := []string{} + for key, version := range versions { + if version == intVal { + matches = append(matches, key) + } + } + + if len(matches) == 0 { + panic(fmt.Sprintf("no name found for %d", intVal)) + } + if len(matches) > 1 { + panic(fmt.Sprintf("multiple names found for %d: %v", intVal, matches)) + } + return matches[0] +} + +func TLSVersion(versionName string) (uint16, error) { + if len(versionName) == 0 { + return DefaultTLSVersion(), nil + } + if version, ok := versions[versionName]; ok { + return version, nil + } + return 0, fmt.Errorf("unknown tls version %q", versionName) +} +func TLSVersionOrDie(versionName string) uint16 { + version, err := TLSVersion(versionName) + if err != nil { + panic(err) + } + return version +} + +// TLS versions that are known to golang, but may not necessarily be enabled. +func GolangTLSVersions() []string { + supported := []string{} + for k := range versions { + supported = append(supported, k) + } + sort.Strings(supported) + return supported +} + +// Returns the build enabled TLS versions. 
+func ValidTLSVersions() []string { + validVersions := []string{} + for k := range supportedVersions { + validVersions = append(validVersions, k) + } + sort.Strings(validVersions) + return validVersions +} +func DefaultTLSVersion() uint16 { + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + return tls.VersionTLS12 +} + +var ciphers = map[string]uint16{ + "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + 
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256, + "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384, + "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256, +} + +// openSSLToIANACiphersMap maps OpenSSL cipher suite names to IANA names +// ref: https://www.iana.org/assignments/tls-parameters/tls-parameters.xml +var openSSLToIANACiphersMap = map[string]string{ + // TLS 1.3 ciphers - not configurable in go 1.13, all of them are used in TLSv1.3 flows + "TLS_AES_128_GCM_SHA256": "TLS_AES_128_GCM_SHA256", // 0x13,0x01 + "TLS_AES_256_GCM_SHA384": "TLS_AES_256_GCM_SHA384", // 0x13,0x02 + "TLS_CHACHA20_POLY1305_SHA256": "TLS_CHACHA20_POLY1305_SHA256", // 0x13,0x03 + + // TLS 1.2 + "ECDHE-ECDSA-AES128-GCM-SHA256": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", // 0xC0,0x2B + "ECDHE-RSA-AES128-GCM-SHA256": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", // 0xC0,0x2F + "ECDHE-ECDSA-AES256-GCM-SHA384": "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", // 0xC0,0x2C + "ECDHE-RSA-AES256-GCM-SHA384": "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", // 0xC0,0x30 + "ECDHE-ECDSA-CHACHA20-POLY1305": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", // 0xCC,0xA9 + "ECDHE-RSA-CHACHA20-POLY1305": "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", // 0xCC,0xA8 + "ECDHE-ECDSA-AES128-SHA256": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", // 0xC0,0x23 + "ECDHE-RSA-AES128-SHA256": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", // 0xC0,0x27 + "AES128-GCM-SHA256": "TLS_RSA_WITH_AES_128_GCM_SHA256", // 0x00,0x9C + "AES256-GCM-SHA384": 
"TLS_RSA_WITH_AES_256_GCM_SHA384", // 0x00,0x9D + "AES128-SHA256": "TLS_RSA_WITH_AES_128_CBC_SHA256", // 0x00,0x3C + + // TLS 1 + "ECDHE-ECDSA-AES128-SHA": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", // 0xC0,0x09 + "ECDHE-RSA-AES128-SHA": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", // 0xC0,0x13 + "ECDHE-ECDSA-AES256-SHA": "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", // 0xC0,0x0A + "ECDHE-RSA-AES256-SHA": "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", // 0xC0,0x14 + + // SSL 3 + "AES128-SHA": "TLS_RSA_WITH_AES_128_CBC_SHA", // 0x00,0x2F + "AES256-SHA": "TLS_RSA_WITH_AES_256_CBC_SHA", // 0x00,0x35 + "DES-CBC3-SHA": "TLS_RSA_WITH_3DES_EDE_CBC_SHA", // 0x00,0x0A +} + +// CipherSuitesToNamesOrDie given a list of cipher suites as ints, return their readable names +func CipherSuitesToNamesOrDie(intVals []uint16) []string { + ret := []string{} + for _, intVal := range intVals { + ret = append(ret, CipherSuiteToNameOrDie(intVal)) + } + + return ret +} + +// CipherSuiteToNameOrDie given a cipher suite as an int, return its readable name +func CipherSuiteToNameOrDie(intVal uint16) string { + // The following suite ids appear twice in the cipher map (with + // and without the _SHA256 suffix) for the purposes of backwards + // compatibility. Always return the current rather than the legacy + // name. 
+ switch intVal { + case tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: + return "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256" + case tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: + return "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256" + } + + matches := []string{} + for key, version := range ciphers { + if version == intVal { + matches = append(matches, key) + } + } + + if len(matches) == 0 { + panic(fmt.Sprintf("no name found for %d", intVal)) + } + if len(matches) > 1 { + panic(fmt.Sprintf("multiple names found for %d: %v", intVal, matches)) + } + return matches[0] +} + +func CipherSuite(cipherName string) (uint16, error) { + if cipher, ok := ciphers[cipherName]; ok { + return cipher, nil + } + + return 0, fmt.Errorf("unknown cipher name %q", cipherName) +} + +func CipherSuitesOrDie(cipherNames []string) []uint16 { + if len(cipherNames) == 0 { + return DefaultCiphers() + } + cipherValues := []uint16{} + for _, cipherName := range cipherNames { + cipher, err := CipherSuite(cipherName) + if err != nil { + panic(err) + } + cipherValues = append(cipherValues, cipher) + } + return cipherValues +} +func ValidCipherSuites() []string { + validCipherSuites := []string{} + for k := range ciphers { + validCipherSuites = append(validCipherSuites, k) + } + sort.Strings(validCipherSuites) + return validCipherSuites +} + +// DefaultCiphers returns the default cipher suites for TLS connections. 
+// +// RECOMMENDATION: Instead of relying on this function directly, consumers should respect +// TLSSecurityProfile settings from one of the OpenShift API configuration resources: +// - For API servers: Use apiserver.config.openshift.io/cluster Spec.TLSSecurityProfile +// - For ingress controllers: Use operator.openshift.io/v1 IngressController Spec.TLSSecurityProfile +// - For kubelet: Use machineconfiguration.openshift.io/v1 KubeletConfig Spec.TLSSecurityProfile +// +// These API resources allow cluster administrators to choose between Old, Intermediate, +// Modern, or Custom TLS profiles. Components should observe these settings. +func DefaultCiphers() []uint16 { + // Aligned with intermediate profile of the 5.7 version of the Mozilla Server + // Side TLS guidelines found at: https://ssl-config.mozilla.org/guidelines/5.7.json + // + // Latest guidelines: https://ssl-config.mozilla.org/guidelines/latest.json + // + // This profile provides strong security with wide compatibility. + // It requires TLS 1.2+ and uses only AEAD cipher suites (GCM, ChaCha20-Poly1305) + // with ECDHE key exchange for perfect forward secrecy. + // + // All CBC-mode ciphers have been removed due to padding oracle vulnerabilities. + // All RSA key exchange ciphers have been removed due to lack of perfect forward secrecy. + // + // HTTP/2 compliance: All ciphers are compliant with RFC7540, section 9.2. 
+ return []uint16{ + // TLS 1.2 cipher suites with ECDHE + AEAD + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // required by HTTP/2 + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + + // TLS 1.3 cipher suites (negotiated automatically, not configurable) + tls.TLS_AES_128_GCM_SHA256, + tls.TLS_AES_256_GCM_SHA384, + tls.TLS_CHACHA20_POLY1305_SHA256, + } +} + +// SecureTLSConfig enforces the default minimum security settings for the cluster. +func SecureTLSConfig(config *tls.Config) *tls.Config { + if config.MinVersion == 0 { + config.MinVersion = DefaultTLSVersion() + } + + config.PreferServerCipherSuites = true + if len(config.CipherSuites) == 0 { + config.CipherSuites = DefaultCiphers() + } + return config +} + +// OpenSSLToIANACipherSuites maps input OpenSSL Cipher Suite names to their +// IANA counterparts. +// Unknown ciphers are left out. 
+func OpenSSLToIANACipherSuites(ciphers []string) []string { + ianaCiphers := make([]string, 0, len(ciphers)) + + for _, c := range ciphers { + ianaCipher, found := openSSLToIANACiphersMap[c] + if found { + ianaCiphers = append(ianaCiphers, ianaCipher) + } + } + + return ianaCiphers +} + +type TLSCertificateConfig struct { + Certs []*x509.Certificate + Key crypto.PrivateKey +} + +type TLSCARoots struct { + Roots []*x509.Certificate +} + +func (c *TLSCertificateConfig) WriteCertConfigFile(certFile, keyFile string) error { + // ensure parent dir + if err := os.MkdirAll(filepath.Dir(certFile), os.FileMode(0755)); err != nil { + return err + } + certFileWriter, err := os.OpenFile(certFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(keyFile), os.FileMode(0755)); err != nil { + return err + } + keyFileWriter, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + + if err := writeCertificates(certFileWriter, c.Certs...); err != nil { + return err + } + if err := writeKeyFile(keyFileWriter, c.Key); err != nil { + return err + } + + if err := certFileWriter.Close(); err != nil { + return err + } + if err := keyFileWriter.Close(); err != nil { + return err + } + + return nil +} + +func (c *TLSCertificateConfig) WriteCertConfig(certFile, keyFile io.Writer) error { + if err := writeCertificates(certFile, c.Certs...); err != nil { + return err + } + if err := writeKeyFile(keyFile, c.Key); err != nil { + return err + } + return nil +} + +func (c *TLSCertificateConfig) GetPEMBytes() ([]byte, []byte, error) { + certBytes, err := EncodeCertificates(c.Certs...) 
+ if err != nil { + return nil, nil, err + } + keyBytes, err := EncodeKey(c.Key) + if err != nil { + return nil, nil, err + } + + return certBytes, keyBytes, nil +} + +func GetTLSCertificateConfig(certFile, keyFile string) (*TLSCertificateConfig, error) { + if len(certFile) == 0 { + return nil, errors.New("certFile missing") + } + if len(keyFile) == 0 { + return nil, errors.New("keyFile missing") + } + + certPEMBlock, err := os.ReadFile(certFile) + if err != nil { + return nil, err + } + certs, err := cert.ParseCertsPEM(certPEMBlock) + if err != nil { + return nil, fmt.Errorf("error reading %s: %s", certFile, err) + } + + keyPEMBlock, err := os.ReadFile(keyFile) + if err != nil { + return nil, err + } + keyPairCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) + if err != nil { + return nil, err + } + key := keyPairCert.PrivateKey + + return &TLSCertificateConfig{certs, key}, nil +} + +func GetTLSCertificateConfigFromBytes(certBytes, keyBytes []byte) (*TLSCertificateConfig, error) { + if len(certBytes) == 0 { + return nil, errors.New("certFile missing") + } + if len(keyBytes) == 0 { + return nil, errors.New("keyFile missing") + } + + certs, err := cert.ParseCertsPEM(certBytes) + if err != nil { + return nil, fmt.Errorf("error reading cert: %s", err) + } + + keyPairCert, err := tls.X509KeyPair(certBytes, keyBytes) + if err != nil { + return nil, err + } + key := keyPairCert.PrivateKey + + return &TLSCertificateConfig{certs, key}, nil +} + +const ( + DefaultCertificateLifetimeDuration = time.Hour * 24 * 365 * 2 // 2 years + DefaultCACertificateLifetimeDuration = time.Hour * 24 * 365 * 5 // 5 years + + // Default keys are 2048 bits + keyBits = 2048 +) + +type CA struct { + Config *TLSCertificateConfig + + SerialGenerator SerialGenerator +} + +// SerialGenerator is an interface for getting a serial number for the cert. It MUST be thread-safe. 
+type SerialGenerator interface { + Next(template *x509.Certificate) (int64, error) +} + +// SerialFileGenerator returns a unique, monotonically increasing serial number and ensures the CA on disk records that value. +type SerialFileGenerator struct { + SerialFile string + + // lock guards access to the Serial field + lock sync.Mutex + Serial int64 +} + +func NewSerialFileGenerator(serialFile string) (*SerialFileGenerator, error) { + // read serial file, it must already exist + serial, err := fileToSerial(serialFile) + if err != nil { + return nil, err + } + + generator := &SerialFileGenerator{ + Serial: serial, + SerialFile: serialFile, + } + + // 0 is unused and 1 is reserved for the CA itself + // Thus we need to guarantee that the first external call to SerialFileGenerator.Next returns 2+ + // meaning that SerialFileGenerator.Serial must not be less than 1 (it is guaranteed to be non-negative) + if generator.Serial < 1 { + // fake a call to Next so the file stays in sync and Serial is incremented + if _, err := generator.Next(&x509.Certificate{}); err != nil { + return nil, err + } + } + + return generator, nil +} + +// Next returns a unique, monotonically increasing serial number and ensures the CA on disk records that value. 
+func (s *SerialFileGenerator) Next(template *x509.Certificate) (int64, error) { + s.lock.Lock() + defer s.lock.Unlock() + + // do a best effort check to make sure concurrent external writes are not occurring to the underlying serial file + serial, err := fileToSerial(s.SerialFile) + if err != nil { + return 0, err + } + if serial != s.Serial { + return 0, fmt.Errorf("serial file %s out of sync ram=%d disk=%d", s.SerialFile, s.Serial, serial) + } + + next := s.Serial + 1 + s.Serial = next + + // Output in hex, padded to multiples of two characters for OpenSSL's sake + serialText := fmt.Sprintf("%X", next) + if len(serialText)%2 == 1 { + serialText = "0" + serialText + } + // always add a newline at the end to have a valid file + serialText += "\n" + + if err := os.WriteFile(s.SerialFile, []byte(serialText), os.FileMode(0640)); err != nil { + return 0, err + } + return next, nil +} + +func fileToSerial(serialFile string) (int64, error) { + serialData, err := os.ReadFile(serialFile) + if err != nil { + return 0, err + } + + // read the file as a single hex number after stripping any whitespace + serial, err := strconv.ParseInt(string(bytes.TrimSpace(serialData)), 16, 64) + if err != nil { + return 0, err + } + + if serial < 0 { + return 0, fmt.Errorf("invalid negative serial %d in serial file %s", serial, serialFile) + } + + return serial, nil +} + +// RandomSerialGenerator returns a serial based on time.Now and the subject +type RandomSerialGenerator struct { +} + +func (s *RandomSerialGenerator) Next(template *x509.Certificate) (int64, error) { + return randomSerialNumber(), nil +} + +// randomSerialNumber returns a random int64 serial number based on +// time.Now. It is defined separately from the generator interface so +// that the caller doesn't have to worry about an input template or +// error - these are unnecessary when creating a random serial. 
+func randomSerialNumber() int64 { + r := mathrand.New(mathrand.NewSource(time.Now().UTC().UnixNano())) + return r.Int63() +} + +// EnsureCA returns a CA, whether it was created (as opposed to pre-existing), and any error +// if serialFile is empty, a RandomSerialGenerator will be used +func EnsureCA(certFile, keyFile, serialFile, name string, lifetime time.Duration) (*CA, bool, error) { + if ca, err := GetCA(certFile, keyFile, serialFile); err == nil { + return ca, false, err + } + ca, err := MakeSelfSignedCA(certFile, keyFile, serialFile, name, lifetime) + return ca, true, err +} + +// if serialFile is empty, a RandomSerialGenerator will be used +func GetCA(certFile, keyFile, serialFile string) (*CA, error) { + caConfig, err := GetTLSCertificateConfig(certFile, keyFile) + if err != nil { + return nil, err + } + + var serialGenerator SerialGenerator + if len(serialFile) > 0 { + serialGenerator, err = NewSerialFileGenerator(serialFile) + if err != nil { + return nil, err + } + } else { + serialGenerator = &RandomSerialGenerator{} + } + + return &CA{ + SerialGenerator: serialGenerator, + Config: caConfig, + }, nil +} + +func GetCAFromBytes(certBytes, keyBytes []byte) (*CA, error) { + caConfig, err := GetTLSCertificateConfigFromBytes(certBytes, keyBytes) + if err != nil { + return nil, err + } + + return &CA{ + SerialGenerator: &RandomSerialGenerator{}, + Config: caConfig, + }, nil +} + +// if serialFile is empty, a RandomSerialGenerator will be used +func MakeSelfSignedCA(certFile, keyFile, serialFile, name string, lifetime time.Duration) (*CA, error) { + klog.V(2).Infof("Generating new CA for %s cert, and key in %s, %s", name, certFile, keyFile) + + caConfig, err := MakeSelfSignedCAConfig(name, lifetime) + if err != nil { + return nil, err + } + if err := caConfig.WriteCertConfigFile(certFile, keyFile); err != nil { + return nil, err + } + + var serialGenerator SerialGenerator + if len(serialFile) > 0 { + // create / overwrite the serial file with a zero padded hex 
value (ending in a newline to have a valid file) + if err := os.WriteFile(serialFile, []byte("00\n"), 0644); err != nil { + return nil, err + } + serialGenerator, err = NewSerialFileGenerator(serialFile) + if err != nil { + return nil, err + } + } else { + serialGenerator = &RandomSerialGenerator{} + } + + return &CA{ + SerialGenerator: serialGenerator, + Config: caConfig, + }, nil +} + +func MakeSelfSignedCAConfig(name string, lifetime time.Duration) (*TLSCertificateConfig, error) { + subject := pkix.Name{CommonName: name} + return MakeSelfSignedCAConfigForSubject(subject, lifetime) +} + +func MakeSelfSignedCAConfigForSubject(subject pkix.Name, lifetime time.Duration) (*TLSCertificateConfig, error) { + if lifetime <= 0 { + lifetime = DefaultCACertificateLifetimeDuration + fmt.Fprintf(os.Stderr, "Validity period of the certificate for %q is unset, resetting to %s!\n", subject.CommonName, lifetime.String()) + } + + if lifetime > DefaultCACertificateLifetimeDuration { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCACertificateLifetimeDuration) + } + return makeSelfSignedCAConfigForSubjectAndDuration(subject, time.Now, lifetime) +} + +func MakeSelfSignedCAConfigForDuration(name string, caLifetime time.Duration) (*TLSCertificateConfig, error) { + subject := pkix.Name{CommonName: name} + return makeSelfSignedCAConfigForSubjectAndDuration(subject, time.Now, caLifetime) +} + +func UnsafeMakeSelfSignedCAConfigForDurationAtTime(name string, currentTime func() time.Time, caLifetime time.Duration) (*TLSCertificateConfig, error) { + subject := pkix.Name{CommonName: name} + return makeSelfSignedCAConfigForSubjectAndDuration(subject, currentTime, caLifetime) +} + +func makeSelfSignedCAConfigForSubjectAndDuration(subject pkix.Name, currentTime func() time.Time, caLifetime time.Duration) (*TLSCertificateConfig, error) { + // Create CA cert + rootcaPublicKey, rootcaPrivateKey, publicKeyHash, err := newKeyPairWithHash() + if err != nil { + return nil, err + } + // 
AuthorityKeyId and SubjectKeyId should match for a self-signed CA + authorityKeyId := publicKeyHash + subjectKeyId := publicKeyHash + rootcaTemplate := newSigningCertificateTemplateForDuration(subject, caLifetime, currentTime, authorityKeyId, subjectKeyId) + rootcaCert, err := signCertificate(rootcaTemplate, rootcaPublicKey, rootcaTemplate, rootcaPrivateKey) + if err != nil { + return nil, err + } + caConfig := &TLSCertificateConfig{ + Certs: []*x509.Certificate{rootcaCert}, + Key: rootcaPrivateKey, + } + return caConfig, nil +} + +func MakeCAConfigForDuration(name string, caLifetime time.Duration, issuer *CA) (*TLSCertificateConfig, error) { + // Create CA cert + signerPublicKey, signerPrivateKey, publicKeyHash, err := newKeyPairWithHash() + if err != nil { + return nil, err + } + authorityKeyId := issuer.Config.Certs[0].SubjectKeyId + subjectKeyId := publicKeyHash + signerTemplate := newSigningCertificateTemplateForDuration(pkix.Name{CommonName: name}, caLifetime, time.Now, authorityKeyId, subjectKeyId) + signerCert, err := issuer.SignCertificate(signerTemplate, signerPublicKey) + if err != nil { + return nil, err + } + signerConfig := &TLSCertificateConfig{ + Certs: append([]*x509.Certificate{signerCert}, issuer.Config.Certs...), + Key: signerPrivateKey, + } + return signerConfig, nil +} + +// EnsureSubCA returns a subCA signed by the `ca`, whether it was created +// (as opposed to pre-existing), and any error that might occur during the subCA +// creation. +// If serialFile is an empty string, a RandomSerialGenerator will be used. +func (ca *CA) EnsureSubCA(certFile, keyFile, serialFile, name string, lifetime time.Duration) (*CA, bool, error) { + if subCA, err := GetCA(certFile, keyFile, serialFile); err == nil { + return subCA, false, err + } + subCA, err := ca.MakeAndWriteSubCA(certFile, keyFile, serialFile, name, lifetime) + return subCA, true, err +} + +// MakeAndWriteSubCA returns a new sub-CA configuration. 
New cert/key pair is generated +// while using this function. +// If serialFile is an empty string, a RandomSerialGenerator will be used. +func (ca *CA) MakeAndWriteSubCA(certFile, keyFile, serialFile, name string, lifetime time.Duration) (*CA, error) { + klog.V(4).Infof("Generating sub-CA certificate in %s, key in %s, serial in %s", certFile, keyFile, serialFile) + + subCAConfig, err := MakeCAConfigForDuration(name, lifetime, ca) + if err != nil { + return nil, err + } + + if err := subCAConfig.WriteCertConfigFile(certFile, keyFile); err != nil { + return nil, err + } + + var serialGenerator SerialGenerator + if len(serialFile) > 0 { + // create / overwrite the serial file with a zero padded hex value (ending in a newline to have a valid file) + if err := os.WriteFile(serialFile, []byte("00\n"), 0644); err != nil { + return nil, err + } + + serialGenerator, err = NewSerialFileGenerator(serialFile) + if err != nil { + return nil, err + } + } else { + serialGenerator = &RandomSerialGenerator{} + } + + return &CA{ + Config: subCAConfig, + SerialGenerator: serialGenerator, + }, nil +} + +func (ca *CA) EnsureServerCert(certFile, keyFile string, hostnames sets.Set[string], lifetime time.Duration) (*TLSCertificateConfig, bool, error) { + certConfig, err := GetServerCert(certFile, keyFile, hostnames) + if err != nil { + certConfig, err = ca.MakeAndWriteServerCert(certFile, keyFile, hostnames, lifetime) + return certConfig, true, err + } + + return certConfig, false, nil +} + +func GetServerCert(certFile, keyFile string, hostnames sets.Set[string]) (*TLSCertificateConfig, error) { + server, err := GetTLSCertificateConfig(certFile, keyFile) + if err != nil { + return nil, err + } + + cert := server.Certs[0] + certNames := sets.New[string]() + for _, ip := range cert.IPAddresses { + certNames.Insert(ip.String()) + } + certNames.Insert(cert.DNSNames...) 
+ if hostnames.Equal(certNames) { + klog.V(4).Infof("Found existing server certificate in %s", certFile) + return server, nil + } + + return nil, fmt.Errorf("existing server certificate in %s does not match required hostnames", certFile) +} + +func (ca *CA) MakeAndWriteServerCert(certFile, keyFile string, hostnames sets.Set[string], lifetime time.Duration) (*TLSCertificateConfig, error) { + klog.V(4).Infof("Generating server certificate in %s, key in %s", certFile, keyFile) + + server, err := ca.MakeServerCert(hostnames, lifetime) + if err != nil { + return nil, err + } + if err := server.WriteCertConfigFile(certFile, keyFile); err != nil { + return server, err + } + return server, nil +} + +// CertificateExtensionFunc is passed a certificate that it may extend, or return an error +// if the extension attempt failed. +type CertificateExtensionFunc func(*x509.Certificate) error + +func (ca *CA) MakeServerCert(hostnames sets.Set[string], lifetime time.Duration, fns ...CertificateExtensionFunc) (*TLSCertificateConfig, error) { + serverPublicKey, serverPrivateKey, publicKeyHash, _ := newKeyPairWithHash() + authorityKeyId := ca.Config.Certs[0].SubjectKeyId + subjectKeyId := publicKeyHash + serverTemplate := newServerCertificateTemplate(pkix.Name{CommonName: sets.List(hostnames)[0]}, sets.List(hostnames), lifetime, time.Now, authorityKeyId, subjectKeyId) + for _, fn := range fns { + if err := fn(serverTemplate); err != nil { + return nil, err + } + } + serverCrt, err := ca.SignCertificate(serverTemplate, serverPublicKey) + if err != nil { + return nil, err + } + server := &TLSCertificateConfig{ + Certs: append([]*x509.Certificate{serverCrt}, ca.Config.Certs...), + Key: serverPrivateKey, + } + return server, nil +} + +func (ca *CA) MakeServerCertForDuration(hostnames sets.Set[string], lifetime time.Duration, fns ...CertificateExtensionFunc) (*TLSCertificateConfig, error) { + serverPublicKey, serverPrivateKey, publicKeyHash, _ := newKeyPairWithHash() + authorityKeyId := 
ca.Config.Certs[0].SubjectKeyId + subjectKeyId := publicKeyHash + serverTemplate := newServerCertificateTemplateForDuration(pkix.Name{CommonName: sets.List(hostnames)[0]}, sets.List(hostnames), lifetime, time.Now, authorityKeyId, subjectKeyId) + for _, fn := range fns { + if err := fn(serverTemplate); err != nil { + return nil, err + } + } + serverCrt, err := ca.SignCertificate(serverTemplate, serverPublicKey) + if err != nil { + return nil, err + } + server := &TLSCertificateConfig{ + Certs: append([]*x509.Certificate{serverCrt}, ca.Config.Certs...), + Key: serverPrivateKey, + } + return server, nil +} + +func (ca *CA) EnsureClientCertificate(certFile, keyFile string, u user.Info, lifetime time.Duration) (*TLSCertificateConfig, bool, error) { + certConfig, err := GetClientCertificate(certFile, keyFile, u) + if err != nil { + certConfig, err = ca.MakeClientCertificate(certFile, keyFile, u, lifetime) + return certConfig, true, err // true indicates we wrote the files. + } + return certConfig, false, nil +} + +func GetClientCertificate(certFile, keyFile string, u user.Info) (*TLSCertificateConfig, error) { + certConfig, err := GetTLSCertificateConfig(certFile, keyFile) + if err != nil { + return nil, err + } + + if subject := certConfig.Certs[0].Subject; subjectChanged(subject, UserToSubject(u)) { + return nil, fmt.Errorf("existing client certificate in %s was issued for a different Subject (%s)", + certFile, subject) + } + + return certConfig, nil +} + +func subjectChanged(existing, expected pkix.Name) bool { + sort.Strings(existing.Organization) + sort.Strings(expected.Organization) + + return existing.CommonName != expected.CommonName || + existing.SerialNumber != expected.SerialNumber || + !reflect.DeepEqual(existing.Organization, expected.Organization) +} + +func (ca *CA) MakeClientCertificate(certFile, keyFile string, u user.Info, lifetime time.Duration) (*TLSCertificateConfig, error) { + klog.V(4).Infof("Generating client cert in %s and key in %s", certFile, 
keyFile) + // ensure parent dirs + if err := os.MkdirAll(filepath.Dir(certFile), os.FileMode(0755)); err != nil { + return nil, err + } + if err := os.MkdirAll(filepath.Dir(keyFile), os.FileMode(0755)); err != nil { + return nil, err + } + + clientPublicKey, clientPrivateKey, _ := NewKeyPair() + clientTemplate := NewClientCertificateTemplate(UserToSubject(u), lifetime, time.Now) + clientCrt, err := ca.SignCertificate(clientTemplate, clientPublicKey) + if err != nil { + return nil, err + } + + certData, err := EncodeCertificates(clientCrt) + if err != nil { + return nil, err + } + keyData, err := EncodeKey(clientPrivateKey) + if err != nil { + return nil, err + } + + if err = os.WriteFile(certFile, certData, os.FileMode(0644)); err != nil { + return nil, err + } + if err = os.WriteFile(keyFile, keyData, os.FileMode(0600)); err != nil { + return nil, err + } + + return GetTLSCertificateConfig(certFile, keyFile) +} + +func (ca *CA) MakeClientCertificateForDuration(u user.Info, lifetime time.Duration) (*TLSCertificateConfig, error) { + clientPublicKey, clientPrivateKey, _ := NewKeyPair() + clientTemplate := NewClientCertificateTemplateForDuration(UserToSubject(u), lifetime, time.Now) + clientCrt, err := ca.SignCertificate(clientTemplate, clientPublicKey) + if err != nil { + return nil, err + } + + certData, err := EncodeCertificates(clientCrt) + if err != nil { + return nil, err + } + keyData, err := EncodeKey(clientPrivateKey) + if err != nil { + return nil, err + } + + return GetTLSCertificateConfigFromBytes(certData, keyData) +} + +type sortedForDER []string + +func (s sortedForDER) Len() int { + return len(s) +} +func (s sortedForDER) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s sortedForDER) Less(i, j int) bool { + l1 := len(s[i]) + l2 := len(s[j]) + if l1 == l2 { + return s[i] < s[j] + } + return l1 < l2 +} + +func UserToSubject(u user.Info) pkix.Name { + // Ok we are going to order groups in a peculiar way here to workaround a + // 2 bugs, 1 in golang 
(https://github.com/golang/go/issues/24254) which + // incorrectly encodes Multivalued RDNs and another in GNUTLS clients + // which are too picky (https://gitlab.com/gnutls/gnutls/issues/403) + // and try to "correct" this issue when reading client certs. + // + // This workaround should be killed once Golang's pkix module is fixed to + // generate a correct DER encoding. + // + // The workaround relies on the fact that the first octect that differs + // between the encoding of two group RDNs will end up being the encoded + // length which is directly related to the group name's length. So we'll + // sort such that shortest names come first. + ugroups := u.GetGroups() + groups := make([]string, len(ugroups)) + copy(groups, ugroups) + sort.Sort(sortedForDER(groups)) + + return pkix.Name{ + CommonName: u.GetName(), + SerialNumber: u.GetUID(), + Organization: groups, + } +} + +func (ca *CA) SignCertificate(template *x509.Certificate, requestKey crypto.PublicKey) (*x509.Certificate, error) { + // Increment and persist serial + serial, err := ca.SerialGenerator.Next(template) + if err != nil { + return nil, err + } + template.SerialNumber = big.NewInt(serial) + return signCertificate(template, requestKey, ca.Config.Certs[0], ca.Config.Key) +} + +func NewKeyPair() (crypto.PublicKey, crypto.PrivateKey, error) { + return newRSAKeyPair() +} + +func newKeyPairWithHash() (crypto.PublicKey, crypto.PrivateKey, []byte, error) { + publicKey, privateKey, err := newRSAKeyPair() + var publicKeyHash []byte + if err == nil { + hash := sha1.New() + hash.Write(publicKey.N.Bytes()) + publicKeyHash = hash.Sum(nil) + } + return publicKey, privateKey, publicKeyHash, err +} + +func newRSAKeyPair() (*rsa.PublicKey, *rsa.PrivateKey, error) { + privateKey, err := rsa.GenerateKey(rand.Reader, keyBits) + if err != nil { + return nil, nil, err + } + return &privateKey.PublicKey, privateKey, nil +} + +// Can be used for CA or intermediate signing certs +func 
newSigningCertificateTemplateForDuration(subject pkix.Name, caLifetime time.Duration, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate { + return &x509.Certificate{ + Subject: subject, + + SignatureAlgorithm: x509.SHA256WithRSA, + + NotBefore: currentTime().Add(-1 * time.Second), + NotAfter: currentTime().Add(caLifetime), + + // Specify a random serial number to avoid the same issuer+serial + // number referring to different certs in a chain of trust if the + // signing certificate is ever rotated. + SerialNumber: big.NewInt(randomSerialNumber()), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + IsCA: true, + + AuthorityKeyId: authorityKeyId, + SubjectKeyId: subjectKeyId, + } +} + +// Can be used for ListenAndServeTLS +func newServerCertificateTemplate(subject pkix.Name, hosts []string, lifetime time.Duration, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate { + if lifetime <= 0 { + lifetime = DefaultCertificateLifetimeDuration + fmt.Fprintf(os.Stderr, "Validity period of the certificate for %q is unset, resetting to %s!\n", subject.CommonName, lifetime.String()) + } + + if lifetime > DefaultCertificateLifetimeDuration { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCertificateLifetimeDuration) + } + + return newServerCertificateTemplateForDuration(subject, hosts, lifetime, currentTime, authorityKeyId, subjectKeyId) +} + +// Can be used for ListenAndServeTLS +func newServerCertificateTemplateForDuration(subject pkix.Name, hosts []string, lifetime time.Duration, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate { + template := &x509.Certificate{ + Subject: subject, + + SignatureAlgorithm: x509.SHA256WithRSA, + + NotBefore: currentTime().Add(-1 * time.Second), + NotAfter: currentTime().Add(lifetime), + SerialNumber: big.NewInt(1), + + KeyUsage: 
x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + + AuthorityKeyId: authorityKeyId, + SubjectKeyId: subjectKeyId, + } + + template.IPAddresses, template.DNSNames = IPAddressesDNSNames(hosts) + + return template +} + +func IPAddressesDNSNames(hosts []string) ([]net.IP, []string) { + ips := []net.IP{} + dns := []string{} + for _, host := range hosts { + if ip := net.ParseIP(host); ip != nil { + ips = append(ips, ip) + } else { + dns = append(dns, host) + } + } + + // Include IP addresses as DNS subjectAltNames in the cert as well, for the sake of Python, Windows (< 10), and unnamed other libraries + // Ensure these technically invalid DNS subjectAltNames occur after the valid ones, to avoid triggering cert errors in Firefox + // See https://bugzilla.mozilla.org/show_bug.cgi?id=1148766 + for _, ip := range ips { + dns = append(dns, ip.String()) + } + + return ips, dns +} + +func CertsFromPEM(pemCerts []byte) ([]*x509.Certificate, error) { + ok := false + certs := []*x509.Certificate{} + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if block == nil { + break + } + if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { + continue + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return certs, err + } + + certs = append(certs, cert) + ok = true + } + + if !ok { + return certs, errors.New("could not read any certificates") + } + return certs, nil +} + +// Can be used as a certificate in http.Transport TLSClientConfig +func NewClientCertificateTemplate(subject pkix.Name, lifetime time.Duration, currentTime func() time.Time) *x509.Certificate { + if lifetime <= 0 { + lifetime = DefaultCertificateLifetimeDuration + fmt.Fprintf(os.Stderr, "Validity period of the certificate for %q is unset, resetting to %s!\n", subject.CommonName, lifetime.String()) + } + + if lifetime > 
DefaultCertificateLifetimeDuration { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCertificateLifetimeDuration) + } + + return NewClientCertificateTemplateForDuration(subject, lifetime, currentTime) +} + +// Can be used as a certificate in http.Transport TLSClientConfig +func NewClientCertificateTemplateForDuration(subject pkix.Name, lifetime time.Duration, currentTime func() time.Time) *x509.Certificate { + return &x509.Certificate{ + Subject: subject, + + SignatureAlgorithm: x509.SHA256WithRSA, + + NotBefore: currentTime().Add(-1 * time.Second), + NotAfter: currentTime().Add(lifetime), + SerialNumber: big.NewInt(1), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + BasicConstraintsValid: true, + } +} + +func warnAboutCertificateLifeTime(name string, defaultLifetimeDuration time.Duration) { + defaultLifetimeInYears := defaultLifetimeDuration / 365 / 24 + fmt.Fprintf(os.Stderr, "WARNING: Validity period of the certificate for %q is greater than %d years!\n", name, defaultLifetimeInYears) + fmt.Fprintln(os.Stderr, "WARNING: By security reasons it is strongly recommended to change this period and make it smaller!") +} + +func signCertificate(template *x509.Certificate, requestKey crypto.PublicKey, issuer *x509.Certificate, issuerKey crypto.PrivateKey) (*x509.Certificate, error) { + derBytes, err := x509.CreateCertificate(rand.Reader, template, issuer, requestKey, issuerKey) + if err != nil { + return nil, err + } + certs, err := x509.ParseCertificates(derBytes) + if err != nil { + return nil, err + } + if len(certs) != 1 { + return nil, errors.New("expected a single certificate") + } + return certs[0], nil +} + +func EncodeCertificates(certs ...*x509.Certificate) ([]byte, error) { + b := bytes.Buffer{} + for _, cert := range certs { + if err := pem.Encode(&b, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil { + return []byte{}, err + } + } + return 
b.Bytes(), nil +} +func EncodeKey(key crypto.PrivateKey) ([]byte, error) { + b := bytes.Buffer{} + switch key := key.(type) { + case *ecdsa.PrivateKey: + keyBytes, err := x509.MarshalECPrivateKey(key) + if err != nil { + return []byte{}, err + } + if err := pem.Encode(&b, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil { + return b.Bytes(), err + } + case *rsa.PrivateKey: + if err := pem.Encode(&b, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)}); err != nil { + return []byte{}, err + } + default: + return []byte{}, errors.New("unrecognized key type") + + } + return b.Bytes(), nil +} + +func writeCertificates(f io.Writer, certs ...*x509.Certificate) error { + bytes, err := EncodeCertificates(certs...) + if err != nil { + return err + } + if _, err := f.Write(bytes); err != nil { + return err + } + + return nil +} +func writeKeyFile(f io.Writer, key crypto.PrivateKey) error { + bytes, err := EncodeKey(key) + if err != nil { + return err + } + if _, err := f.Write(bytes); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go b/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go new file mode 100644 index 0000000000..0aa127037c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go @@ -0,0 +1,20 @@ +package crypto + +import ( + "crypto/x509" + "time" +) + +// FilterExpiredCerts checks are all certificates in the bundle valid, i.e. they have not expired. +// The function returns new bundle with only valid certificates or error if no valid certificate is found. 
+func FilterExpiredCerts(certs ...*x509.Certificate) []*x509.Certificate { + currentTime := time.Now() + var validCerts []*x509.Certificate + for _, c := range certs { + if c.NotAfter.After(currentTime) { + validCerts = append(validCerts, c) + } + } + + return validCerts +} diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/olm/operator.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/olm/operator.go index 838010d190..cd9bb45e9c 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/olm/operator.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/olm/operator.go @@ -57,6 +57,7 @@ import ( "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/event" index "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/index" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/labeler" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/openshiftconfig" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorlister" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil" @@ -852,19 +853,20 @@ func newOperatorWithConfig(ctx context.Context, config *operatorConfig) (*Operat return nil, err } - // setup proxy env var injection policies + // Check if OpenShift config API is available (used by proxy and apiserver controllers) discovery := config.operatorClient.KubernetesInterface().Discovery() - proxyAPIExists, err := proxy.IsAPIAvailable(discovery) + openshiftConfigAPIExists, err := openshiftconfig.IsAPIAvailable(discovery) if err != nil { - op.logger.Errorf("error happened while probing for Proxy API support - %v", err) + op.logger.Errorf("error happened while probing for OpenShift config API support - %v", err) return 
nil, err } + // setup proxy env var injection policies proxyQuerierInUse := proxy.NoopQuerier() - if proxyAPIExists { + if openshiftConfigAPIExists { op.logger.Info("OpenShift Proxy API available - setting up watch for Proxy type") - proxyInformer, proxySyncer, proxyQuerier, err := proxy.NewSyncer(op.logger, config.configClient, discovery) + proxyInformer, proxySyncer, proxyQuerier, err := proxy.NewSyncer(op.logger, config.configClient) if err != nil { err = fmt.Errorf("failed to initialize syncer for Proxy type - %v", err) return nil, err diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver/querier.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver/querier.go new file mode 100644 index 0000000000..94963890e1 --- /dev/null +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver/querier.go @@ -0,0 +1,35 @@ +package apiserver + +import ( + "crypto/tls" + "fmt" +) + +// NoopQuerier returns an instance of noopQuerier. It's used for upstream where +// we don't have any apiserver.config.openshift.io/cluster resource. +func NoopQuerier() Querier { + return &noopQuerier{} +} + +// Querier is an interface that wraps the QueryTLSConfig method. +// +// QueryTLSConfig updates the provided TLS configuration with cluster-wide +// TLS security profile settings (MinVersion, CipherSuites, PreferServerCipherSuites). +type Querier interface { + QueryTLSConfig(config *tls.Config) error +} + +type noopQuerier struct { +} + +// QueryTLSConfig applies secure default TLS settings to the provided config. +// This is used on non-OpenShift clusters where there is no apiserver.config.openshift.io/cluster resource, +// but we still want to ensure secure TLS configuration. 
+func (*noopQuerier) QueryTLSConfig(config *tls.Config) error { + if config == nil { + return fmt.Errorf("tls.Config cannot be nil") + } + + // Apply secure defaults for non-OpenShift clusters + return ApplySecureDefaults(config) +} diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver/syncer.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver/syncer.go new file mode 100644 index 0000000000..4b3793cd62 --- /dev/null +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver/syncer.go @@ -0,0 +1,212 @@ +package apiserver + +import ( + "crypto/tls" + "fmt" + "sync" + "time" + + "github.com/openshift/client-go/config/informers/externalversions" + + apiconfigv1 "github.com/openshift/api/config/v1" + configv1client "github.com/openshift/client-go/config/clientset/versioned" + configv1 "github.com/openshift/client-go/config/informers/externalversions/config/v1" + "github.com/sirupsen/logrus" + "k8s.io/client-go/tools/cache" +) + +const ( + // This is the cluster level global apiserver.config.openshift.io/cluster object name. + globalAPIServerName = "cluster" + + // default sync interval + defaultSyncInterval = 30 * time.Minute +) + +// NewSyncer returns informer and sync functions to enable watch of the apiserver.config.openshift.io/cluster resource. +func NewSyncer(logger *logrus.Logger, client configv1client.Interface) (apiServerInformer configv1.APIServerInformer, syncer *Syncer, querier Querier, factory externalversions.SharedInformerFactory, err error) { + factory = externalversions.NewSharedInformerFactoryWithOptions(client, defaultSyncInterval) + apiServerInformer = factory.Config().V1().APIServers() + s := &Syncer{ + logger: logger, + currentConfig: newTLSConfigHolder(), + } + + syncer = s + querier = s + return +} + +// RegisterEventHandlers registers event handlers for apiserver.config.openshift.io/cluster resource changes. 
+// This is a convenience function to set up Add/Update/Delete handlers that call +// the syncer's SyncAPIServer and HandleAPIServerDelete methods. +func RegisterEventHandlers(informer configv1.APIServerInformer, syncer *Syncer) { + informer.Informer().AddEventHandler(&cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + if err := syncer.SyncAPIServer(obj); err != nil { + syncer.logger.WithError(err).Error("error syncing APIServer on add") + } + }, + UpdateFunc: func(_, newObj interface{}) { + if err := syncer.SyncAPIServer(newObj); err != nil { + syncer.logger.WithError(err).Error("error syncing APIServer on update") + } + }, + DeleteFunc: func(obj interface{}) { + syncer.HandleAPIServerDelete(obj) + }, + }) +} + +// Syncer deals with watching APIServer type(s) on the cluster and let the caller +// query for cluster scoped APIServer TLS configuration. +type Syncer struct { + logger *logrus.Logger + currentConfig *tlsConfigHolder +} + +// tlsConfigHolder holds TLS configuration in a thread-safe manner. +// It always contains a valid configuration with secure defaults. +type tlsConfigHolder struct { + mu sync.RWMutex + config tls.Config +} + +// newTLSConfigHolder creates a new holder initialized with secure defaults. +func newTLSConfigHolder() *tlsConfigHolder { + h := &tlsConfigHolder{} + // Initialize with secure defaults + _ = ApplySecureDefaults(&h.config) + return h +} + +// update atomically updates the stored TLS configuration. +func (h *tlsConfigHolder) update(minVersion uint16, cipherSuites []uint16) { + h.mu.Lock() + defer h.mu.Unlock() + + h.config.MinVersion = minVersion + // Make a defensive copy of the slice + h.config.CipherSuites = make([]uint16, len(cipherSuites)) + copy(h.config.CipherSuites, cipherSuites) + h.config.PreferServerCipherSuites = true +} + +// copyTo atomically copies the cached TLS settings to the provided config. +// All reading and copying happens under the read lock, ensuring thread safety. 
+func (h *tlsConfigHolder) copyTo(config *tls.Config) { + h.mu.RLock() + defer h.mu.RUnlock() + + // Copy all fields while holding the lock + config.MinVersion = h.config.MinVersion + config.CipherSuites = make([]uint16, len(h.config.CipherSuites)) + copy(config.CipherSuites, h.config.CipherSuites) + config.PreferServerCipherSuites = h.config.PreferServerCipherSuites +} + +// QueryTLSConfig queries the global cluster level APIServer object and updates +// the provided TLS configuration with the cluster-wide security profile settings. +func (w *Syncer) QueryTLSConfig(config *tls.Config) error { + if config == nil { + return fmt.Errorf("tls.Config cannot be nil") + } + + // Copy the current cached config atomically + // This always succeeds because currentConfig always has a valid value + w.currentConfig.copyTo(config) + return nil +} + +// SyncAPIServer is invoked when a cluster scoped APIServer object is added or modified. +func (w *Syncer) SyncAPIServer(object interface{}) error { + apiserver, ok := object.(*apiconfigv1.APIServer) + if !ok { + w.logger.Error("wrong type in APIServer syncer") + return nil + } + + // Convert the TLS security profile to get new settings + minVersion, cipherSuites := GetSecurityProfileConfig(apiserver.Spec.TLSSecurityProfile) + + // Check if configuration changed (before updating) + changed := w.hasConfigChanged(minVersion, cipherSuites) + + // Update the stored configuration atomically + w.currentConfig.update(minVersion, cipherSuites) + + // Log if configuration changed + if changed { + profileName := getProfileName(apiserver.Spec.TLSSecurityProfile) + w.logger.Infof("APIServer TLS configuration changed: profile=%s, minVersion=%s, cipherCount=%d", + profileName, + tlsVersionToString(minVersion), + len(cipherSuites)) + } + + return nil +} + +// HandleAPIServerDelete is invoked when a cluster scoped APIServer object is deleted. 
+func (w *Syncer) HandleAPIServerDelete(object interface{}) { + _, ok := object.(*apiconfigv1.APIServer) + if !ok { + w.logger.Error("wrong type in APIServer delete syncer") + return + } + + // Reset to secure defaults (Intermediate profile) + w.currentConfig.update(GetSecurityProfileConfig(nil)) + + w.logger.Info("APIServer TLS configuration deleted, reverted to secure defaults") + return +} + +// hasConfigChanged checks if the new TLS settings differ from the current cached settings. +func (w *Syncer) hasConfigChanged(minVersion uint16, cipherSuites []uint16) bool { + w.currentConfig.mu.RLock() + defer w.currentConfig.mu.RUnlock() + + if w.currentConfig.config.MinVersion != minVersion { + return true + } + if len(w.currentConfig.config.CipherSuites) != len(cipherSuites) { + return true + } + for i := range cipherSuites { + if w.currentConfig.config.CipherSuites[i] != cipherSuites[i] { + return true + } + } + return false +} + +// getProfileName returns the TLS security profile name for logging. 
+func getProfileName(profile *apiconfigv1.TLSSecurityProfile) string { + if profile == nil { + return "Intermediate (default)" + } + + profileType := string(profile.Type) + if profileType == "" { + return "Intermediate (default)" + } + + return profileType +} + +// tlsVersionToString converts a TLS version number to a string +func tlsVersionToString(version uint16) string { + switch version { + case tls.VersionTLS10: + return "TLS 1.0" + case tls.VersionTLS11: + return "TLS 1.1" + case tls.VersionTLS12: + return "TLS 1.2" + case tls.VersionTLS13: + return "TLS 1.3" + default: + return "unknown" + } +} diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver/tlsconfig.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver/tlsconfig.go new file mode 100644 index 0000000000..29a240b179 --- /dev/null +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver/tlsconfig.go @@ -0,0 +1,105 @@ +package apiserver + +import ( + "crypto/tls" + + apiconfigv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/crypto" +) + +// GetSecurityProfileConfig extracts the minimum TLS version and cipher suites +// from a TLSSecurityProfile object. Converts OpenSSL cipher names to Go TLS cipher IDs. +// If profile is nil, returns config defined by the Intermediate TLS Profile. 
+func GetSecurityProfileConfig(profile *apiconfigv1.TLSSecurityProfile) (uint16, []uint16) { + var profileType apiconfigv1.TLSProfileType + if profile == nil { + profileType = apiconfigv1.TLSProfileIntermediateType + } else { + profileType = profile.Type + } + + var profileSpec *apiconfigv1.TLSProfileSpec + if profileType == apiconfigv1.TLSProfileCustomType { + if profile.Custom != nil { + profileSpec = &profile.Custom.TLSProfileSpec + } + } else { + profileSpec = apiconfigv1.TLSProfiles[profileType] + } + + // nothing found / custom type set but no actual custom spec + if profileSpec == nil { + profileSpec = apiconfigv1.TLSProfiles[apiconfigv1.TLSProfileIntermediateType] + } + + // Convert the TLS version string to the Go constant + minTLSVersion, err := crypto.TLSVersion(string(profileSpec.MinTLSVersion)) + if err != nil { + // Fallback to default if conversion fails + minTLSVersion = crypto.DefaultTLSVersion() + } + + // Convert OpenSSL cipher names to IANA names, then to Go cipher suite IDs + ianaCipherNames := crypto.OpenSSLToIANACipherSuites(profileSpec.Ciphers) + cipherSuites := CipherNamesToIDs(ianaCipherNames) + + return minTLSVersion, cipherSuites +} + +// CipherNamesToIDs converts IANA cipher suite names to Go TLS cipher suite IDs +func CipherNamesToIDs(cipherNames []string) []uint16 { + var cipherIDs []uint16 + + for _, name := range cipherNames { + if id, err := crypto.CipherSuite(name); err == nil { + cipherIDs = append(cipherIDs, id) + } + } + + // If no valid ciphers were found, use defaults + if len(cipherIDs) == 0 { + cipherIDs = crypto.DefaultCiphers() + } + + return cipherIDs +} + +// ApplySecureDefaults applies secure default TLS settings to the provided config. +// This ensures a minimum security baseline even when no cluster-wide profile is configured. 
+func ApplySecureDefaults(config *tls.Config) error { + if config.MinVersion == 0 { + config.MinVersion = crypto.DefaultTLSVersion() + } + if len(config.CipherSuites) == 0 { + config.CipherSuites = crypto.DefaultCiphers() + } + config.PreferServerCipherSuites = true + return nil +} + +// GetConfigForClient returns a GetConfigForClient callback function that can be used +// with tls.Config to provide per-connection dynamic TLS configuration updates. +// This allows the TLS settings to be updated without restarting the server. +// +// Example usage: +// +// server := &http.Server{ +// Addr: ":8443", +// TLSConfig: &tls.Config{ +// GetConfigForClient: apiserver.GetConfigForClient(querier), +// // Other settings like Certificates, ClientCAs, etc. +// }, +// } +func GetConfigForClient(querier Querier) func(*tls.ClientHelloInfo) (*tls.Config, error) { + return func(hello *tls.ClientHelloInfo) (*tls.Config, error) { + // Create a new config for this connection + config := &tls.Config{} + + // Apply cluster-wide TLS profile settings + if err := querier.QueryTLSConfig(config); err != nil { + return nil, err + } + + return config, nil + } +} diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/proxy/available.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/openshiftconfig/available.go similarity index 73% rename from vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/proxy/available.go rename to vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/openshiftconfig/available.go index 03eff9850a..debf483a26 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/proxy/available.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/openshiftconfig/available.go @@ -1,4 +1,4 @@ -package proxy +package openshiftconfig import ( "errors" @@ -14,12 +14,11 @@ const ( notSupportedErrorMessage = "server does not support API version" 
) -// IsAPIAvailable return true if OpenShift config API is present on the cluster. +// IsAPIAvailable returns true if OpenShift config API is present on the cluster. // Otherwise, supported is set to false. -func IsAPIAvailable(discovery apidiscovery.DiscoveryInterface) (supported bool, err error) { +func IsAPIAvailable(discovery apidiscovery.DiscoveryInterface) (bool, error) { if discovery == nil { - err = errors.New("discovery interface can not be ") - return + return false, errors.New("discovery interface can not be nil") } opStatusGV := schema.GroupVersion{ @@ -28,13 +27,11 @@ func IsAPIAvailable(discovery apidiscovery.DiscoveryInterface) (supported bool, } if discoveryErr := apidiscovery.ServerSupportsVersion(discovery, opStatusGV); discoveryErr != nil { if strings.Contains(discoveryErr.Error(), notSupportedErrorMessage) { - return + return false, nil } - err = discoveryErr - return + return false, discoveryErr } - supported = true - return + return true, nil } diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/proxy/syncer.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/proxy/syncer.go index b31df18bc9..6647481390 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/proxy/syncer.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/proxy/syncer.go @@ -12,7 +12,6 @@ import ( "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/client-go/discovery" ) const ( @@ -24,7 +23,7 @@ const ( ) // NewSyncer returns informer and sync functions to enable watch of Proxy type. 
-func NewSyncer(logger *logrus.Logger, client configv1client.Interface, discovery discovery.DiscoveryInterface) (proxyInformer configv1.ProxyInformer, syncer *Syncer, querier Querier, err error) { +func NewSyncer(logger *logrus.Logger, client configv1client.Interface) (proxyInformer configv1.ProxyInformer, syncer *Syncer, querier Querier, err error) { factory := externalversions.NewSharedInformerFactoryWithOptions(client, defaultSyncInterval) proxyInformer = factory.Config().V1().Proxies() s := &Syncer{ diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/server/server.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/server/server.go index 90608987ea..f56622ec52 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/server/server.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/lib/server/server.go @@ -8,6 +8,7 @@ import ( "net/http" "path/filepath" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/filemonitor" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/profile" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -58,13 +59,20 @@ func WithKubeConfig(config *rest.Config) Option { } } +func WithAPIServerTLSQuerier(querier apiserver.Querier) Option { + return func(sc *serverConfig) { + sc.apiServerTLSQuerier = querier + } +} + type serverConfig struct { - logger *logrus.Logger - tlsCertPath *string - tlsKeyPath *string - clientCAPath *string - kubeConfig *rest.Config - debug bool + logger *logrus.Logger + tlsCertPath *string + tlsKeyPath *string + clientCAPath *string + kubeConfig *rest.Config + apiServerTLSQuerier apiserver.Querier + debug bool } func (sc *serverConfig) apply(options []Option) { @@ -75,12 +83,13 @@ func (sc *serverConfig) apply(options []Option) { func defaultServerConfig() serverConfig { return serverConfig{ 
- tlsCertPath: nil, - tlsKeyPath: nil, - clientCAPath: nil, - kubeConfig: nil, - logger: nil, - debug: false, + tlsCertPath: nil, + tlsKeyPath: nil, + clientCAPath: nil, + kubeConfig: nil, + logger: nil, + apiServerTLSQuerier: nil, + debug: false, } } func (sc *serverConfig) tlsEnabled() (bool, error) { @@ -213,6 +222,14 @@ func (sc serverConfig) getListenAndServeFunc() (func() error, error) { tlsCfg.ClientCAs = certPoolStore.GetCertPool() tlsCfg.ClientAuth = tls.VerifyClientCertIfGiven } + + // Overlay cluster-wide TLS security profile settings if available + if sc.apiServerTLSQuerier != nil { + if err := sc.apiServerTLSQuerier.QueryTLSConfig(tlsCfg); err != nil { + sc.logger.WithError(err).Warn("Failed to query APIServer TLS config, using defaults") + } + } + return tlsCfg, nil }, NextProtos: []string{"http/1.1"}, // Disable HTTP/2 for security diff --git a/vendor/modules.txt b/vendor/modules.txt index 942da8a821..aaf3725837 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -625,6 +625,9 @@ github.com/openshift/client-go/config/informers/externalversions/internalinterfa github.com/openshift/client-go/config/listers/config/v1 github.com/openshift/client-go/config/listers/config/v1alpha1 github.com/openshift/client-go/config/listers/config/v1alpha2 +# github.com/openshift/library-go v0.0.0-20260108135436-db8dbd64c462 +## explicit; go 1.24.0 +github.com/openshift/library-go/pkg/crypto # github.com/operator-framework/api v0.37.0 => ./staging/api ## explicit; go 1.24.6 github.com/operator-framework/api/crds @@ -695,6 +698,7 @@ github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/projection github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver/solver github.com/operator-framework/operator-lifecycle-manager/pkg/feature +github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver 
github.com/operator-framework/operator-lifecycle-manager/pkg/lib/catalogsource github.com/operator-framework/operator-lifecycle-manager/pkg/lib/clients github.com/operator-framework/operator-lifecycle-manager/pkg/lib/codec @@ -716,6 +720,7 @@ github.com/operator-framework/operator-lifecycle-manager/pkg/lib/kubernetes/pkg/ github.com/operator-framework/operator-lifecycle-manager/pkg/lib/kubernetes/plugin/pkg/auth/authorizer/rbac github.com/operator-framework/operator-lifecycle-manager/pkg/lib/kubestate github.com/operator-framework/operator-lifecycle-manager/pkg/lib/labeler +github.com/operator-framework/operator-lifecycle-manager/pkg/lib/openshiftconfig github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorlister github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorstatus From cc75f1f6ff3066813166263442ae04deb196aba3 Mon Sep 17 00:00:00 2001 From: Camila Macedo <7708031+camilamacedo86@users.noreply.github.com> Date: Thu, 22 Jan 2026 19:43:13 +0000 Subject: [PATCH 6/6] (e2e): Enhance e2e test to check workload resilience when catalogs goes away (#3747) Upstream-repository: operator-lifecycle-manager Upstream-commit: d30e23ddb3b5e1ded058b62284d46c21c86d2966 --- .../test/e2e/catalog_e2e_test.go | 245 +++++++----------- 1 file changed, 99 insertions(+), 146 deletions(-) diff --git a/staging/operator-lifecycle-manager/test/e2e/catalog_e2e_test.go b/staging/operator-lifecycle-manager/test/e2e/catalog_e2e_test.go index f003611e92..7a375d3e54 100644 --- a/staging/operator-lifecycle-manager/test/e2e/catalog_e2e_test.go +++ b/staging/operator-lifecycle-manager/test/e2e/catalog_e2e_test.go @@ -1864,28 +1864,23 @@ var _ = Describe("Starting CatalogSource e2e tests", Label("CatalogSource"), fun By("Wait for operator deployment to be ready") var operatorDeployment *appsv1.Deployment - Eventually(func() error { + Eventually(func(g Gomega) { + var err error 
operatorDeployment, err = c.GetDeployment(generatedNamespace.GetName(), deploymentName) - if err != nil { - return err - } - if operatorDeployment.Spec.Replicas == nil || *operatorDeployment.Spec.Replicas == 0 { - return fmt.Errorf("deployment replicas is not set") - } - if operatorDeployment.Status.AvailableReplicas != *operatorDeployment.Spec.Replicas { - return fmt.Errorf("deployment %s not ready: %d/%d replicas available", - deploymentName, - operatorDeployment.Status.AvailableReplicas, - *operatorDeployment.Spec.Replicas) - } - if operatorDeployment.Status.ReadyReplicas != *operatorDeployment.Spec.Replicas { - return fmt.Errorf("deployment %s not ready: %d/%d replicas ready", - deploymentName, - operatorDeployment.Status.ReadyReplicas, - *operatorDeployment.Spec.Replicas) - } - return nil - }, pollDuration, pollInterval).Should(Succeed()) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(operatorDeployment.Spec.Replicas).NotTo(BeNil()) + g.Expect(*operatorDeployment.Spec.Replicas).NotTo(BeZero()) + g.Expect(operatorDeployment.Status.AvailableReplicas).To(Equal(*operatorDeployment.Spec.Replicas), + "deployment %s not ready: %d/%d replicas available", + deploymentName, + operatorDeployment.Status.AvailableReplicas, + *operatorDeployment.Spec.Replicas) + g.Expect(operatorDeployment.Status.ReadyReplicas).To(Equal(*operatorDeployment.Spec.Replicas), + "deployment %s not ready: %d/%d replicas ready", + deploymentName, + operatorDeployment.Status.ReadyReplicas, + *operatorDeployment.Spec.Replicas) + }).WithTimeout(pollDuration).WithPolling(pollInterval).Should(Succeed()) By("Record deployment state before catalog deletion") deploymentUID := operatorDeployment.UID @@ -1893,14 +1888,15 @@ var _ = Describe("Starting CatalogSource e2e tests", Label("CatalogSource"), fun By("Verify ServiceAccount, Role, and RoleBinding created by OLM") var serviceAccount *corev1.ServiceAccount - Eventually(func() error { + Eventually(func(g Gomega) { + var err error serviceAccount, err 
= c.KubernetesInterface().CoreV1().ServiceAccounts(generatedNamespace.GetName()).Get( context.Background(), serviceAccountName, metav1.GetOptions{}, ) - return err - }, pollDuration, pollInterval).Should(Succeed()) + g.Expect(err).ShouldNot(HaveOccurred()) + }).WithTimeout(pollDuration).WithPolling(pollInterval).Should(Succeed()) serviceAccountUID := serviceAccount.UID // Roles and RoleBindings are owned by the CSV with generated names, so we list them by owner @@ -1911,35 +1907,27 @@ var _ = Describe("Starting CatalogSource e2e tests", Label("CatalogSource"), fun }) var roleList *rbacv1.RoleList - Eventually(func() error { + Eventually(func(g Gomega) { + var err error roleList, err = c.KubernetesInterface().RbacV1().Roles(generatedNamespace.GetName()).List( context.Background(), metav1.ListOptions{LabelSelector: ownerSelector.String()}, ) - if err != nil { - return err - } - if len(roleList.Items) == 0 { - return fmt.Errorf("no roles found owned by CSV") - } - return nil - }, pollDuration, pollInterval).Should(Succeed()) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(roleList.Items).ToNot(BeEmpty(), "no roles found owned by CSV") + }).WithTimeout(pollDuration).WithPolling(pollInterval).Should(Succeed()) roleUID := roleList.Items[0].UID var roleBindingList *rbacv1.RoleBindingList - Eventually(func() error { + Eventually(func(g Gomega) { + var err error roleBindingList, err = c.KubernetesInterface().RbacV1().RoleBindings(generatedNamespace.GetName()).List( context.Background(), metav1.ListOptions{LabelSelector: ownerSelector.String()}, ) - if err != nil { - return err - } - if len(roleBindingList.Items) == 0 { - return fmt.Errorf("no rolebindings found owned by CSV") - } - return nil - }, pollDuration, pollInterval).Should(Succeed()) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(roleBindingList.Items).ToNot(BeEmpty(), "no rolebindings found owned by CSV") + }).WithTimeout(pollDuration).WithPolling(pollInterval).Should(Succeed()) roleBindingUID := 
roleBindingList.Items[0].UID By("Delete catalog source") @@ -1947,108 +1935,71 @@ var _ = Describe("Starting CatalogSource e2e tests", Label("CatalogSource"), fun Expect(err).ShouldNot(HaveOccurred()) By("Wait for catalog source to be removed") - Eventually(func() error { + Eventually(func(g Gomega) { _, err := crc.OperatorsV1alpha1().CatalogSources(catalogSource.GetNamespace()).Get(context.Background(), catalogSource.GetName(), metav1.GetOptions{}) - if err == nil { - return fmt.Errorf("catalog source still exists") - } - if !k8serror.IsNotFound(err) { - return err - } - return nil - }, pollDuration, pollInterval).Should(Succeed()) + g.Expect(k8serror.IsNotFound(err)).To(BeTrue(), "catalog source should be deleted") + }).WithTimeout(pollDuration).WithPolling(pollInterval).Should(Succeed()) By("Wait for catalog source pod to be deleted") - Eventually(func() error { + Eventually(func(g Gomega) { listOpts := metav1.ListOptions{ LabelSelector: "olm.catalogSource=" + catalogSourceName, } pods, err := c.KubernetesInterface().CoreV1().Pods(catalogSource.GetNamespace()).List(context.Background(), listOpts) - if err != nil { - return err - } - if len(pods.Items) > 0 { - return fmt.Errorf("catalog source pod still exists: %d pods found", len(pods.Items)) - } - return nil - }, pollDuration, pollInterval).Should(Succeed()) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(pods.Items).To(BeEmpty(), "catalog source pod should be deleted") + }).WithTimeout(pollDuration).WithPolling(pollInterval).Should(Succeed()) By("Verify subscription behavior after catalog deletion") - Eventually(func() error { + Eventually(func(g Gomega) { sub, err := crc.OperatorsV1alpha1().Subscriptions(generatedNamespace.GetName()).Get( context.Background(), subscriptionName, metav1.GetOptions{}, ) - if err != nil { - return fmt.Errorf("failed to get subscription: %w", err) - } + g.Expect(err).ShouldNot(HaveOccurred(), "failed to get subscription") // Subscription should still track the installed CSV - 
if sub.Status.InstalledCSV != packageStable { - return fmt.Errorf("subscription InstalledCSV changed from %s to %s", packageStable, sub.Status.InstalledCSV) - } + g.Expect(sub.Status.InstalledCSV).To(Equal(packageStable), "subscription InstalledCSV should not change") // Verify catalog health behavior: if the deleted catalog is still in the health list, // it should be marked as unhealthy. If it's been removed from the list, that's also acceptable. for _, health := range sub.Status.CatalogHealth { if health.CatalogSourceRef != nil && health.CatalogSourceRef.Name == catalogSourceName { - if health.Healthy { - return fmt.Errorf("subscription still reports deleted catalog %s as healthy", catalogSourceName) - } + g.Expect(health.Healthy).To(BeFalse(), "subscription should not report deleted catalog %s as healthy", catalogSourceName) } } - - return nil - }, pollDuration, pollInterval).Should(Succeed()) + }).WithTimeout(pollDuration).WithPolling(pollInterval).Should(Succeed()) By("Verify CSV remains in succeeded state after catalog deletion") - Consistently(func() error { + Consistently(func(g Gomega) { fetchedCSV, err := crc.OperatorsV1alpha1().ClusterServiceVersions(generatedNamespace.GetName()).Get( context.Background(), installedCSV.GetName(), metav1.GetOptions{}, ) - if err != nil { - return fmt.Errorf("failed to get CSV: %w", err) - } - if fetchedCSV.Status.Phase != v1alpha1.CSVPhaseSucceeded { - return fmt.Errorf("CSV phase is %s, expected Succeeded", fetchedCSV.Status.Phase) - } - return nil - }, 3*time.Minute, pollInterval).Should(Succeed()) + g.Expect(err).ShouldNot(HaveOccurred(), "failed to get CSV") + g.Expect(fetchedCSV.Status.Phase).To(Equal(v1alpha1.CSVPhaseSucceeded), "CSV should remain in Succeeded state") + }).WithTimeout(pollDuration).WithPolling(pollInterval).Should(Succeed()) By("Verify deployment remains healthy and unchanged") - Consistently(func() error { + Consistently(func(g Gomega) { deployment, err := 
c.GetDeployment(generatedNamespace.GetName(), deploymentName) - if err != nil { - return fmt.Errorf("failed to get deployment: %w", err) - } - if deployment.UID != deploymentUID { - return fmt.Errorf("deployment was recreated") - } - if deployment.Spec.Replicas == nil { - return fmt.Errorf("deployment replicas is nil") - } - if deployment.Status.AvailableReplicas != expectedReplicas { - return fmt.Errorf("available replicas: got %d, want %d", deployment.Status.AvailableReplicas, expectedReplicas) - } - if deployment.Status.ReadyReplicas != expectedReplicas { - return fmt.Errorf("ready replicas: got %d, want %d", deployment.Status.ReadyReplicas, expectedReplicas) - } - return nil - }, 3*time.Minute, pollInterval).Should(Succeed()) + g.Expect(err).ShouldNot(HaveOccurred(), "failed to get deployment") + g.Expect(deployment.UID).To(Equal(deploymentUID), "deployment should not be recreated") + g.Expect(deployment.Spec.Replicas).NotTo(BeNil(), "deployment replicas should be set") + g.Expect(deployment.Status.AvailableReplicas).To(Equal(expectedReplicas), "available replicas should match expected") + g.Expect(deployment.Status.ReadyReplicas).To(Equal(expectedReplicas), "ready replicas should match expected") + }).WithTimeout(pollDuration).WithPolling(pollInterval).Should(Succeed()) By("Test OLM config management - add environment variable via subscription") - Eventually(func() error { + Eventually(func(g Gomega) { sub, err := crc.OperatorsV1alpha1().Subscriptions(generatedNamespace.GetName()).Get( context.Background(), subscriptionName, metav1.GetOptions{}, ) - if err != nil { - return err - } + g.Expect(err).ShouldNot(HaveOccurred()) if sub.Spec.Config == nil { sub.Spec.Config = &v1alpha1.SubscriptionConfig{} @@ -2062,26 +2013,25 @@ var _ = Describe("Starting CatalogSource e2e tests", Label("CatalogSource"), fun sub, metav1.UpdateOptions{}, ) - return err - }, pollDuration, pollInterval).Should(Succeed()) + g.Expect(err).ShouldNot(HaveOccurred()) + 
}).WithTimeout(pollDuration).WithPolling(pollInterval).Should(Succeed()) By("Wait for deployment to have the environment variable") - Eventually(func() error { + Eventually(func(g Gomega) { deployment, err := c.GetDeployment(generatedNamespace.GetName(), deploymentName) - if err != nil { - return err - } - if len(deployment.Spec.Template.Spec.Containers) == 0 { - return fmt.Errorf("no containers in deployment") - } + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(deployment.Spec.Template.Spec.Containers).NotTo(BeEmpty(), "deployment should have containers") + container := deployment.Spec.Template.Spec.Containers[0] + envVarFound := false for _, env := range container.Env { if env.Name == "TEST_ENV_VAR" && env.Value == "test-value" { - return nil + envVarFound = true + break } } - return fmt.Errorf("TEST_ENV_VAR not found in deployment") - }, pollDuration, pollInterval).Should(Succeed()) + g.Expect(envVarFound).To(BeTrue(), "TEST_ENV_VAR should be found in deployment") + }).WithTimeout(pollDuration).WithPolling(pollInterval).Should(Succeed()) By("Delete the operator deployment to test OLM reconciliation") err = c.KubernetesInterface().AppsV1().Deployments(generatedNamespace.GetName()).Delete( @@ -2092,43 +2042,44 @@ var _ = Describe("Starting CatalogSource e2e tests", Label("CatalogSource"), fun Expect(err).ShouldNot(HaveOccurred()) By("Wait for deployment to be deleted") - Eventually(func() error { + Eventually(func(g Gomega) { _, err := c.GetDeployment(generatedNamespace.GetName(), deploymentName) - if err == nil { - return fmt.Errorf("deployment still exists") - } - if !k8serror.IsNotFound(err) { - return err - } - return nil - }, pollDuration, pollInterval).Should(Succeed()) + g.Expect(k8serror.IsNotFound(err)).To(BeTrue(), "deployment should be deleted") + }).WithTimeout(pollDuration).WithPolling(pollInterval).Should(Succeed()) By("Wait for OLM to recreate the deployment") - Eventually(func() error { + // Use a longer timeout here since OLM needs to: + // 
1. Detect the deployment deletion (via watch or reconciliation loop) + // 2. Recreate the deployment + // 3. Wait for the deployment to become ready (pull image, start pod, etc.) + // In slow/busy CI environments, this can take longer than the standard 5 minutes + Eventually(func(g Gomega) { deployment, err := c.GetDeployment(generatedNamespace.GetName(), deploymentName) - if err != nil { - return fmt.Errorf("deployment not recreated yet: %w", err) - } - if deployment.UID == deploymentUID { - return fmt.Errorf("deployment UID unchanged, not recreated") - } - if deployment.Spec.Replicas == nil { - return fmt.Errorf("deployment replicas is nil") - } - if deployment.Status.AvailableReplicas != expectedReplicas { - return fmt.Errorf("available replicas: got %d, want %d", deployment.Status.AvailableReplicas, expectedReplicas) - } - if deployment.Status.ReadyReplicas != expectedReplicas { - return fmt.Errorf("ready replicas: got %d, want %d", deployment.Status.ReadyReplicas, expectedReplicas) - } - return nil - }, pollDuration, pollInterval).Should(Succeed()) + g.Expect(err).ShouldNot(HaveOccurred(), "deployment should exist") + g.Expect(deployment.UID).NotTo(Equal(deploymentUID), "deployment should have new UID (recreated)") + g.Expect(deployment.Spec.Replicas).NotTo(BeNil(), "deployment replicas should be set") + g.Expect(*deployment.Spec.Replicas).NotTo(BeZero(), "deployment replicas should not be zero") + + // Check that pods are actually ready, not just that the deployment exists + g.Expect(deployment.Status.AvailableReplicas).To(Equal(expectedReplicas), + "deployment should have %d available replicas, got %d", expectedReplicas, deployment.Status.AvailableReplicas) + g.Expect(deployment.Status.ReadyReplicas).To(Equal(expectedReplicas), + "deployment should have %d ready replicas, got %d", expectedReplicas, deployment.Status.ReadyReplicas) + g.Expect(deployment.Status.UpdatedReplicas).To(Equal(expectedReplicas), + "deployment should have %d updated replicas, got %d", 
expectedReplicas, deployment.Status.UpdatedReplicas) + }).WithTimeout(8 * time.Minute).WithPolling(5 * time.Second).Should(Succeed()) By("Verify all resources were recreated by OLM with correct configuration") - recreatedDeployment, err := c.GetDeployment(generatedNamespace.GetName(), deploymentName) - Expect(err).ShouldNot(HaveOccurred()) - Expect(recreatedDeployment.UID).ToNot(Equal(deploymentUID), "deployment should have been recreated with new UID") + // Re-fetch the deployment to get the latest state after recreation + var recreatedDeployment *appsv1.Deployment + Eventually(func(g Gomega) { + var err error + recreatedDeployment, err = c.GetDeployment(generatedNamespace.GetName(), deploymentName) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(recreatedDeployment.UID).ToNot(Equal(deploymentUID), "deployment should have been recreated with new UID") + }).WithTimeout(pollDuration).WithPolling(pollInterval).Should(Succeed()) + // Verify ServiceAccount was NOT recreated (should have same UID) recreatedServiceAccount, err := c.KubernetesInterface().CoreV1().ServiceAccounts(generatedNamespace.GetName()).Get( context.Background(), serviceAccountName, @@ -2137,6 +2088,7 @@ var _ = Describe("Starting CatalogSource e2e tests", Label("CatalogSource"), fun Expect(err).ShouldNot(HaveOccurred()) Expect(recreatedServiceAccount.UID).To(Equal(serviceAccountUID), "serviceaccount should not have been recreated (same UID)") + // Verify Role was NOT recreated (should have same UID) recreatedRoleList, err := c.KubernetesInterface().RbacV1().Roles(generatedNamespace.GetName()).List( context.Background(), metav1.ListOptions{LabelSelector: ownerSelector.String()}, @@ -2145,6 +2097,7 @@ var _ = Describe("Starting CatalogSource e2e tests", Label("CatalogSource"), fun Expect(len(recreatedRoleList.Items)).To(BeNumerically(">", 0), "at least one role should exist") Expect(recreatedRoleList.Items[0].UID).To(Equal(roleUID), "role should not have been recreated (same UID)") + // Verify 
RoleBinding was NOT recreated (should have same UID) recreatedRoleBindingList, err := c.KubernetesInterface().RbacV1().RoleBindings(generatedNamespace.GetName()).List( context.Background(), metav1.ListOptions{LabelSelector: ownerSelector.String()},