From c4895d7e85559b2460a83dc51912c371c1d570fa Mon Sep 17 00:00:00 2001
From: Stephen Benjamin
Date: Mon, 7 Jul 2025 11:07:29 -0400
Subject: [PATCH] Migrate origin to use OTE, and implement extension-derived suites

Refactors openshift-tests to use the OpenShift Tests Extension (OTE)
framework for its own tests, and implements externally defined suites.

* Consolidates the many scattered places where origin filters tests down to
  the execution set, using filter chains
* Vendors and uses most OTE-provided Cobra commands
* Replaces the legacy run-test command with OTE's implementation, so there
  is now only one way tests are executed
* Adds a list command with subcommands for suites, extensions, and tests
* Migrates origin's existing suites to use CEL expressions
* Retrieves suites from extension binaries and makes them available for use
---
 cmd/openshift-tests/openshift-tests.go | 46 +-
 go.mod | 1 +
 go.sum | 4 +-
 pkg/clioptions/clusterdiscovery/cluster.go | 166 +-
 pkg/clioptions/clusterdiscovery/provider.go | 9 +-
 .../clusterdiscovery/provider_test.go | 342 ---
 .../suiteselection/feature_filter.go | 86 -
 pkg/clioptions/suiteselection/group_filter.go | 51 -
 pkg/clioptions/suiteselection/suite_flags.go | 64 +-
 pkg/cmd/openshift-tests/list/extensions.go | 148 +
 pkg/cmd/openshift-tests/list/root.go | 28 +
 pkg/cmd/openshift-tests/list/suites.go | 83 +
 pkg/cmd/openshift-tests/run-test/command.go | 82 -
 pkg/cmd/openshift-tests/run-upgrade/flags.go | 19 +-
 .../openshift-tests/run-upgrade/options.go | 9 +-
 pkg/cmd/openshift-tests/run/command.go | 38 +-
 pkg/cmd/openshift-tests/run/flags.go | 42 +-
 pkg/cmd/openshift-tests/run/options.go | 19 +-
 pkg/cmd/util.go | 17 +-
 pkg/test/extensions/binary.go | 184 +-
 pkg/test/extensions/types.go | 182 +-
 pkg/test/extensions/util.go | 11 +-
 pkg/test/filters/chain.go | 82 +
 pkg/test/filters/chain_test.go | 90 +
 pkg/test/filters/cluster_state.go | 175 ++
 pkg/test/filters/cluster_state_test.go | 251 ++
 pkg/test/filters/disabled.go | 75 +
 .../filters/disabled_test.go} | 2 +-
 pkg/test/filters/kube_rebase.go | 72 +
 pkg/test/filters/kube_rebase_test.go | 29 +
 pkg/test/filters/match_function.go | 39 +
 pkg/test/filters/match_function_test.go | 29 +
 pkg/test/filters/qualifiers.go | 42 +
 pkg/test/filters/qualifiers_test.go | 141 +
 pkg/test/ginkgo/cmd_runsuite.go | 334 +-
 pkg/test/ginkgo/cmd_runtest.go | 195 --
 pkg/test/ginkgo/test_runner.go | 115 +-
 pkg/test/ginkgo/test_suite.go | 116 +-
 pkg/testsuites/cni.go | 30 -
 pkg/testsuites/filters.go | 94 +-
 pkg/testsuites/minimal.go | 2684 -----------------
 pkg/testsuites/standard_suites.go | 354 +--
 pkg/testsuites/suites_test.go | 51 +
 pkg/testsuites/upgrade_suites.go | 25 +-
 .../openshift-tests-extension/LICENSE | 201 ++
 .../pkg/cmd/cmdinfo/info.go | 38 +
 .../pkg/cmd/cmdlist/list.go | 133 +
 .../pkg/cmd/cmdrun/runsuite.go | 64 +
 .../pkg/cmd/cmdrun/runtest.go | 81 +
 .../pkg/dbtime/time.go | 26 +
 .../pkg/extension/extension.go | 159 +
 .../extension/extensiontests/environment.go | 92 +
 .../pkg/extension/extensiontests/result.go | 12 +
 .../extension/extensiontests/result_writer.go | 71 +
 .../pkg/extension/extensiontests/spec.go | 567 ++++
 .../pkg/extension/extensiontests/task.go | 31 +
 .../pkg/extension/extensiontests/types.go | 104 +
 .../pkg/extension/registry.go | 39 +
 .../pkg/extension/types.go | 91 +
 .../pkg/flags/component.go | 25 +
 .../pkg/flags/concurrency.go | 23 +
 .../pkg/flags/environment.go | 114 +
 .../pkg/flags/names.go | 24 +
 .../pkg/flags/output.go | 95 +
 .../pkg/flags/suite.go | 21 +
 .../pkg/ginkgo/util.go | 177 ++
 .../pkg/util/sets/LICENSE | 202 ++
 .../pkg/util/sets/README.md | 3 +
 .../pkg/util/sets/byte.go | 137 +
 .../pkg/util/sets/doc.go | 19 +
 .../pkg/util/sets/empty.go | 21 +
 .../pkg/util/sets/int.go | 137 +
 .../pkg/util/sets/int32.go | 137 +
 .../pkg/util/sets/int64.go | 137 +
 .../pkg/util/sets/set.go | 236 ++
 .../pkg/util/sets/string.go | 137 +
 .../pkg/version/version.go | 11 +
 vendor/modules.txt | 12 +
 78 files changed, 5578 insertions(+), 4455 deletions(-)
delete mode 100644 pkg/clioptions/clusterdiscovery/provider_test.go
delete mode 100644 pkg/clioptions/suiteselection/feature_filter.go
delete mode 100644 pkg/clioptions/suiteselection/group_filter.go
create mode 100644 pkg/cmd/openshift-tests/list/extensions.go
create mode 100644 pkg/cmd/openshift-tests/list/root.go
create mode 100644 pkg/cmd/openshift-tests/list/suites.go
delete mode 100644 pkg/cmd/openshift-tests/run-test/command.go
create mode 100644 pkg/test/filters/chain.go
create mode 100644 pkg/test/filters/chain_test.go
create mode 100644 pkg/test/filters/cluster_state.go
create mode 100644 pkg/test/filters/cluster_state_test.go
create mode 100644 pkg/test/filters/disabled.go
rename pkg/{testsuites/standard_suites_test.go => test/filters/disabled_test.go} (99%)
create mode 100644 pkg/test/filters/kube_rebase.go
create mode 100644 pkg/test/filters/kube_rebase_test.go
create mode 100644 pkg/test/filters/match_function.go
create mode 100644 pkg/test/filters/match_function_test.go
create mode 100644 pkg/test/filters/qualifiers.go
create mode 100644 pkg/test/filters/qualifiers_test.go
delete mode 100644 pkg/test/ginkgo/cmd_runtest.go
delete mode 100644 pkg/testsuites/cni.go
delete mode 100644 pkg/testsuites/minimal.go
create mode 100644 pkg/testsuites/suites_test.go
create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/LICENSE
create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdinfo/info.go
create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdlist/list.go
create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runsuite.go
create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runtest.go
create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/dbtime/time.go
create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extension.go
create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/environment.go
create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result.go
create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result_writer.go
create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/spec.go
create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/task.go
create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/types.go
create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/registry.go
create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/types.go
create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/component.go
create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/concurrency.go
create mode 100644
vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/environment.go create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/names.go create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/output.go create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/suite.go create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo/util.go create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/LICENSE create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/README.md create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/byte.go create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/doc.go create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/empty.go create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int.go create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int32.go create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int64.go create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/set.go create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/string.go create mode 100644 vendor/github.com/openshift-eng/openshift-tests-extension/pkg/version/version.go diff --git a/cmd/openshift-tests/openshift-tests.go b/cmd/openshift-tests/openshift-tests.go index 1d06b4145f45..e5c0aa79bb68 100644 --- a/cmd/openshift-tests/openshift-tests.go +++ b/cmd/openshift-tests/openshift-tests.go @@ -3,18 +3,27 @@ package main import ( "flag" "fmt" - "math/rand" "os" "os/exec" "syscall" - "time" + "github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdinfo" + "github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun" "github.com/openshift/library-go/pkg/serviceability" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "k8s.io/cli-runtime/pkg/genericclioptions" + utilflag "k8s.io/component-base/cli/flag" + "k8s.io/component-base/logs" + "k8s.io/kubectl/pkg/util/templates" + "github.com/openshift/origin/pkg/cmd" collectdiskcertificates "github.com/openshift/origin/pkg/cmd/openshift-tests/collect-disk-certificates" "github.com/openshift/origin/pkg/cmd/openshift-tests/dev" "github.com/openshift/origin/pkg/cmd/openshift-tests/disruption" "github.com/openshift/origin/pkg/cmd/openshift-tests/images" + "github.com/openshift/origin/pkg/cmd/openshift-tests/list" "github.com/openshift/origin/pkg/cmd/openshift-tests/monitor" run_monitor "github.com/openshift/origin/pkg/cmd/openshift-tests/monitor/run" "github.com/openshift/origin/pkg/cmd/openshift-tests/monitor/timeline" @@ -22,19 +31,11 @@ import ( risk_analysis "github.com/openshift/origin/pkg/cmd/openshift-tests/risk-analysis" "github.com/openshift/origin/pkg/cmd/openshift-tests/run" run_disruption "github.com/openshift/origin/pkg/cmd/openshift-tests/run-disruption" - run_test "github.com/openshift/origin/pkg/cmd/openshift-tests/run-test" run_upgrade "github.com/openshift/origin/pkg/cmd/openshift-tests/run-upgrade" "github.com/openshift/origin/pkg/cmd/openshift-tests/run_resource_watch" versioncmd "github.com/openshift/origin/pkg/cmd/openshift-tests/version" - testginkgo "github.com/openshift/origin/pkg/test/ginkgo" + 
"github.com/openshift/origin/pkg/test/extensions" exutil "github.com/openshift/origin/test/extended/util" - "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "k8s.io/cli-runtime/pkg/genericclioptions" - utilflag "k8s.io/component-base/cli/flag" - "k8s.io/component-base/logs" - "k8s.io/kubectl/pkg/util/templates" ) func main() { @@ -53,14 +54,23 @@ func main() { logs.InitLogs() defer logs.FlushLogs() - logrus.SetLevel(logrus.InfoLevel) - rand.Seed(time.Now().UTC().UnixNano()) + // The GCE PD drivers were removed in kube 1.31, so we can ignore the env var that + // some automation sets. + if os.Getenv("ENABLE_STORAGE_GCE_PD_DRIVER") != "" { + logrus.Warn("ENABLE_STORAGE_GCE_PD_DRIVER is set, but is not supported") + os.Unsetenv("ENABLE_STORAGE_GCE_PD_DRIVER") + } pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) //pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) + extensionRegistry, originExtension, err := extensions.InitializeOpenShiftTestsExtensionFramework() + if err != nil { + panic(err) + } + root := &cobra.Command{ Long: templates.LongDesc(`This command verifies behavior of an OpenShift cluster by running remote tests against the cluster API that exercise functionality. In general these tests may be disruptive @@ -79,10 +89,12 @@ func main() { } root.AddCommand( - run.NewRunCommand(ioStreams), + run.NewRunCommand(ioStreams, originExtension), + list.NewListCommand(ioStreams, extensionRegistry), + cmdinfo.NewInfoCommand(extensionRegistry), run_upgrade.NewRunUpgradeCommand(ioStreams), images.NewImagesCommand(), - run_test.NewRunTestCommand(ioStreams), + cmdrun.NewRunTestCommand(extensionRegistry), dev.NewDevCommand(), run_monitor.NewRunMonitorCommand(ioStreams), monitor.NewMonitorCommand(ioStreams), @@ -106,10 +118,6 @@ func main() { defer serviceability.Profile(os.Getenv("OPENSHIFT_PROFILE")).Stop() return root.Execute() }(); err != nil { - if ex, ok := err.(testginkgo.ExitError); ok { - fmt.Fprintf(os.Stderr, "Ginkgo exit error %d: %v\n", ex.Code, err) - os.Exit(ex.Code) - } fmt.Fprintf(os.Stderr, "error: %v\n", err) os.Exit(1) } diff --git a/go.mod b/go.mod index 284f25ef7fdb..0124bc637c2d 100644 --- a/go.mod +++ b/go.mod @@ -35,6 +35,7 @@ require ( github.com/onsi/ginkgo/v2 v2.21.0 github.com/onsi/gomega v1.35.1 github.com/opencontainers/go-digest v1.0.0 + github.com/openshift-eng/openshift-tests-extension v0.0.0-20250522124649-4ffcd156ec7c github.com/openshift-kni/commatrix v0.0.4-0.20250604173218-064b4004e9fb github.com/openshift/api v0.0.0-20250513132935-9052dea86694 github.com/openshift/apiserver-library-go v0.0.0-20250127121756-dc9a973f14ce diff --git a/go.sum b/go.sum index 8f47c3cd80b5..1f8ade6806e0 100644 --- a/go.sum +++ b/go.sum @@ -609,8 +609,8 @@ github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8= github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= -github.com/openshift-eng/openshift-tests-extension v0.0.0-20250220212757-b9c4d98a0c45 h1:hXpbYtP3iTh8oy/RKwKkcMziwchY3fIk95ciczf7cOA= -github.com/openshift-eng/openshift-tests-extension v0.0.0-20250220212757-b9c4d98a0c45/go.mod h1:6gkP5f2HL0meusT0Aim8icAspcD1cG055xxBZ9yC68M= +github.com/openshift-eng/openshift-tests-extension v0.0.0-20250522124649-4ffcd156ec7c h1:R5dI2oOF2RtS1sKtLrhW9KMg0ydzF0XM2Q//ma55nWI= 
+github.com/openshift-eng/openshift-tests-extension v0.0.0-20250522124649-4ffcd156ec7c/go.mod h1:6gkP5f2HL0meusT0Aim8icAspcD1cG055xxBZ9yC68M= github.com/openshift-kni/commatrix v0.0.4-0.20250604173218-064b4004e9fb h1:+owPvmRBKN5dYuVQ7/CROW0h6wL6Hk8MJlf2vnZQfSM= github.com/openshift-kni/commatrix v0.0.4-0.20250604173218-064b4004e9fb/go.mod h1:R8JhlXqlLwe3N6nQheK2KpMEESIlDVCUw5TOLKiZ08s= github.com/openshift/api v0.0.0-20250513132935-9052dea86694 h1:kPnk1+m89LJHexYsTP+MVM9OgJLxcpUR3vRdMQNF66s= diff --git a/pkg/clioptions/clusterdiscovery/cluster.go b/pkg/clioptions/clusterdiscovery/cluster.go index e07d246c0c68..d10c444fec85 100644 --- a/pkg/clioptions/clusterdiscovery/cluster.go +++ b/pkg/clioptions/clusterdiscovery/cluster.go @@ -7,8 +7,11 @@ import ( "io/ioutil" "net/http" "net/url" + "slices" "strings" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/runtime/schema" corev1 "k8s.io/api/core/v1" @@ -22,6 +25,7 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" configclient "github.com/openshift/client-go/config/clientset/versioned" operatorclient "github.com/openshift/client-go/operator/clientset/versioned" + "github.com/openshift/origin/test/extended/util/azure" ) @@ -70,6 +74,13 @@ type ClusterConfiguration struct { // IsNoOptionalCapabilities indicates the cluster has no optional capabilities enabled HasNoOptionalCapabilities bool + + // APIGroups contains the set of API groups available in the cluster + APIGroups sets.Set[string] `json:"-"` + // EnabledFeatureGates contains the set of enabled feature gates in the cluster + EnabledFeatureGates sets.Set[string] `json:"-"` + // DisabledFeatureGates contains the set of disabled feature gates in the cluster + DisabledFeatureGates sets.Set[string] `json:"-"` } func (c *ClusterConfiguration) ToJSONString() string { @@ -91,6 +102,80 @@ type ClusterState struct { ControlPlaneTopology *configv1.TopologyMode OptionalCapabilities []configv1.ClusterVersionCapability Version *configv1.ClusterVersion + APIGroups sets.Set[string] + EnabledFeatureGates sets.Set[string] + DisabledFeatureGates sets.Set[string] +} + +// discoverAPIGroups discovers available API groups in the cluster +func discoverAPIGroups(coreClient clientset.Interface) (sets.Set[string], error) { + logrus.Debugf("Discovering API Groups...") + discoveryClient := coreClient.Discovery() + groups, err := discoveryClient.ServerGroups() + if err != nil { + return nil, err + } + + apiGroups := sets.New[string]() + for _, apiGroup := range groups.Groups { + // ignore the empty group + if apiGroup.Name == "" { + continue + } + apiGroups.Insert(apiGroup.Name) + } + + sortedAPIGroups := apiGroups.UnsortedList() + slices.Sort(sortedAPIGroups) + + logrus.WithField("apiGroups", strings.Join(sortedAPIGroups, ", ")). 
+ Debugf("Discovered %d API Groups", apiGroups.Len()) + + return apiGroups, nil +} + +// discoverFeatureGates discovers feature gates in the cluster +func discoverFeatureGates(configClient configclient.Interface, clusterVersion *configv1.ClusterVersion) (enabled, disabled sets.Set[string], err error) { + logrus.Debugf("Discovering feature gates...") + featureGate, err := configClient.ConfigV1().FeatureGates().Get(context.Background(), "cluster", metav1.GetOptions{}) + if err != nil { + return nil, nil, errors.WithMessage(err, "encountered an error while discovering feature gates") + } + + desiredVersion := clusterVersion.Status.Desired.Version + if len(desiredVersion) == 0 && len(clusterVersion.Status.History) > 0 { + desiredVersion = clusterVersion.Status.History[0].Version + } + + enabled = sets.New[string]() + disabled = sets.New[string]() + for _, featureGateValues := range featureGate.Status.FeatureGates { + if featureGateValues.Version != desiredVersion { + logrus.Warningf("Feature gates for version %s not found, skipping", desiredVersion) + continue + } + for _, enabledGate := range featureGateValues.Enabled { + enabled.Insert(string(enabledGate.Name)) + } + for _, disabledGate := range featureGateValues.Disabled { + disabled.Insert(string(disabledGate.Name)) + } + break + } + + sortedEnabledGates := enabled.UnsortedList() + slices.Sort(sortedEnabledGates) + + logrus.WithField("featureGates", strings.Join(sortedEnabledGates, ", ")). + Debugf("Discovered %d enabled feature gates", len(sortedEnabledGates)) + + sortedDisabledGates := disabled.UnsortedList() + slices.Sort(sortedDisabledGates) + + logrus.WithField("featureGates", strings.Join(sortedDisabledGates, ", ")). + Debugf("Discovered %d disabled feature gates", len(sortedDisabledGates)) + + return enabled, disabled, nil } // DiscoverClusterState creates a ClusterState based on a live cluster @@ -156,6 +241,24 @@ func DiscoverClusterState(clientConfig *rest.Config) (*ClusterState, error) { state.Version = clusterVersion state.OptionalCapabilities = clusterVersion.Status.Capabilities.EnabledCapabilities + // Discover available API groups + state.APIGroups, err = discoverAPIGroups(coreClient) + if err != nil { + return nil, errors.WithMessage(err, "encountered an error while discovering API groups") + } + + // Discover feature gates + if state.APIGroups.Has("config.openshift.io") { + state.EnabledFeatureGates, state.DisabledFeatureGates, err = discoverFeatureGates(configClient, clusterVersion) + if err != nil { + logrus.WithError(err).Warn("ignoring error from discoverFeatureGates") + } + } else { + state.EnabledFeatureGates = sets.New[string]() + state.DisabledFeatureGates = sets.New[string]() + logrus.Infof("config.openshift.io API group not found, skipping feature gate discovery") + } + return state, nil } @@ -271,63 +374,10 @@ func LoadConfig(state *ClusterState) (*ClusterConfiguration, error) { // have to scan MachineConfig objects to figure this out? For now, callers can // can just manually override with --provider... 
- return config, nil -} - -// MatchFn returns a function that tests if a named function should be run based on -// the cluster configuration -func (c *ClusterConfiguration) MatchFn() func(string) bool { - var skips []string - skips = append(skips, fmt.Sprintf("[Skipped:%s]", c.ProviderName)) - - if c.IsIBMROKS { - skips = append(skips, "[Skipped:ibmroks]") - } - if c.NetworkPlugin != "" { - skips = append(skips, fmt.Sprintf("[Skipped:Network/%s]", c.NetworkPlugin)) - if c.NetworkPluginMode != "" { - skips = append(skips, fmt.Sprintf("[Skipped:Network/%s/%s]", c.NetworkPlugin, c.NetworkPluginMode)) - } - } - - if c.Disconnected { - skips = append(skips, "[Skipped:Disconnected]") - } + // Copy API groups and feature gates from cluster state + config.APIGroups = state.APIGroups + config.EnabledFeatureGates = state.EnabledFeatureGates + config.DisabledFeatureGates = state.DisabledFeatureGates - if c.IsProxied { - skips = append(skips, "[Skipped:Proxy]") - } - - if c.SingleReplicaTopology { - skips = append(skips, "[Skipped:SingleReplicaTopology]") - } - - if !c.HasIPv4 { - skips = append(skips, "[Feature:Networking-IPv4]") - } - if !c.HasIPv6 { - skips = append(skips, "[Feature:Networking-IPv6]") - } - if !c.HasIPv4 || !c.HasIPv6 { - // lack of "]" is intentional; this matches multiple tags - skips = append(skips, "[Feature:IPv6DualStack") - } - - if !c.HasSCTP { - skips = append(skips, "[Feature:SCTPConnectivity]") - } - - if c.HasNoOptionalCapabilities { - skips = append(skips, "[Skipped:NoOptionalCapabilities]") - } - - matchFn := func(name string) bool { - for _, skip := range skips { - if strings.Contains(name, skip) { - return false - } - } - return true - } - return matchFn + return config, nil } diff --git a/pkg/clioptions/clusterdiscovery/provider.go b/pkg/clioptions/clusterdiscovery/provider.go index 8389a6ea090b..b5c3a5a6a36e 100644 --- a/pkg/clioptions/clusterdiscovery/provider.go +++ b/pkg/clioptions/clusterdiscovery/provider.go @@ -9,9 +9,10 @@ import ( "github.com/onsi/gomega" "github.com/sirupsen/logrus" - "github.com/openshift/origin/test/extended/util/image" e2e "k8s.io/kubernetes/test/e2e/framework" + "github.com/openshift/origin/test/extended/util/image" + exutil "github.com/openshift/origin/test/extended/util" // Initialize baremetal as a provider _ "github.com/openshift/origin/test/extended/util/baremetal" @@ -104,7 +105,7 @@ func DecodeProvider(providerTypeOrJSON string, dryRun, discover bool, clusterSta "dryRun": dryRun, "discover": discover, "clusterState": clusterState, - }).Info("Decoding provider") + }).Debug("Decoding provider") switch providerTypeOrJSON { case "none": config := &ClusterConfiguration{ @@ -193,7 +194,9 @@ func DecodeProvider(providerTypeOrJSON string, dryRun, discover bool, clusterSta if clusterState != nil { var err error config, err = LoadConfig(clusterState) - log.WithError(err).Warn("ignoring error from LoadConfig for discovery") + if err != nil { + log.WithError(err).Warn("ignoring error from LoadConfig for discovery") + } } } if config == nil { diff --git a/pkg/clioptions/clusterdiscovery/provider_test.go b/pkg/clioptions/clusterdiscovery/provider_test.go deleted file mode 100644 index 2334b0f00599..000000000000 --- a/pkg/clioptions/clusterdiscovery/provider_test.go +++ /dev/null @@ -1,342 +0,0 @@ -package clusterdiscovery - -import ( - "net/url" - "os" - "testing" - - configv1 "github.com/openshift/api/config/v1" - operatorv1 "github.com/openshift/api/operator/v1" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - 
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" -) - -var gcePlatform = &configv1.PlatformStatus{ - Type: configv1.GCPPlatformType, - GCP: &configv1.GCPPlatformStatus{ - ProjectID: "openshift-gce-devel-ci", - Region: "us-east1", - }, -} - -var awsPlatform = &configv1.PlatformStatus{ - Type: configv1.AWSPlatformType, - AWS: &configv1.AWSPlatformStatus{ - Region: "us-east-2", - }, -} - -var vspherePlatform = &configv1.PlatformStatus{ - Type: configv1.VSpherePlatformType, -} - -var alibabaPlatform = &configv1.PlatformStatus{ - Type: configv1.AlibabaCloudPlatformType, - AlibabaCloud: &configv1.AlibabaCloudPlatformStatus{ - Region: "us-east-1", - }, -} - -var noPlatform = &configv1.PlatformStatus{ - Type: configv1.NonePlatformType, -} - -var gceMasters = &corev1.NodeList{ - Items: []corev1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "master-1", - Labels: map[string]string{ - "failure-domain.beta.kubernetes.io/zone": "us-east1-a", - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "master-2", - Labels: map[string]string{ - "failure-domain.beta.kubernetes.io/zone": "us-east1-b", - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "master-3", - Labels: map[string]string{ - "failure-domain.beta.kubernetes.io/zone": "us-east1-c", - }, - }, - }, - }, -} - -var simpleMasters = &corev1.NodeList{ - Items: []corev1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "master-1", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "master-2", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "master-3", - }, - }, - }, -} - -var nonMasters = &corev1.NodeList{ - Items: []corev1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "worker-1", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "worker-2", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "worker-3", - }, - }, - }, -} - -var sdnConfig = &operatorv1.NetworkSpec{ - DefaultNetwork: operatorv1.DefaultNetworkDefinition{ - Type: operatorv1.NetworkTypeOpenShiftSDN, - OpenShiftSDNConfig: &operatorv1.OpenShiftSDNConfig{}, - }, - ServiceNetwork: []string{"172.30.0.0/16"}, -} - -var multitenantConfig = &operatorv1.NetworkSpec{ - DefaultNetwork: operatorv1.DefaultNetworkDefinition{ - Type: operatorv1.NetworkTypeOpenShiftSDN, - OpenShiftSDNConfig: &operatorv1.OpenShiftSDNConfig{ - Mode: operatorv1.SDNModeMultitenant, - }, - }, - ServiceNetwork: []string{"172.30.0.0/16"}, -} - -var ovnKubernetesConfig = &operatorv1.NetworkSpec{ - DefaultNetwork: operatorv1.DefaultNetworkDefinition{ - Type: operatorv1.NetworkTypeOVNKubernetes, - }, - ServiceNetwork: []string{ - "172.30.0.0/16", - "fd02::/112", - }, -} - -var ovnKubernetesConfigIPv6Primary = &operatorv1.NetworkSpec{ - DefaultNetwork: operatorv1.DefaultNetworkDefinition{ - Type: operatorv1.NetworkTypeOVNKubernetes, - }, - ServiceNetwork: []string{ - "fd02::/112", - "172.30.0.0/16", - }, -} - -var e2eTests = map[string]string{ - "everyone": "[Skipped:Wednesday]", - "not-gce": "[Skipped:gce]", - "not-aws": "[Skipped:aws]", - "not-sdn": "[Skipped:Network/OpenShiftSDN]", - "not-multitenant": "[Skipped:Network/OpenShiftSDN/Multitenant]", - "online": "[Skipped:Disconnected]", - "ipv4": "[Feature:Networking-IPv4]", - "ipv6": "[Feature:Networking-IPv6]", - "dual-stack": "[Feature:IPv6DualStackAlpha]", - "sctp": "[Feature:SCTPConnectivity]", - "requires-optional-cap": "[Skipped:NoOptionalCapabilities]", -} - -func TestDecodeProvider(t *testing.T) { - var testCases = []struct { - name string - provider string - - discoveredPlatform 
*configv1.PlatformStatus - discoveredMasters *corev1.NodeList - discoveredNetwork *operatorv1.NetworkSpec - optionalCapabilities []configv1.ClusterVersionCapability - - expectedConfig string - runTests sets.String - }{ - { - name: "simple GCE", - provider: "", - discoveredPlatform: gcePlatform, - discoveredMasters: gceMasters, - discoveredNetwork: sdnConfig, - optionalCapabilities: configv1.KnownClusterVersionCapabilities, - expectedConfig: `{"type":"gce","ProjectID":"openshift-gce-devel-ci","Region":"us-east1","Zone":"us-east1-a","NumNodes":3,"MultiMaster":true,"MultiZone":true,"Zones":["us-east1-a","us-east1-b","us-east1-c"],"ConfigFile":"","Disconnected":false,"SingleReplicaTopology":false,"NetworkPlugin":"OpenShiftSDN","HasIPv4":true,"HasIPv6":false,"IPFamily":"ipv4","HasSCTP":false,"IsProxied":false,"IsIBMROKS":false,"HasNoOptionalCapabilities":false}`, - runTests: sets.NewString("everyone", "not-aws", "not-multitenant", "online", "ipv4", "requires-optional-cap"), - }, - { - name: "GCE multitenant", - provider: "", - discoveredPlatform: gcePlatform, - discoveredMasters: gceMasters, - discoveredNetwork: multitenantConfig, - optionalCapabilities: configv1.KnownClusterVersionCapabilities, - expectedConfig: `{"type":"gce","ProjectID":"openshift-gce-devel-ci","Region":"us-east1","Zone":"us-east1-a","NumNodes":3,"MultiMaster":true,"MultiZone":true,"Zones":["us-east1-a","us-east1-b","us-east1-c"],"ConfigFile":"","Disconnected":false,"SingleReplicaTopology":false,"NetworkPlugin":"OpenShiftSDN","NetworkPluginMode":"Multitenant","HasIPv4":true,"HasIPv6":false,"IPFamily":"ipv4","HasSCTP":false,"IsProxied":false,"IsIBMROKS":false,"HasNoOptionalCapabilities":false}`, - runTests: sets.NewString("everyone", "not-aws", "online", "ipv4", "requires-optional-cap"), - }, - { - name: "simple non-cloud", - provider: "", - discoveredPlatform: noPlatform, - discoveredMasters: simpleMasters, - discoveredNetwork: sdnConfig, - optionalCapabilities: configv1.KnownClusterVersionCapabilities, - expectedConfig: `{"type":"skeleton","ProjectID":"","Region":"","Zone":"","NumNodes":3,"MultiMaster":true,"MultiZone":false,"Zones":[],"ConfigFile":"","Disconnected":false,"SingleReplicaTopology":false,"NetworkPlugin":"OpenShiftSDN","HasIPv4":true,"HasIPv6":false,"IPFamily":"ipv4","HasSCTP":false,"IsProxied":false,"IsIBMROKS":false,"HasNoOptionalCapabilities":false}`, - runTests: sets.NewString("everyone", "not-gce", "not-aws", "not-multitenant", "online", "ipv4", "requires-optional-cap"), - }, - { - name: "simple override", - provider: "vsphere", - discoveredPlatform: vspherePlatform, - discoveredMasters: simpleMasters, - discoveredNetwork: sdnConfig, - optionalCapabilities: configv1.KnownClusterVersionCapabilities, - // NB: It does not actually use the passed-in Provider value - expectedConfig: `{"type":"skeleton","ProjectID":"","Region":"","Zone":"","NumNodes":3,"MultiMaster":true,"MultiZone":false,"Zones":[],"ConfigFile":"","Disconnected":false,"SingleReplicaTopology":false,"NetworkPlugin":"OpenShiftSDN","HasIPv4":true,"HasIPv6":false,"IPFamily":"ipv4","HasSCTP":false,"IsProxied":false,"IsIBMROKS":false,"HasNoOptionalCapabilities":false}`, - runTests: sets.NewString("everyone", "not-gce", "not-aws", "not-multitenant", "online", "ipv4", "requires-optional-cap"), - }, - { - name: "simple AlibabaCloud", - provider: "alibabacloud", - discoveredPlatform: alibabaPlatform, - discoveredMasters: simpleMasters, - discoveredNetwork: sdnConfig, - optionalCapabilities: configv1.KnownClusterVersionCapabilities, - expectedConfig: 
`{"type":"skeleton","ProjectID":"","Region":"","Zone":"","NumNodes":3,"MultiMaster":true,"MultiZone":false,"Zones":[],"ConfigFile":"","Disconnected":false,"SingleReplicaTopology":false,"NetworkPlugin":"OpenShiftSDN","HasIPv4":true,"HasIPv6":false,"IPFamily":"ipv4","HasSCTP":false,"IsProxied":false,"IsIBMROKS":false,"HasNoOptionalCapabilities":false}`, - runTests: sets.NewString("everyone", "not-gce", "not-aws", "not-multitenant", "online", "ipv4", "requires-optional-cap"), - }, - { - name: "json simple override", - provider: `{"type": "openstack"}`, - discoveredPlatform: noPlatform, - discoveredMasters: simpleMasters, - discoveredNetwork: sdnConfig, - optionalCapabilities: configv1.KnownClusterVersionCapabilities, - expectedConfig: `{"type":"openstack","ProjectID":"","Region":"","Zone":"","NumNodes":3,"MultiMaster":true,"MultiZone":false,"Zones":[],"ConfigFile":"","Disconnected":false,"SingleReplicaTopology":false,"NetworkPlugin":"OpenShiftSDN","HasIPv4":true,"HasIPv6":false,"IPFamily":"ipv4","HasSCTP":false,"IsProxied":false,"IsIBMROKS":false,"HasNoOptionalCapabilities":false}`, - runTests: sets.NewString("everyone", "not-gce", "not-aws", "not-multitenant", "online", "ipv4", "requires-optional-cap"), - }, - { - name: "complex override dual-stack", - provider: `{"type":"aws","region":"us-east-2","zone":"us-east-2a","multimaster":false,"multizone":true}`, - discoveredPlatform: awsPlatform, - discoveredMasters: simpleMasters, - discoveredNetwork: ovnKubernetesConfig, - optionalCapabilities: configv1.KnownClusterVersionCapabilities, - expectedConfig: `{"type":"aws","ProjectID":"","Region":"us-east-2","Zone":"us-east-2a","NumNodes":3,"MultiMaster":false,"MultiZone":true,"Zones":[],"ConfigFile":"","Disconnected":false,"SingleReplicaTopology":false,"NetworkPlugin":"OVNKubernetes","HasIPv4":true,"HasIPv6":true,"IPFamily":"ipv4","HasSCTP":false,"IsProxied":false,"IsIBMROKS":false,"HasNoOptionalCapabilities":false}`, - runTests: sets.NewString("everyone", "not-gce", "not-sdn", "not-multitenant", "online", "ipv4", "ipv6", "dual-stack", "requires-optional-cap"), - }, - { - name: "complex override dual-stack IPv6 Primary", - provider: `{"type":"aws","region":"us-east-2","zone":"us-east-2a","multimaster":false,"multizone":true}`, - discoveredPlatform: awsPlatform, - discoveredMasters: simpleMasters, - discoveredNetwork: ovnKubernetesConfigIPv6Primary, - optionalCapabilities: configv1.KnownClusterVersionCapabilities, - expectedConfig: `{"type":"aws","ProjectID":"","Region":"us-east-2","Zone":"us-east-2a","NumNodes":3,"MultiMaster":false,"MultiZone":true,"Zones":[],"ConfigFile":"","Disconnected":false,"SingleReplicaTopology":false,"NetworkPlugin":"OVNKubernetes","HasIPv4":true,"HasIPv6":true,"IPFamily":"ipv6","HasSCTP":false,"IsProxied":false,"IsIBMROKS":false,"HasNoOptionalCapabilities":false}`, - runTests: sets.NewString("everyone", "not-gce", "not-sdn", "not-multitenant", "online", "ipv4", "ipv6", "dual-stack", "requires-optional-cap"), - }, - { - name: "complex override without discovery", - provider: `{"type":"aws","region":"us-east-2","zone":"us-east-2a","multimaster":false,"multizone":true}`, - discoveredPlatform: nil, - optionalCapabilities: configv1.KnownClusterVersionCapabilities, - expectedConfig: 
`{"type":"aws","ProjectID":"","Region":"us-east-2","Zone":"us-east-2a","NumNodes":0,"MultiMaster":false,"MultiZone":true,"Zones":null,"ConfigFile":"","Disconnected":false,"SingleReplicaTopology":false,"NetworkPlugin":"","HasIPv4":false,"HasIPv6":false,"IPFamily":"","HasSCTP":false,"IsProxied":false,"IsIBMROKS":false,"HasNoOptionalCapabilities":false}`, - runTests: sets.NewString("everyone", "not-gce", "not-sdn", "not-multitenant", "online", "requires-optional-cap"), - }, - { - name: "disconnected", - provider: `{"type":"none","disconnected":true}`, - discoveredPlatform: noPlatform, - discoveredMasters: simpleMasters, - discoveredNetwork: ovnKubernetesConfig, - optionalCapabilities: configv1.KnownClusterVersionCapabilities, - expectedConfig: `{"type":"none","ProjectID":"","Region":"","Zone":"","NumNodes":3,"MultiMaster":true,"MultiZone":false,"Zones":[],"ConfigFile":"","Disconnected":true,"SingleReplicaTopology":false,"NetworkPlugin":"OVNKubernetes","HasIPv4":true,"HasIPv6":true,"IPFamily":"ipv4","HasSCTP":false,"IsProxied":false,"IsIBMROKS":false,"HasNoOptionalCapabilities":false}`, - runTests: sets.NewString("everyone", "not-gce", "not-aws", "not-sdn", "not-multitenant", "ipv4", "ipv6", "dual-stack", "requires-optional-cap"), - }, - { - name: "override network plugin", - provider: `{"type":"aws","networkPlugin":"Calico","hasIPv4":false,"hasIPv6":true,"ipFamily":"ipv6","hasSCTP":true}`, - discoveredPlatform: awsPlatform, - discoveredMasters: simpleMasters, - discoveredNetwork: ovnKubernetesConfig, - optionalCapabilities: configv1.KnownClusterVersionCapabilities, - expectedConfig: `{"type":"aws","ProjectID":"","Region":"us-east-2","Zone":"","NumNodes":3,"MultiMaster":true,"MultiZone":false,"Zones":[],"ConfigFile":"","Disconnected":false,"SingleReplicaTopology":false,"NetworkPlugin":"Calico","HasIPv4":false,"HasIPv6":true,"IPFamily":"ipv6","HasSCTP":true,"IsProxied":false,"IsIBMROKS":false,"HasNoOptionalCapabilities":false}`, - runTests: sets.NewString("everyone", "not-gce", "not-sdn", "not-multitenant", "online", "ipv6", "sctp", "requires-optional-cap"), - }, - { - name: "no optional capabilities", - provider: "", - discoveredPlatform: gcePlatform, - discoveredMasters: gceMasters, - discoveredNetwork: sdnConfig, - optionalCapabilities: []configv1.ClusterVersionCapability{}, - expectedConfig: `{"type":"gce","ProjectID":"openshift-gce-devel-ci","Region":"us-east1","Zone":"us-east1-a","NumNodes":3,"MultiMaster":true,"MultiZone":true,"Zones":["us-east1-a","us-east1-b","us-east1-c"],"ConfigFile":"","Disconnected":false,"SingleReplicaTopology":false,"NetworkPlugin":"OpenShiftSDN","HasIPv4":true,"HasIPv6":false,"IPFamily":"ipv4","HasSCTP":false,"IsProxied":false,"IsIBMROKS":false,"HasNoOptionalCapabilities":true}`, - runTests: sets.NewString("everyone", "not-aws", "not-multitenant", "online", "ipv4"), - }, - } - - // Unset these to keep DecodeProvider from returning "local" - os.Unsetenv("KUBE_SSH_USER") - os.Unsetenv("LOCAL_SSH_KEY") - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - discover := tc.discoveredPlatform != nil - var testState *ClusterState - if discover { - topology := configv1.HighlyAvailableTopologyMode - testURL, _ := url.Parse("https://example.com") - testState = &ClusterState{ - PlatformStatus: tc.discoveredPlatform, - Masters: tc.discoveredMasters, - NonMasters: nonMasters, - NetworkSpec: tc.discoveredNetwork, - ControlPlaneTopology: &topology, - APIURL: testURL, - OptionalCapabilities: tc.optionalCapabilities, - } - } - config, err := 
DecodeProvider(tc.provider, false, discover, testState) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - configJSON := config.ToJSONString() - require.Equal(t, tc.expectedConfig, configJSON) - matchFn := config.MatchFn() - - runTests := sets.NewString() - for name, tags := range e2eTests { - if matchFn(name + " " + tags) { - runTests.Insert(name) - } - } - if !runTests.Equal(tc.runTests) { - t.Fatalf("Matched tests:\n%v\ndid not match expected:\n%v\n", runTests.List(), tc.runTests.List()) - } - }) - } -} diff --git a/pkg/clioptions/suiteselection/feature_filter.go b/pkg/clioptions/suiteselection/feature_filter.go deleted file mode 100644 index 92122b2f693f..000000000000 --- a/pkg/clioptions/suiteselection/feature_filter.go +++ /dev/null @@ -1,86 +0,0 @@ -package suiteselection - -import ( - "context" - "fmt" - "regexp" - - clientconfigv1 "github.com/openshift/client-go/config/clientset/versioned" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" -) - -type featureGateFilter struct { - enabled sets.String - disabled sets.String -} - -func newFeatureGateFilter(ctx context.Context, configClient clientconfigv1.Interface) (*featureGateFilter, error) { - featureGate, err := configClient.ConfigV1().FeatureGates().Get(ctx, "cluster", metav1.GetOptions{}) - if err != nil { - return nil, err - } - clusterVersion, err := configClient.ConfigV1().ClusterVersions().Get(ctx, "version", metav1.GetOptions{}) - if err != nil { - return nil, err - } - - desiredVersion := clusterVersion.Status.Desired.Version - if len(desiredVersion) == 0 && len(clusterVersion.Status.History) > 0 { - desiredVersion = clusterVersion.Status.History[0].Version - } - - ret := &featureGateFilter{ - enabled: sets.NewString(), - disabled: sets.NewString(), - } - found := false - for _, featureGateValues := range featureGate.Status.FeatureGates { - if featureGateValues.Version != desiredVersion { - continue - } - found = true - for _, enabled := range featureGateValues.Enabled { - ret.enabled.Insert(string(enabled.Name)) - } - for _, disabled := range featureGateValues.Disabled { - ret.disabled.Insert(string(disabled.Name)) - } - break - } - if !found { - return nil, fmt.Errorf("no featuregates found") - } - - return ret, nil -} - -func (f *featureGateFilter) includeTest(name string) bool { - featureGates := []string{} - matches := featureGateRegex.FindAllStringSubmatch(name, -1) - for _, match := range matches { - if len(match) < 2 { - panic(fmt.Errorf("regexp match %v is invalid: len(match) < 2 for %v", match, name)) - } - featureGate := match[1] - featureGates = append(featureGates, featureGate) - } - - if f.disabled.HasAny(featureGates...) { - return false - } - - // It is important that we always return true if we don't know the status of the gate. - // This generally means we have no opinion on whether the feature is on or off. - // We expect the default case to be on, as this is what would happen after a feature is promoted, - // and the gate is removed. 
- return true -} - -func includeNonFeatureGateTest(name string) bool { - return featureGateRegex.FindAllStringSubmatch(name, -1) == nil -} - -var ( - featureGateRegex = regexp.MustCompile(`\[OCPFeatureGate:([^]]*)\]`) -) diff --git a/pkg/clioptions/suiteselection/group_filter.go b/pkg/clioptions/suiteselection/group_filter.go deleted file mode 100644 index 56ab1272b57e..000000000000 --- a/pkg/clioptions/suiteselection/group_filter.go +++ /dev/null @@ -1,51 +0,0 @@ -package suiteselection - -import ( - "fmt" - "regexp" - - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/discovery" -) - -type apiGroupFilter struct { - apiGroups sets.String -} - -func newApiGroupFilter(discoveryClient discovery.AggregatedDiscoveryInterface) (*apiGroupFilter, error) { - // Check if the groups is served by the server - groups, err := discoveryClient.ServerGroups() - if err != nil { - return nil, fmt.Errorf("unable to retrieve served resources: %v", err) - } - apiGroups := sets.NewString() - for _, apiGroup := range groups.Groups { - // ignore the empty group - if apiGroup.Name == "" { - continue - } - apiGroups.Insert(apiGroup.Name) - } - - return &apiGroupFilter{ - apiGroups: apiGroups, - }, nil -} - -var ( - apiGroupRegex = regexp.MustCompile(`\[apigroup:([^]]*)\]`) -) - -func (agf *apiGroupFilter) includeTest(name string) bool { - apiGroups := []string{} - matches := apiGroupRegex.FindAllStringSubmatch(name, -1) - for _, match := range matches { - if len(match) < 2 { - panic(fmt.Errorf("regexp match %v is invalid: len(match) < 2 for %v", match, name)) - } - apigroup := match[1] - apiGroups = append(apiGroups, apigroup) - } - - return agf.apiGroups.HasAll(apiGroups...) -} diff --git a/pkg/clioptions/suiteselection/suite_flags.go b/pkg/clioptions/suiteselection/suite_flags.go index 3b7dc17056ed..2dcba917d898 100644 --- a/pkg/clioptions/suiteselection/suite_flags.go +++ b/pkg/clioptions/suiteselection/suite_flags.go @@ -2,7 +2,6 @@ package suiteselection import ( "bytes" - "context" "fmt" "io/ioutil" "os" @@ -12,8 +11,6 @@ import ( "strings" clientconfigv1 "github.com/openshift/client-go/config/clientset/versioned" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/client-go/discovery" testginkgo "github.com/openshift/origin/pkg/test/ginkgo" @@ -37,8 +34,6 @@ type TestSuiteSelectionFlags struct { // Regex allows a selection of a subset of tests Regex string - // MatchFn if set is also used to filter the suite contents - MatchFn testginkgo.TestMatchFunc genericclioptions.IOStreams } @@ -65,18 +60,14 @@ func (f *TestSuiteSelectionFlags) SetIOStreams(streams genericclioptions.IOStrea // SelectSuite returns the defined suite plus the requested modifications to the suite in order to select the specified tests func (f *TestSuiteSelectionFlags) SelectSuite( suites []*testginkgo.TestSuite, - args []string, - discoveryClientGetter DiscoveryClientGetter, - configClientGetter ConfigClientGetter, - dryRun bool, - additionalMatchFn testginkgo.TestMatchFunc, -) (*testginkgo.TestSuite, error) { + args []string) (*testginkgo.TestSuite, error) { var suite *testginkgo.TestSuite // If a test file was provided with no suite, use the "files" suite. 
if len(f.TestFile) > 0 && len(args) == 0 { suite = &testginkgo.TestSuite{ Name: "files", + Kind: testginkgo.KindInternal, } } if suite == nil && len(args) == 0 { @@ -110,59 +101,10 @@ func (f *TestSuiteSelectionFlags) SelectSuite( suite.AddRequiredMatchFunc(re.MatchString) } - suite.AddRequiredMatchFunc(f.MatchFn) - suite.AddRequiredMatchFunc(additionalMatchFn) - - // Skip tests with [apigroup:GROUP] labels for apigroups which are not - // served by a cluster. E.g. MicroShift is not serving most of the openshift.io - // apigroups. Other installations might be serving only a subset of the api groups. - discoveryClient, err := discoveryClientGetter.GetDiscoveryClient() - switch { - case err != nil && dryRun: - fmt.Fprintf(f.ErrOut, "Unable to get discovery client, skipping apigroup check in the dry-run mode: %v\n", err) - case err != nil && !dryRun: - return nil, fmt.Errorf("unable to get discovery client, skipping apigroup check in the dry-run mode: %w", err) - - default: - _, serverVersionErr := discoveryClient.ServerVersion() - switch { - case serverVersionErr != nil && dryRun: - fmt.Fprintf(f.ErrOut, "Unable to get server version through discovery client, skipping apigroup check in the dry-run mode: %v\n", err) - case serverVersionErr != nil && !dryRun: - return nil, fmt.Errorf("unable to get server version through discovery client, skipping apigroup check in the dry-run mode: %w", err) - default: - apiGroupFilter, err := newApiGroupFilter(discoveryClient) - if err != nil { - return nil, fmt.Errorf("unable to build api group filter: %w", err) - } - suite.AddRequiredMatchFunc(apiGroupFilter.includeTest) - } - } - - configClient, err := configClientGetter.GetConfigClient() - switch { - case err != nil && dryRun: - fmt.Fprintf(f.ErrOut, "Unable to get config client, skipping FeatureGate check in the dry-run mode: %v\n", err) - case err != nil && !dryRun: - return nil, fmt.Errorf("unable to get config client, skipping FeatureGate check in the dry-run mode: %w", err) - default: - featureGateFilter, err := newFeatureGateFilter(context.TODO(), configClient) - switch { - case apierrors.IsNotFound(err): - // In case we are unable to determine if there is support for feature gates, exclude all featuregated tests - // as the test target doesnt comply with preconditions. - suite.AddRequiredMatchFunc(includeNonFeatureGateTest) - case err != nil: - return nil, fmt.Errorf("unable to build FeatureGate filter: %w", err) - default: - suite.AddRequiredMatchFunc(featureGateFilter.includeTest) - } - } - return suite, nil } -// If a test file was provided, override the Matches function +// If a test file was provided, override the SuiteMatcher function // to match the tests from both the suite and the file. 
func (f *TestSuiteSelectionFlags) testFileMatchFunc() (testginkgo.TestMatchFunc, error) { if len(f.TestFile) == 0 { diff --git a/pkg/cmd/openshift-tests/list/extensions.go b/pkg/cmd/openshift-tests/list/extensions.go new file mode 100644 index 000000000000..f3a5b9e38c97 --- /dev/null +++ b/pkg/cmd/openshift-tests/list/extensions.go @@ -0,0 +1,148 @@ +package list + +import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + "strings" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/kubectl/pkg/util/templates" + + origincmd "github.com/openshift/origin/pkg/cmd" + "github.com/openshift/origin/pkg/test/extensions" +) + +func NewListExtensionsCommand(streams genericclioptions.IOStreams) *cobra.Command { + cmd := &cobra.Command{ + Use: "extensions", + Short: "List available test extensions", + Long: templates.LongDesc(` + List all available test extensions that provide additional tests. + + This command extracts and queries all external test binaries from the release + payload to display information about available extensions, including their + components, source information, and advertised test suites. + `), + SilenceUsage: true, + SilenceErrors: true, + PreRunE: origincmd.RequireClusterAccess, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := context.Background() + if len(os.Getenv("OPENSHIFT_SKIP_EXTERNAL_TESTS")) > 0 { + return fmt.Errorf("OPENSHIFT_SKIP_EXTERNAL_TESTS is set, cannot list extensions") + } + + // Get output format flag + const flag = "output" + outputFormat, err := cmd.Flags().GetString(flag) + if err != nil { + return errors.Wrapf(err, "error accessing flag %s for command %s", flag, cmd.Name()) + } + + // Extract all test binaries from the release payload + cleanup, binaries, err := extensions.ExtractAllTestBinaries(ctx, 10) + if err != nil { + return fmt.Errorf("failed to extract test binaries: %w", err) + } + defer cleanup() + + if len(binaries) == 0 { + switch outputFormat { + case "json": + fmt.Fprint(streams.Out, "[]\n") + case "yaml": + fmt.Fprint(streams.Out, "[]\n") + default: + fmt.Fprint(streams.Out, "No test extensions found.\n") + } + return nil + } + + // Get info from all binaries + logrus.Infof("Fetching info from %d extension binaries", len(binaries)) + infos, err := binaries.Info(ctx, 4) + if err != nil { + logrus.Errorf("Failed to get extension info: %v", err) + return fmt.Errorf("failed to get extension info: %w", err) + } + + // Output in the requested format + switch outputFormat { + case "": + // Default human-readable format + fmt.Fprintf(streams.Out, "Available test extensions:\n\n") + for _, info := range infos { + printExtensionInfo(streams.Out, info) + } + case "json": + jsonData, err := json.MarshalIndent(infos, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal extensions to JSON: %w", err) + } + fmt.Fprintln(streams.Out, string(jsonData)) + case "yaml": + yamlData, err := yaml.Marshal(infos) + if err != nil { + return fmt.Errorf("failed to marshal extensions to YAML: %w", err) + } + fmt.Fprintln(streams.Out, string(yamlData)) + default: + return errors.Errorf("invalid output format: %s", outputFormat) + } + + return nil + }, + } + + cmd.Flags().StringP("output", "o", "", "Output format; available options are 'yaml' and 'json'") + return cmd +} + +func printExtensionInfo(out io.Writer, info *extensions.Extension) { + fmt.Fprintf(out, "%s:%s:%s\n", info.Component.Product, info.Component.Kind, 
info.Component.Name) + fmt.Fprintf(out, " API Version: %s\n", info.APIVersion) + + if info.Source.SourceBinary != "" { + fmt.Fprintf(out, " Binary: %s\n", info.Source.SourceBinary) + } + + if info.Source.SourceImage != "" { + fmt.Fprintf(out, " Source Image: %s\n", info.Source.SourceImage) + } + + if info.Source.SourceURL != "" { + fmt.Fprintf(out, " Source URL: %s\n", info.Source.SourceURL) + } + + if info.Source.Commit != "" { + fmt.Fprintf(out, " Commit: %s", info.Source.Commit) + if info.Source.GitTreeState != "" && info.Source.GitTreeState != "clean" { + fmt.Fprintf(out, " (%s)", info.Source.GitTreeState) + } + fmt.Fprintf(out, "\n") + } + + if info.Source.BuildDate != "" { + fmt.Fprintf(out, " Build Date: %s\n", info.Source.BuildDate) + } + + if len(info.Suites) > 0 { + fmt.Fprintf(out, " Advertised Suites:\n") + for _, suite := range info.Suites { + fmt.Fprintf(out, " - %s", suite.Name) + if len(suite.Parents) > 0 { + fmt.Fprintf(out, " (parents: %s)", strings.Join(suite.Parents, ", ")) + } + fmt.Fprintf(out, "\n") + } + } + + fmt.Fprintf(out, "\n") +} diff --git a/pkg/cmd/openshift-tests/list/root.go b/pkg/cmd/openshift-tests/list/root.go new file mode 100644 index 000000000000..4bc85953e680 --- /dev/null +++ b/pkg/cmd/openshift-tests/list/root.go @@ -0,0 +1,28 @@ +package list + +import ( + "github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdlist" + "github.com/openshift-eng/openshift-tests-extension/pkg/extension" + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +func NewListCommand(streams genericclioptions.IOStreams, extensionRegistry *extension.Registry) *cobra.Command { + + oteListCmd := cmdlist.NewListCommand(extensionRegistry) + + // Remove OTE's own suites command (maybe put it back later, if we can register all the + // extension suites here too). + for _, c := range oteListCmd.Commands() { + if c.Use == "suites" { + oteListCmd.RemoveCommand(c) + } + } + + oteListCmd.AddCommand( + NewListSuitesCommand(streams), + NewListExtensionsCommand(streams), + ) + + return oteListCmd +} diff --git a/pkg/cmd/openshift-tests/list/suites.go b/pkg/cmd/openshift-tests/list/suites.go new file mode 100644 index 000000000000..f3c0b6b19e8b --- /dev/null +++ b/pkg/cmd/openshift-tests/list/suites.go @@ -0,0 +1,83 @@ +package list + +import ( + "context" + "encoding/json" + "fmt" + "os" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/kubectl/pkg/util/templates" + + origincmd "github.com/openshift/origin/pkg/cmd" + "github.com/openshift/origin/pkg/testsuites" +) + +func NewListSuitesCommand(streams genericclioptions.IOStreams) *cobra.Command { + cmd := &cobra.Command{ + Use: "suites", + Short: "List available test suites", + Long: templates.LongDesc(` + List all available test suites that can be run with the 'run' command. + + This command displays the names and descriptions of all test suites available + in openshift-tests. Use the suite names with the 'run' command to execute + specific test suites. 
+ `), + SilenceUsage: true, + SilenceErrors: true, + PreRunE: func(c *cobra.Command, args []string) error { + if len(os.Getenv("OPENSHIFT_SKIP_EXTERNAL_TESTS")) == 0 { + return origincmd.RequireClusterAccess(c, args) + } + + return nil + }, + RunE: func(cmd *cobra.Command, args []string) error { + // Get output format flag + const flag = "output" + outputFormat, err := cmd.Flags().GetString(flag) + if err != nil { + return errors.Wrapf(err, "error accessing flag %s for command %s", flag, cmd.Name()) + } + + ctx := context.TODO() + + // Get all test suites (internal + extensions) with validation + suites, err := testsuites.AllTestSuites(ctx) + if err != nil { + return err + } + + // Output in the requested format + switch outputFormat { + case "": + // Default human-readable format + output := testsuites.SuitesString(suites, "Available test suites:\n\n") + fmt.Fprint(streams.Out, output) + case "json": + jsonData, err := json.MarshalIndent(suites, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal suites to JSON: %w", err) + } + fmt.Fprintln(streams.Out, string(jsonData)) + case "yaml": + yamlData, err := yaml.Marshal(suites) + if err != nil { + return fmt.Errorf("failed to marshal suites to YAML: %w", err) + } + fmt.Fprintln(streams.Out, string(yamlData)) + default: + return errors.Errorf("invalid output format: %s", outputFormat) + } + + return nil + }, + } + + cmd.Flags().StringP("output", "o", "", "Output format; available options are 'yaml' and 'json'") + return cmd +} diff --git a/pkg/cmd/openshift-tests/run-test/command.go b/pkg/cmd/openshift-tests/run-test/command.go deleted file mode 100644 index 2e56435e403a..000000000000 --- a/pkg/cmd/openshift-tests/run-test/command.go +++ /dev/null @@ -1,82 +0,0 @@ -package run_test - -import ( - "fmt" - "github.com/openshift/origin/pkg/defaultmonitortests" - "os" - "strings" - - "github.com/openshift/origin/pkg/clioptions/clusterdiscovery" - "github.com/openshift/origin/pkg/clioptions/imagesetup" - "github.com/openshift/origin/pkg/clioptions/upgradeoptions" - testginkgo "github.com/openshift/origin/pkg/test/ginkgo" - exutil "github.com/openshift/origin/test/extended/util" - "github.com/openshift/origin/test/extended/util/image" - "github.com/spf13/cobra" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/klog/v2" - "k8s.io/kubectl/pkg/util/templates" -) - -func NewRunTestCommand(streams genericclioptions.IOStreams) *cobra.Command { - testOpt := testginkgo.NewTestOptions(streams) - monitorNames := defaultmonitortests.ListAllMonitorTests() - - cmd := &cobra.Command{ - Use: "run-test NAME", - Short: "Run a single test by name", - Long: templates.LongDesc(` - Execute a single test - - This executes a single test by name. It is used by the run command during suite execution but may also - be used to test in isolation while developing new tests. - `), - - SilenceUsage: true, - SilenceErrors: true, - RunE: func(cmd *cobra.Command, args []string) error { - if v := os.Getenv("TEST_LOG_LEVEL"); len(v) > 0 { - cmd.Flags().Lookup("v").Value.Set(v) - } - - // set globals so that helpers will create pods with the mapped images if we create them from this process. - // we cannot eliminate the env var usage until we convert run-test, which we may be able to do in a followup. 
- image.InitializeImages(os.Getenv("KUBE_TEST_REPO")) - - if err := imagesetup.VerifyImages(); err != nil { - return err - } - - config, err := clusterdiscovery.DecodeProvider(os.Getenv("TEST_PROVIDER"), testOpt.DryRun, false, nil) - if err != nil { - return err - } - if err := clusterdiscovery.InitializeTestFramework(exutil.TestContext, config, testOpt.DryRun); err != nil { - return err - } - klog.V(4).Infof("Loaded test configuration: %#v", exutil.TestContext) - - exutil.TestContext.ReportDir = os.Getenv("TEST_JUNIT_DIR") - - // allow upgrade test to pass some parameters here, although this may be - // better handled as an env var within the test itself in the future - upgradeOptionsYAML := os.Getenv("TEST_UPGRADE_OPTIONS") - upgradeOptions, err := upgradeoptions.NewUpgradeOptionsFromYAML(upgradeOptionsYAML) - if err != nil { - return err - } - // TODO this is called from run-upgrade and run-test. At least one of these ought not need it. - if err := upgradeOptions.SetUpgradeGlobals(); err != nil { - return err - } - - exutil.WithCleanup(func() { err = testOpt.Run(args) }) - return err - }, - } - cmd.Flags().BoolVar(&testOpt.DryRun, "dry-run", testOpt.DryRun, "Print the test to run without executing them.") - cmd.Flags().StringSliceVar(&testOpt.ExactMonitorTests, "monitor", testOpt.ExactMonitorTests, - fmt.Sprintf("list of exactly which monitors to enable. All others will be disabled. Current monitors are: [%s]", strings.Join(monitorNames, ", "))) - cmd.Flags().StringSliceVar(&testOpt.DisableMonitorTests, "disable-monitor", testOpt.DisableMonitorTests, "list of monitors to disable. Defaults for others will be honored.") - return cmd -} diff --git a/pkg/cmd/openshift-tests/run-upgrade/flags.go b/pkg/cmd/openshift-tests/run-upgrade/flags.go index 86be6eeab09a..00be9050bf5a 100644 --- a/pkg/cmd/openshift-tests/run-upgrade/flags.go +++ b/pkg/cmd/openshift-tests/run-upgrade/flags.go @@ -2,13 +2,14 @@ package run_upgrade import ( "fmt" + + "github.com/spf13/pflag" + "k8s.io/cli-runtime/pkg/genericclioptions" + "github.com/openshift/origin/pkg/clioptions/clusterdiscovery" "github.com/openshift/origin/pkg/clioptions/iooptions" - "github.com/openshift/origin/pkg/clioptions/kubeconfig" "github.com/openshift/origin/pkg/clioptions/suiteselection" testginkgo "github.com/openshift/origin/pkg/test/ginkgo" - "github.com/spf13/pflag" - "k8s.io/cli-runtime/pkg/genericclioptions" ) // TODO collapse this with cmd_runsuite @@ -60,11 +61,6 @@ func (f *RunUpgradeSuiteFlags) SetIOStreams(streams genericclioptions.IOStreams) } func (f *RunUpgradeSuiteFlags) ToOptions(args []string) (*RunUpgradeSuiteOptions, error) { - adminRESTConfig, err := kubeconfig.GetStaticRESTConfig() - if err != nil { - return nil, err - } - closeFn, err := f.OutputFlags.ConfigureIOStreams(f.IOStreams, f) if err != nil { return nil, err @@ -82,12 +78,7 @@ func (f *RunUpgradeSuiteFlags) ToOptions(args []string) (*RunUpgradeSuiteOptions suite, err := f.TestSuiteSelectionFlags.SelectSuite( f.AvailableSuites, - args, - kubeconfig.NewDiscoveryGetter(adminRESTConfig), - kubeconfig.NewConfigClientGetter(adminRESTConfig), - f.GinkgoRunSuiteOptions.DryRun, - nil, - ) + args) if err != nil { return nil, err } diff --git a/pkg/cmd/openshift-tests/run-upgrade/options.go b/pkg/cmd/openshift-tests/run-upgrade/options.go index 7c75cc63aff5..34f30c4db519 100644 --- a/pkg/cmd/openshift-tests/run-upgrade/options.go +++ b/pkg/cmd/openshift-tests/run-upgrade/options.go @@ -8,6 +8,10 @@ import ( "github.com/openshift/origin/pkg/monitortestframework" + 
"k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/klog/v2" + k8simage "k8s.io/kubernetes/test/utils/image" + "github.com/openshift/origin/pkg/clioptions/clusterdiscovery" "github.com/openshift/origin/pkg/clioptions/imagesetup" "github.com/openshift/origin/pkg/clioptions/iooptions" @@ -16,9 +20,6 @@ import ( "github.com/openshift/origin/pkg/version" exutil "github.com/openshift/origin/test/extended/util" "github.com/openshift/origin/test/extended/util/image" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/klog/v2" - k8simage "k8s.io/kubernetes/test/utils/image" ) // TODO collapse this with cmd_runsuite @@ -117,7 +118,7 @@ func (o *RunUpgradeSuiteOptions) Run(ctx context.Context) error { if !o.GinkgoRunSuiteOptions.DryRun { fmt.Fprintf(os.Stderr, "%s version: %s\n", filepath.Base(os.Args[0]), version.Get().String()) } - exitErr := o.GinkgoRunSuiteOptions.Run(o.Suite, "openshift-tests-upgrade", monitorTestInfo, true) + exitErr := o.GinkgoRunSuiteOptions.Run(o.Suite, nil, "openshift-tests-upgrade", monitorTestInfo, true) if exitErr != nil { fmt.Fprintf(os.Stderr, "Suite run returned error: %s\n", exitErr.Error()) } diff --git a/pkg/cmd/openshift-tests/run/command.go b/pkg/cmd/openshift-tests/run/command.go index 57bdd4801e88..8d9fc1ff5fe6 100644 --- a/pkg/cmd/openshift-tests/run/command.go +++ b/pkg/cmd/openshift-tests/run/command.go @@ -2,25 +2,28 @@ package run import ( "context" - "fmt" - "github.com/openshift/origin/pkg/clioptions/imagesetup" - "github.com/openshift/origin/pkg/testsuites" + "github.com/openshift-eng/openshift-tests-extension/pkg/extension" + "github.com/pkg/errors" "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/kubectl/pkg/util/templates" + + "github.com/openshift/origin/pkg/clioptions/imagesetup" + "github.com/openshift/origin/pkg/cmd" + "github.com/openshift/origin/pkg/testsuites" ) -func NewRunCommand(streams genericclioptions.IOStreams) *cobra.Command { - f := NewRunSuiteFlags(streams, imagesetup.DefaultTestImageMirrorLocation, testsuites.StandardTestSuites()) +func NewRunCommand(streams genericclioptions.IOStreams, internalExtension *extension.Extension) *cobra.Command { + f := NewRunSuiteFlags(streams, imagesetup.DefaultTestImageMirrorLocation) - cmd := &cobra.Command{ + runCmd := &cobra.Command{ Use: "run SUITE", Short: "Run a test suite", Long: templates.LongDesc(` Run a test suite against an OpenShift server - This command will run one of the following suites against a cluster identified by the current + This command will run one of the available test suites against a cluster identified by the current KUBECONFIG file. See the suite description for more on what actions the suite will take. If you specify the --dry-run argument, the names of each individual test that is part of the @@ -28,26 +31,31 @@ func NewRunCommand(streams genericclioptions.IOStreams) *cobra.Command { command with the --file argument. You may also pipe a list of test names, one per line, on standard input by passing "-f -". - `) + testsuites.SuitesString(testsuites.StandardTestSuites(), "\n\nAvailable test suites:\n\n"), + Use 'openshift-tests list suites' to see all available test suites. 
+ `), SilenceUsage: true, SilenceErrors: true, + PreRunE: cmd.RequireClusterAccess, RunE: func(cmd *cobra.Command, args []string) error { - o, err := f.ToOptions(args) + allSuites, err := testsuites.AllTestSuites(context.Background()) + if err != nil { + return errors.WithMessage(err, "couldn't retrieve test suites") + } + + o, err := f.ToOptions(args, allSuites, internalExtension) if err != nil { - fmt.Fprintf(f.IOStreams.ErrOut, "error converting to options: %v", err) - return err + return errors.WithMessage(err, "error converting to options") } ctx, cancel := context.WithCancel(context.Background()) defer cancel() if err := o.Run(ctx); err != nil { - fmt.Fprintf(f.IOStreams.ErrOut, "error running options: %v", err) - return err + return errors.WithMessage(err, "error running a test suite") } return nil }, } - f.BindFlags(cmd.Flags()) - return cmd + f.BindFlags(runCmd.Flags()) + return runCmd } diff --git a/pkg/cmd/openshift-tests/run/flags.go b/pkg/cmd/openshift-tests/run/flags.go index d91aff497237..106e71ce8266 100644 --- a/pkg/cmd/openshift-tests/run/flags.go +++ b/pkg/cmd/openshift-tests/run/flags.go @@ -1,16 +1,17 @@ package run import ( - "fmt" + "os" + + "github.com/openshift-eng/openshift-tests-extension/pkg/extension" + "github.com/spf13/pflag" + "k8s.io/cli-runtime/pkg/genericclioptions" + "github.com/openshift/origin/pkg/clioptions/clusterdiscovery" "github.com/openshift/origin/pkg/clioptions/iooptions" - "github.com/openshift/origin/pkg/clioptions/kubeconfig" "github.com/openshift/origin/pkg/clioptions/suiteselection" testginkgo "github.com/openshift/origin/pkg/test/ginkgo" exutil "github.com/openshift/origin/test/extended/util" - "github.com/spf13/pflag" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/client-go/rest" ) // TODO collapse this with cmd_runsuite @@ -18,7 +19,6 @@ type RunSuiteFlags struct { GinkgoRunSuiteOptions *testginkgo.GinkgoRunSuiteOptions TestSuiteSelectionFlags *suiteselection.TestSuiteSelectionFlags OutputFlags *iooptions.OutputFlags - AvailableSuites []*testginkgo.TestSuite FromRepository string ProviderTypeOrJSON string @@ -34,12 +34,11 @@ type RunSuiteFlags struct { genericclioptions.IOStreams } -func NewRunSuiteFlags(streams genericclioptions.IOStreams, fromRepository string, availableSuites []*testginkgo.TestSuite) *RunSuiteFlags { +func NewRunSuiteFlags(streams genericclioptions.IOStreams, fromRepository string) *RunSuiteFlags { return &RunSuiteFlags{ GinkgoRunSuiteOptions: testginkgo.NewGinkgoRunSuiteOptions(streams), TestSuiteSelectionFlags: suiteselection.NewTestSuiteSelectionFlags(streams), OutputFlags: iooptions.NewOutputOptions(), - AvailableSuites: availableSuites, FromRepository: fromRepository, IOStreams: streams, @@ -49,7 +48,7 @@ func NewRunSuiteFlags(streams genericclioptions.IOStreams, fromRepository string // SuiteWithKubeTestInitializationPreSuite // 1. invokes the Kube suite in order to populate data from the environment for the CSI suite (originally, but now everything). // 2. ensures that the suite filters out tests from providers that aren't relevant (see exutilcluster.ClusterConfig.MatchFn) by -// loading the provider info from the cluster or flags. +// loading the provider info from the cluster or flags, including API groups and feature gates. 
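+// For example, --provider accepts either a bare provider name or a JSON blob;
+// the JSON keys below are illustrative, not an exhaustive schema:
+//
+//	--provider=aws
+//	--provider='{"type":"aws","region":"us-east-1","zone":"us-east-1a"}'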
func (f *RunSuiteFlags) SuiteWithKubeTestInitializationPreSuite() (*clusterdiscovery.ClusterConfiguration, error) { providerConfig, err := clusterdiscovery.DecodeProvider(f.ProviderTypeOrJSON, f.GinkgoRunSuiteOptions.DryRun, true, nil) if err != nil { @@ -59,12 +58,13 @@ func (f *RunSuiteFlags) SuiteWithKubeTestInitializationPreSuite() (*clusterdisco if err := clusterdiscovery.InitializeTestFramework(exutil.TestContext, providerConfig, f.GinkgoRunSuiteOptions.DryRun); err != nil { return nil, err } + return providerConfig, nil } func (f *RunSuiteFlags) BindFlags(flags *pflag.FlagSet) { flags.StringVar(&f.FromRepository, "from-repository", f.FromRepository, "A container image repository to retrieve test images from.") - flags.StringVar(&f.ProviderTypeOrJSON, "provider", f.ProviderTypeOrJSON, "The cluster infrastructure provider. Will automatically default to the correct value.") + flags.StringVar(&f.ProviderTypeOrJSON, "provider", os.Getenv("TEST_PROVIDER"), "The cluster infrastructure provider. Will automatically default to the correct value.") f.GinkgoRunSuiteOptions.BindFlags(flags) f.TestSuiteSelectionFlags.BindFlags(flags) f.OutputFlags.BindFlags(flags) @@ -75,16 +75,7 @@ func (f *RunSuiteFlags) SetIOStreams(streams genericclioptions.IOStreams) { f.GinkgoRunSuiteOptions.SetIOStreams(streams) } -func (f *RunSuiteFlags) ToOptions(args []string) (*RunSuiteOptions, error) { - adminRESTConfig, err := kubeconfig.GetStaticRESTConfig() - switch { - case err != nil && f.GinkgoRunSuiteOptions.DryRun: - fmt.Fprintf(f.ErrOut, "Unable to get admin rest config, skipping apigroup check in the dry-run mode: %v\n", err) - adminRESTConfig = &rest.Config{} - case err != nil && !f.GinkgoRunSuiteOptions.DryRun: - return nil, fmt.Errorf("unable to get admin rest config, %w", err) - } - +func (f *RunSuiteFlags) ToOptions(args []string, availableSuites []*testginkgo.TestSuite, internalExtension *extension.Extension) (*RunSuiteOptions, error) { closeFn, err := f.OutputFlags.ConfigureIOStreams(f.IOStreams, f) if err != nil { return nil, err @@ -98,13 +89,8 @@ func (f *RunSuiteFlags) ToOptions(args []string) (*RunSuiteOptions, error) { return nil, err } suite, err := f.TestSuiteSelectionFlags.SelectSuite( - f.AvailableSuites, - args, - kubeconfig.NewDiscoveryGetter(adminRESTConfig), - kubeconfig.NewConfigClientGetter(adminRESTConfig), - f.GinkgoRunSuiteOptions.DryRun, - providerConfig.MatchFn(), - ) + availableSuites, + args) if err != nil { return nil, err } @@ -112,6 +98,8 @@ func (f *RunSuiteFlags) ToOptions(args []string) (*RunSuiteOptions, error) { o := &RunSuiteOptions{ GinkgoRunSuiteOptions: ginkgoOptions, Suite: suite, + Extension: internalExtension, + ClusterConfig: providerConfig, FromRepository: f.FromRepository, CloudProviderJSON: providerConfig.ToJSONString(), CloseFn: closeFn, diff --git a/pkg/cmd/openshift-tests/run/options.go b/pkg/cmd/openshift-tests/run/options.go index 98966fea2008..13b32ab944a4 100644 --- a/pkg/cmd/openshift-tests/run/options.go +++ b/pkg/cmd/openshift-tests/run/options.go @@ -6,15 +6,18 @@ import ( "os" "path/filepath" + "github.com/openshift-eng/openshift-tests-extension/pkg/extension" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/klog/v2" + k8simage "k8s.io/kubernetes/test/utils/image" + + "github.com/openshift/origin/pkg/clioptions/clusterdiscovery" "github.com/openshift/origin/pkg/clioptions/imagesetup" "github.com/openshift/origin/pkg/clioptions/iooptions" "github.com/openshift/origin/pkg/monitortestframework" testginkgo 
"github.com/openshift/origin/pkg/test/ginkgo" "github.com/openshift/origin/pkg/version" "github.com/openshift/origin/test/extended/util/image" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/klog/v2" - k8simage "k8s.io/kubernetes/test/utils/image" ) // TODO collapse this with cmd_runsuite @@ -27,6 +30,12 @@ type RunSuiteOptions struct { CloseFn iooptions.CloseFunc genericclioptions.IOStreams + + // ClusterConfig contains cluster-specific configuration for filtering tests + ClusterConfig *clusterdiscovery.ClusterConfiguration + + // Extension is the internal origin extension of its own test specs. + Extension *extension.Extension } func (o *RunSuiteOptions) TestCommandEnvironment() []string { @@ -90,7 +99,9 @@ func (o *RunSuiteOptions) Run(ctx context.Context) error { fmt.Fprintf(os.Stderr, "%s version: %s\n", filepath.Base(os.Args[0]), version.Get().String()) } - exitErr := o.GinkgoRunSuiteOptions.Run(o.Suite, "openshift-tests", monitorTestInfo, false) + o.GinkgoRunSuiteOptions.Extension = o.Extension + + exitErr := o.GinkgoRunSuiteOptions.Run(o.Suite, o.ClusterConfig, "openshift-tests", monitorTestInfo, false) if exitErr != nil { fmt.Fprintf(os.Stderr, "Suite run returned error: %s\n", exitErr.Error()) } diff --git a/pkg/cmd/util.go b/pkg/cmd/util.go index ff7d8125f6ae..d2775fca5d5a 100644 --- a/pkg/cmd/util.go +++ b/pkg/cmd/util.go @@ -5,16 +5,29 @@ import ( "os" "github.com/spf13/cobra" + e2e "k8s.io/kubernetes/test/e2e/framework" "github.com/openshift/origin/pkg/version" ) +// RequireClusterAccess can be used as a PreRunE to ensure there's a valid kubeconfig available. It emits +// a user-friendly error message, since the upstream kube one is confusing. e2e.LoadConfig falls back to +// trying to find in-cluster service account creds, but this isn't even a way we support running origin. Give +// the user a nicer error telling them we expect to find a kubeconfig. +func RequireClusterAccess(_ *cobra.Command, _ []string) error { + if _, err := e2e.LoadConfig(true); err != nil { + return fmt.Errorf("failed to find cluster config: ensure KUBECONFIG is set") + } + + return nil +} + // PrintVersion is used as a PersistentPreRun function to ensure we always print the version. -var PrintVersion = func(cmd *cobra.Command, args []string) { +func PrintVersion(_ *cobra.Command, _ []string) { fmt.Fprintf(os.Stdout, "openshift-tests %s\n", version.Get().GitVersion) } // NoPrintVersion is used as an empty PersistentPreRun function so we don't print version info // for some commands. 
-var NoPrintVersion = func(cmd *cobra.Command, args []string) {
+func NoPrintVersion(_ *cobra.Command, _ []string) {
 }
diff --git a/pkg/test/extensions/binary.go b/pkg/test/extensions/binary.go
index 812004efcfa9..2be6c4e90e93 100644
--- a/pkg/test/extensions/binary.go
+++ b/pkg/test/extensions/binary.go
@@ -16,17 +16,120 @@ import (
 	"syscall"
 	"time"
 
+	originVersion "github.com/openshift/origin/pkg/version"
+
+	"github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+	"github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+	g "github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/mod/semver"
 	kapierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/klog/v2"
 	k8simage "k8s.io/kubernetes/test/utils/image"
+	k8sgenerated "k8s.io/kubernetes/openshift-hack/e2e/annotate/generated"
+
+	"github.com/openshift/origin/pkg/clioptions/clusterdiscovery"
+	"github.com/openshift/origin/pkg/clioptions/imagesetup"
+	"github.com/openshift/origin/pkg/clioptions/upgradeoptions"
 	"github.com/openshift/origin/test/extended/util"
+	exutil "github.com/openshift/origin/test/extended/util"
+	origingenerated "github.com/openshift/origin/test/extended/util/annotate/generated"
+	"github.com/openshift/origin/test/extended/util/image"
 )
 
+// InitializeOpenShiftTestsExtensionFramework creates and initializes the extension registry with the origin tests extension.
+func InitializeOpenShiftTestsExtensionFramework() (*extension.Registry, *extension.Extension, error) {
+	// Create the origin extension
+	ov := originVersion.Get()
+	originExtension := &Extension{
+		Extension: &extension.Extension{
+			Component: extension.Component{
+				Product: "openshift",
+				Kind:    "payload",
+				Name:    "origin",
+			},
+			APIVersion: extension.CurrentExtensionAPIVersion,
+		},
+		Source: Source{
+			Source: &extension.Source{
+				Commit:       ov.GitCommit,
+				GitTreeState: ov.GitTreeState,
+				BuildDate:    ov.BuildDate,
+				SourceURL:    "https://github.com/openshift/origin",
+			},
+			SourceBinary: os.Args[0],
+			SourceImage:  "tests",
+		},
+	}
+
+	// Create our registry of openshift-tests extensions
+	extensionRegistry := extension.NewRegistry()
+	extensionRegistry.Register(originExtension.Extension)
+
+	// Build our specs from ginkgo
+	specs, err := g.BuildExtensionTestSpecsFromOpenShiftGinkgoSuite()
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to build extension test specs: %w", err)
+	}
+
+	// Apply annotations to test names
+	specs.Walk(func(spec *extensiontests.ExtensionTestSpec) {
+		// we need to ensure the default path always annotates both
+		// origin and k8s tests accordingly, since each of these
+		// currently has its own annotations, which are not
+		// merged anywhere else but applied here
+		if append, ok := origingenerated.Annotations[spec.Name]; ok {
+			spec.Name += append
+		}
+		if append, ok := k8sgenerated.Annotations[spec.Name]; ok {
+			spec.Name += append
+		}
+	})
+
+	// Filter out kube tests; vendor filtering isn't working within origin
+	specs = specs.Select(func(spec *extensiontests.ExtensionTestSpec) bool {
+		return !strings.Contains(spec.Name, "[Suite:k8s")
+	})
+
+	specs.AddBeforeAll(func() {
+		config, err := clusterdiscovery.DecodeProvider(os.Getenv("TEST_PROVIDER"), false, false, nil)
+		if err != nil {
+			panic(err)
+		}
+		if err := clusterdiscovery.InitializeTestFramework(exutil.TestContext, config, false); err != nil {
+			panic(err)
+		}
+		
klog.V(4).Infof("Loaded test configuration: %#v", exutil.TestContext) + + exutil.TestContext.ReportDir = os.Getenv("TEST_JUNIT_DIR") + + image.InitializeImages(os.Getenv("KUBE_TEST_REPO")) + + if err := imagesetup.VerifyImages(); err != nil { + panic(err) + } + + // Handle upgrade options + upgradeOptionsYAML := os.Getenv("TEST_UPGRADE_OPTIONS") + upgradeOptions, err := upgradeoptions.NewUpgradeOptionsFromYAML(upgradeOptionsYAML) + if err != nil { + panic(err) + } + + if err := upgradeOptions.SetUpgradeGlobals(); err != nil { + panic(err) + } + }) + + originExtension.Extension.AddSpecs(specs) + + return extensionRegistry, originExtension.Extension, nil +} + // TestBinary implements the openshift-tests extension interface (Info, ListTests, RunTests, etc). type TestBinary struct { // The payload image tag in which an external binary path can be found @@ -35,7 +138,7 @@ type TestBinary struct { binaryPath string // Cache the info after gathering it - info *ExtensionInfo + info *Extension } // ImageSet maps a Kubernetes image ID to its corresponding configuration. @@ -50,7 +153,16 @@ type Image struct { Version string `json:"version"` } +// extensionBinaries is the registry of additional test binaries to use as extension tests. Members +// of the registry must be part of the release payload. var extensionBinaries = []TestBinary{ + // Self reference for origin's own internal extension + { + imageTag: "tests", + binaryPath: os.Args[0], + }, + + // Extensions in other payload images { imageTag: "hyperkube", binaryPath: "/usr/bin/k8s-tests-ext.gz", @@ -62,7 +174,7 @@ var extensionBinaries = []TestBinary{ } // Info returns information about this particular extension. -func (b *TestBinary) Info(ctx context.Context) (*ExtensionInfo, error) { +func (b *TestBinary) Info(ctx context.Context) (*Extension, error) { if b.info != nil { return b.info, nil } @@ -74,18 +186,29 @@ func (b *TestBinary) Info(ctx context.Context) (*ExtensionInfo, error) { command := exec.Command(b.binaryPath, "info") infoJson, err := runWithTimeout(ctx, command, 10*time.Minute) if err != nil { + logrus.Errorf("Failed to fetch info for %s: %v", binName, err) + logrus.Errorf("Command output for %s: %s", binName, string(infoJson)) return nil, fmt.Errorf("failed running '%s info': %w\nOutput: %s", b.binaryPath, err, infoJson) } jsonBegins := bytes.IndexByte(infoJson, '{') jsonEnds := bytes.LastIndexByte(infoJson, '}') - var info ExtensionInfo - err = json.Unmarshal(infoJson[jsonBegins:jsonEnds+1], &info) + if jsonBegins == -1 || jsonEnds == -1 || jsonBegins > jsonEnds { + logrus.Errorf("No valid JSON found in output from %s info command", binName) + logrus.Errorf("Raw output from %s: %s", binName, string(infoJson)) + return nil, fmt.Errorf("no valid JSON found in output from '%s info' command", binName) + } + var info Extension + jsonData := infoJson[jsonBegins : jsonEnds+1] + err = json.Unmarshal(jsonData, &info) if err != nil { - return nil, errors.Wrapf(err, "couldn't unmarshal extension info: %s", string(infoJson)) + logrus.Errorf("Failed to unmarshal JSON from %s: %v", binName, err) + logrus.Errorf("JSON data from %s: %s", binName, string(jsonData)) + return nil, errors.Wrapf(err, "couldn't unmarshal extension info from %s: %s", binName, string(jsonData)) } b.info = &info // Set fields origin knows or calculates: + b.info.Binary = b b.info.Source.SourceBinary = binName b.info.Source.SourceImage = b.imageTag b.info.ExtensionArtifactDir = path.Join(os.Getenv("ARTIFACT_DIR"), safeComponentPath(&b.info.Component)) @@ -187,7 +310,7 @@ 
func (b *TestBinary) RunTests(ctx context.Context, timeout time.Duration, env [] // - we got a test result we didn't expect at all (maybe the external binary improperly // mutated the name, or otherwise did something weird) if !expectedTests.Has(result.Name) { - result.Result = ResultFailed + result.Result = extensiontests.ResultFailed result.Error = fmt.Sprintf("test binary %q returned unexpected result: %s", binName, result.Name) } expectedTests.Delete(result.Name) @@ -200,12 +323,13 @@ func (b *TestBinary) RunTests(ctx context.Context, timeout time.Duration, env [] // we didn't get results for them. for _, expectedTest := range expectedTests.UnsortedList() { results = append(results, &ExtensionTestResult{ - Name: expectedTest, - Result: ResultFailed, - Output: string(testResult), - Error: "external binary did not produce a result for this test", - - Source: b.info.Source, + &extensiontests.ExtensionTestResult{ + Name: expectedTest, + Result: extensiontests.ResultFailed, + Output: string(testResult), + Error: "external binary did not produce a result for this test", + }, + b.info.Source, }) } @@ -245,6 +369,18 @@ func (b *TestBinary) ListImages(ctx context.Context) (ImageSet, error) { // ExtractAllTestBinaries determines the optimal release payload to use, and extracts all the external // test binaries from it, and returns a slice of them. func ExtractAllTestBinaries(ctx context.Context, parallelism int) (func(), TestBinaries, error) { + if len(os.Getenv("OPENSHIFT_SKIP_EXTERNAL_TESTS")) > 0 { + logrus.Warning("Using built-in tests only due to OPENSHIFT_SKIP_EXTERNAL_TESTS being set") + var internalBinaries []*TestBinary + for _, b := range extensionBinaries { + if b.binaryPath == os.Args[0] { + internalBinaries = append(internalBinaries, &b) + } + } + + return func() {}, internalBinaries, nil + } + if parallelism < 1 { return nil, nil, errors.New("parallelism must be greater than zero") } @@ -352,6 +488,13 @@ func ExtractAllTestBinaries(ctx context.Context, parallelism int) (func(), TestB if !ok { return // Channel is closed } + + // Self reference, no need to extract + if b.binaryPath == os.Args[0] { + binaries = append(binaries, &b) + continue + } + testBinary, err := externalBinaryProvider.ExtractBinaryFromReleaseImage(b.imageTag, b.binaryPath) if err != nil { errCh <- err @@ -386,9 +529,9 @@ func ExtractAllTestBinaries(ctx context.Context, parallelism int) (func(), TestB type TestBinaries []*TestBinary // Info fetches the info from all TestBinaries using the specified parallelism. 
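The helpers in this file (Info, ExtractAllTestBinaries, ListImages) all share the same fan-out shape: a channel of work items drained by `parallelism` goroutines, with failures collected on a buffered error channel. A compressed sketch of that pattern, where process is an illustrative stand-in for the per-binary work, not a real helper:

	jobCh := make(chan *TestBinary)
	errCh := make(chan error, len(binaries))
	var wg sync.WaitGroup
	for i := 0; i < parallelism; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for b := range jobCh {
				if err := process(b); err != nil { // stand-in for Info/extract/ListImages
					errCh <- err
				}
			}
		}()
	}
	for _, b := range binaries {
		jobCh <- b
	}
	close(jobCh)
	wg.Wait()
	close(errCh)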
-func (binaries TestBinaries) Info(ctx context.Context, parallelism int) ([]*ExtensionInfo, error) { +func (binaries TestBinaries) Info(ctx context.Context, parallelism int) ([]*Extension, error) { var ( - infos []*ExtensionInfo + infos []*Extension mu sync.Mutex wg sync.WaitGroup errCh = make(chan error, len(binaries)) @@ -422,7 +565,9 @@ func (binaries TestBinaries) Info(ctx context.Context, parallelism int) ([]*Exte } info, err := binary.Info(ctx) if err != nil { - errCh <- err + binName := filepath.Base(binary.binaryPath) + logrus.Errorf("Failed to get info from binary %s: %v", binName, err) + errCh <- fmt.Errorf("binary %s: %w", binName, err) } mu.Lock() infos = append(infos, info) @@ -442,7 +587,8 @@ func (binaries TestBinaries) Info(ctx context.Context, parallelism int) ([]*Exte errs = append(errs, err.Error()) } if len(errs) > 0 { - return nil, fmt.Errorf("encountered errors while fetch info: %s", strings.Join(errs, ";")) + logrus.Errorf("Failed to fetch info from %d binaries", len(errs)) + return nil, fmt.Errorf("encountered errors while fetching info: %s", strings.Join(errs, "; ")) } return infos, nil @@ -482,6 +628,10 @@ func (binaries TestBinaries) ListImages(ctx context.Context, parallelism int) ([ if !ok { return // Channel was closed } + if binary.binaryPath == os.Args[0] { + continue // Skip self - only external binaries need to be queried for images + } + imageConfig, err := binary.ListImages(ctx) if err != nil { errCh <- err @@ -605,7 +755,7 @@ func runWithTimeout(ctx context.Context, c *exec.Cmd, timeout time.Duration) ([] var safePathRegexp = regexp.MustCompile(`[<>:"/\\|?*\s]+`) // safeComponentPath sanitizes a component identifier to be safe for use as a file or directory name. -func safeComponentPath(c *Component) string { +func safeComponentPath(c *extension.Component) string { return path.Join( safePathRegexp.ReplaceAllString(c.Product, "_"), safePathRegexp.ReplaceAllString(c.Kind, "_"), diff --git a/pkg/test/extensions/types.go b/pkg/test/extensions/types.go index 68848796b7e5..06636b9f6475 100644 --- a/pkg/test/extensions/types.go +++ b/pkg/test/extensions/types.go @@ -3,36 +3,26 @@ package extensions import ( "fmt" "strings" - "time" + "github.com/openshift-eng/openshift-tests-extension/pkg/extension" + "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests" configv1 "github.com/openshift/api/config/v1" "github.com/sirupsen/logrus" - "k8s.io/apimachinery/pkg/util/sets" ) -// ExtensionInfo represents an extension to openshift-tests. -type ExtensionInfo struct { - APIVersion string `json:"apiVersion"` - Source Source `json:"source"` - Component Component `json:"component"` - - // Suites that the extension wants to advertise/participate in. - Suites []Suite `json:"suites"` +// Extension represents an extension to openshift-tests. +type Extension struct { + *extension.Extension // -- origin specific info -- - ExtensionArtifactDir string `json:"extension_artifact_dir"` + Binary *TestBinary `json:"-"` + Source Source `json:"source"` + ExtensionArtifactDir string `json:"extension_artifact_dir"` } // Source contains the details of the commit and source URL. type Source struct { - // Commit from which this binary was compiled. 
- Commit string `json:"commit"` - // BuildDate ISO8601 string of when the binary was built - BuildDate string `json:"build_date"` - // GitTreeState lets you know the status of the git tree (clean/dirty) - GitTreeState string `json:"git_tree_state"` - // SourceURL contains the url of the git repository (if known) that this extension was built from. - SourceURL string `json:"source_url,omitempty"` + *extension.Source // -- origin specific info -- @@ -43,31 +33,6 @@ type Source struct { SourceBinary string `json:"source_binary,omitempty"` } -// Component represents the component the binary acts on. -type Component struct { - // The product this component is part of. - Product string `json:"product"` - // The type of the component. - Kind string `json:"type"` - // The name of the component. - Name string `json:"name"` -} - -// Suite represents additional suites the extension wants to advertise. -type Suite struct { - // The name of the suite. - Name string `json:"name"` - // Parent suites this suite is part of. - Parents []string `json:"parents,omitempty"` - // Qualifiers are CEL expressions that are OR'd together for test selection that are members of the suite. - Qualifiers []string `json:"qualifiers,omitempty"` -} - -type Lifecycle string - -var LifecycleInforming Lifecycle = "informing" -var LifecycleBlocking Lifecycle = "blocking" - type ExtensionTestSpecs []*ExtensionTestSpec type EnvironmentSelector struct { @@ -76,115 +41,60 @@ type EnvironmentSelector struct { } type ExtensionTestSpec struct { - Name string `json:"name"` - - // OriginalName contains the very first name this test was ever known as, used to preserve - // history across all names. - OriginalName string `json:"originalName,omitempty"` - - // Labels are single string values to apply to the test spec - Labels sets.Set[string] `json:"labels"` - - // Tags are key:value pairs - Tags map[string]string `json:"tags,omitempty"` - - // Resources gives optional information about what's required to run this test. - Resources Resources `json:"resources"` - - // Source is the origin of the test. - Source string `json:"source"` - - // CodeLocations are the files where the spec originates from. - CodeLocations []string `json:"codeLocations,omitempty"` - - // Lifecycle informs the executor whether the test is informing only, and should not cause the - // overall job run to fail, or if it's blocking where a failure of the test is fatal. - // Informing lifecycle tests can be used temporarily to gather information about a test's stability. - // Tests must not remain informing forever. - Lifecycle Lifecycle `json:"lifecycle"` - - // EnvironmentSelector allows for CEL expressions to be used to control test inclusion - EnvironmentSelector EnvironmentSelector `json:"environmentSelector,omitempty"` + *extensiontests.ExtensionTestSpec // Binary invokes a link to the external binary that provided this test Binary *TestBinary } -type Resources struct { - Isolation Isolation `json:"isolation"` - Memory string `json:"memory,omitempty"` - Duration string `json:"duration,omitempty"` - Timeout string `json:"timeout,omitempty"` -} +// FilterWrappedSpecs applies the upstream Filter method (defined on extensiontests.ExtensionTestSpecs) +// while preserving our local ExtensionTestSpec wrappers. +// +// This is a bit awkward because our ExtensionTestSpecs is a slice of wrappers around +// *extensiontests.ExtensionTestSpec, but the Filter method only exists on the upstream slice type. +// To work around this, we: +// 1. 
Extract the underlying *extensiontests.ExtensionTestSpec values.
+// 2. Call the upstream Filter.
+// 3. Map the filtered results back to the original wrapped specs using pointer identity.
+//
+// This preserves metadata like the Binary field stored in our wrapper.
+func FilterWrappedSpecs(
+	wrappedSpecs ExtensionTestSpecs,
+	qualifiers []string,
+) (ExtensionTestSpecs, error) {
+	var baseSpecs extensiontests.ExtensionTestSpecs
+	specMap := make(map[*extensiontests.ExtensionTestSpec]*ExtensionTestSpec)
+
+	for _, spec := range wrappedSpecs {
+		baseSpecs = append(baseSpecs, spec.ExtensionTestSpec)
+		specMap[spec.ExtensionTestSpec] = spec
+	}
 
-type Isolation struct {
-	Mode     string   `json:"mode,omitempty"`
-	Conflict []string `json:"conflict,omitempty"`
-}
+	filtered, err := baseSpecs.Filter(qualifiers)
+	if err != nil {
+		return nil, err
+	}
 
-type ExtensionTestResults []*ExtensionTestResult
+	var result ExtensionTestSpecs
+	for _, f := range filtered {
+		if orig, ok := specMap[f]; ok {
+			result = append(result, orig)
+		}
+	}
 
-type Result string
+	return result, nil
+}
 
-var ResultPassed Result = "passed"
-var ResultSkipped Result = "skipped"
-var ResultFailed Result = "failed"
+type ExtensionTestResults []*ExtensionTestResult
 
 type ExtensionTestResult struct {
-	Name      string    `json:"name"`
-	Lifecycle Lifecycle `json:"lifecycle"`
-	Duration  int64     `json:"duration"`
-	StartTime *DBTime   `json:"startTime"`
-	EndTime   *DBTime   `json:"endTime"`
-	Result    Result    `json:"result"`
-	Output    string    `json:"output"`
-	Error     string    `json:"error,omitempty"`
-	Details   []Details `json:"details,omitempty"`
+	*extensiontests.ExtensionTestResult
 
 	// Source is the information from the extension binary (its image tag, repo, commit sha, etc), reported
 	// up by origin so it's easy to identify where a particular result came from in the overall combined result JSON.
 	Source Source `json:"source"`
 }
 
-// Details are human-readable messages to further explain skips, timeouts, etc.
-// It can also be used to provide contemporaneous information about failures
-// that may not be easily returned by must-gather. For larger artifacts (greater than
-// 10KB, write them to $EXTENSION_ARTIFACTS_DIR.
-type Details struct {
-	Name  string      `json:"name"`
-	Value interface{} `json:"value"`
-}
-
-// DBTime is a type suitable for direct importing into databases like BigQuery,
-// formatted like 2006-01-02 15:04:05.000000 UTC.
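Because FilterWrappedSpecs keys its map on the embedded pointer, the wrappers it returns are the exact instances passed in, Binary field and all. A hypothetical call site, with an illustrative CEL qualifier (the expression below is an assumption about qualifier syntax, not taken from this patch):

	kept, err := FilterWrappedSpecs(specs, []string{`name.contains("[sig-network]")`})
	if err != nil {
		return err
	}
	// kept still carries each spec's Binary pointer for execution routing.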
-type DBTime time.Time - -func TimePtr(t time.Time) *DBTime { - return (*DBTime)(&t) -} - -func Time(t *DBTime) time.Time { - if t == nil { - return time.Time{} - } - return time.Time(*t) -} - -func (dbt *DBTime) MarshalJSON() ([]byte, error) { - formattedTime := time.Time(*dbt).Format(`"2006-01-02 15:04:05.000000 UTC"`) - return []byte(formattedTime), nil -} - -func (dbt *DBTime) UnmarshalJSON(b []byte) error { - timeStr := string(b[1 : len(b)-1]) - parsedTime, err := time.Parse("2006-01-02 15:04:05.000000 UTC", timeStr) - if err != nil { - return err - } - *dbt = (DBTime)(parsedTime) - return nil -} - // EnvironmentFlagName enumerates each possible EnvironmentFlag's name to be passed to the external binary type EnvironmentFlagName string diff --git a/pkg/test/extensions/util.go b/pkg/test/extensions/util.go index 7868a8ed70d6..54c37947690d 100644 --- a/pkg/test/extensions/util.go +++ b/pkg/test/extensions/util.go @@ -7,7 +7,6 @@ import ( "debug/elf" "encoding/json" "fmt" - "github.com/sirupsen/logrus" "io" "os" "os/exec" @@ -18,12 +17,22 @@ import ( "strings" "time" + "github.com/openshift-eng/openshift-tests-extension/pkg/dbtime" + "github.com/sirupsen/logrus" + imagev1 "github.com/openshift/api/image/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/openshift/origin/test/extended/util" ) +func Time(t *dbtime.DBTime) time.Time { + if t == nil { + return time.Time{} + } + return time.Time(*t) +} + // ungzipFile checks if a binary is gzipped (ends with .gz) and decompresses it. // Returns the new filename of the decompressed file (original is deleted), or original filename if it was not gzipped. func ungzipFile(extractedBinary string) (string, error) { diff --git a/pkg/test/filters/chain.go b/pkg/test/filters/chain.go new file mode 100644 index 000000000000..7a9a03af33c5 --- /dev/null +++ b/pkg/test/filters/chain.go @@ -0,0 +1,82 @@ +package filters + +import ( + "context" + "fmt" + + "github.com/sirupsen/logrus" + + "github.com/openshift/origin/pkg/test/extensions" +) + +// TestFilter represents a single filtering step in the chain +type TestFilter interface { + // Name returns a human-readable name for this filter + Name() string + + // Filter applies the filtering logic and returns the filtered tests + Filter(ctx context.Context, tests extensions.ExtensionTestSpecs) (extensions.ExtensionTestSpecs, error) + + // ShouldApply returns true if this filter should be applied + ShouldApply() bool +} + +// TestFilterChain manages a sequence of test filters +type TestFilterChain struct { + filters []TestFilter + logger *logrus.Entry +} + +// NewFilterChain creates a new filter chain +func NewFilterChain(logger *logrus.Entry) *TestFilterChain { + if logger == nil { + logger = logrus.NewEntry(logrus.StandardLogger()) + } + return &TestFilterChain{ + filters: make([]TestFilter, 0), + logger: logger, + } +} + +// AddFilter adds a filter to the chain +func (p *TestFilterChain) AddFilter(filter TestFilter) *TestFilterChain { + p.filters = append(p.filters, filter) + return p +} + +// Apply runs all filters in sequence, logging each step +func (p *TestFilterChain) Apply(ctx context.Context, tests extensions.ExtensionTestSpecs) (extensions.ExtensionTestSpecs, error) { + current := tests + + for _, filter := range p.filters { + flog := p.logger.WithField("filter", filter.Name()) + + if !filter.ShouldApply() { + flog.Debug("Skipping filter (not applicable)") + continue + } + + origCount := len(current) + flog.WithField("before", origCount). 
+ Infof("Applying filter: %s", filter.Name()) + + filtered, err := filter.Filter(ctx, current) + if err != nil { + return nil, fmt.Errorf("filter %s failed: %w", filter.Name(), err) + } + + filteredCount := len(filtered) + removedCount := origCount - filteredCount + flog.WithField("before", origCount). + WithField("after", filteredCount). + WithField("removed", removedCount). + Infof("Filter %s completed - removed %d tests", filter.Name(), removedCount) + + current = filtered + } + + p.logger.WithField("final_count", len(current)). + Infof("Filter chain completed with %d tests", len(current)) + + return current, nil +} diff --git a/pkg/test/filters/chain_test.go b/pkg/test/filters/chain_test.go new file mode 100644 index 000000000000..8eb388166edb --- /dev/null +++ b/pkg/test/filters/chain_test.go @@ -0,0 +1,90 @@ +package filters + +import ( + "context" + "strings" + "testing" + + "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/openshift/origin/pkg/test/extensions" +) + +func TestFilterChainBasic(t *testing.T) { + logger := logrus.NewEntry(logrus.New()) + logger.Logger.SetLevel(logrus.FatalLevel) + + tests := extensions.ExtensionTestSpecs{ + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "normal test"}}, + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test [Disabled:reason]"}}, + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "another normal test"}}, + } + + pipeline := NewFilterChain(logger). + AddFilter(&DisabledTestsFilter{}) + + result, err := pipeline.Apply(context.Background(), tests) + + require.NoError(t, err) + assert.Len(t, result, 2) // Should filter out the disabled test + assert.Equal(t, "normal test", result[0].Name) + assert.Equal(t, "another normal test", result[1].Name) +} + +func TestDisabledTestsFilter(t *testing.T) { + filter := &DisabledTestsFilter{} + + tests := extensions.ExtensionTestSpecs{ + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "normal test"}}, + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test [Disabled:reason]"}}, + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "another normal test"}}, + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test [Skipped:reason]"}}, // This won't be filtered by isDisabled + } + + result, err := filter.Filter(context.Background(), tests) + + require.NoError(t, err) + assert.Len(t, result, 3) // Only the [Disabled:reason] test should be filtered out + + // Check that the disabled test was filtered out + for _, test := range result { + assert.False(t, strings.Contains(test.Name, "[Disabled:reason]")) + } +} + +func TestFilterPipelineErrorHandling(t *testing.T) { + logger := logrus.NewEntry(logrus.New()) + logger.Logger.SetLevel(logrus.FatalLevel) // Suppress logs during tests + + errorFilter := &testErrorFilter{} + + pipeline := NewFilterChain(logger). 
+ AddFilter(errorFilter) + + tests := extensions.ExtensionTestSpecs{ + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test"}}, + } + + result, err := pipeline.Apply(context.Background(), tests) + + assert.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), "test-error-filter failed") +} + +type testErrorFilter struct{} + +func (f *testErrorFilter) Name() string { + return "test-error-filter" +} + +func (f *testErrorFilter) Filter(ctx context.Context, tests extensions.ExtensionTestSpecs) (extensions.ExtensionTestSpecs, error) { + return nil, assert.AnError +} + +func (f *testErrorFilter) ShouldApply() bool { + return true +} diff --git a/pkg/test/filters/cluster_state.go b/pkg/test/filters/cluster_state.go new file mode 100644 index 000000000000..2372f75c4081 --- /dev/null +++ b/pkg/test/filters/cluster_state.go @@ -0,0 +1,175 @@ +package filters + +import ( + "context" + "fmt" + "regexp" + "strings" + + "github.com/sirupsen/logrus" + + "github.com/openshift/origin/pkg/clioptions/clusterdiscovery" + "github.com/openshift/origin/pkg/test/extensions" +) + +// ClusterStateFilter filters tests based on cluster environment +type ClusterStateFilter struct { + config *clusterdiscovery.ClusterConfiguration + skips []string +} + +func NewClusterStateFilter(config *clusterdiscovery.ClusterConfiguration) *ClusterStateFilter { + if config == nil { + logrus.Warn("Cluster state filter is disabled, cluster config is nil") + return &ClusterStateFilter{} + } + + skips := []string{fmt.Sprintf("[Skipped:%s]", config.ProviderName)} + + if config.IsIBMROKS { + skips = append(skips, "[Skipped:ibmroks]") + } + if config.NetworkPlugin != "" { + skips = append(skips, fmt.Sprintf("[Skipped:Network/%s]", config.NetworkPlugin)) + if config.NetworkPluginMode != "" { + skips = append(skips, fmt.Sprintf("[Skipped:Network/%s/%s]", config.NetworkPlugin, config.NetworkPluginMode)) + } + } + + if config.Disconnected { + skips = append(skips, "[Skipped:Disconnected]") + } + + if config.IsProxied { + skips = append(skips, "[Skipped:Proxy]") + } + + if config.SingleReplicaTopology { + skips = append(skips, "[Skipped:SingleReplicaTopology]") + } + + if !config.HasIPv4 { + skips = append(skips, "[Feature:Networking-IPv4]") + } + if !config.HasIPv6 { + skips = append(skips, "[Feature:Networking-IPv6]") + } + if !config.HasIPv4 || !config.HasIPv6 { + // lack of "]" is intentional; this matches multiple tags + skips = append(skips, "[Feature:IPv6DualStack") + } + + if !config.HasSCTP { + skips = append(skips, "[Feature:SCTPConnectivity]") + } + + if config.HasNoOptionalCapabilities { + skips = append(skips, "[Skipped:NoOptionalCapabilities]") + } + + logrus.WithField("skips", skips).Info("Generated skips for cluster state") + + return &ClusterStateFilter{ + config: config, + skips: skips, + } +} + +func (f *ClusterStateFilter) Name() string { + return "cluster-state" +} + +func (f *ClusterStateFilter) Filter(ctx context.Context, tests extensions.ExtensionTestSpecs) (extensions.ExtensionTestSpecs, error) { + if f.config == nil { + return tests, nil + } + + filtered := make(extensions.ExtensionTestSpecs, 0, len(tests)) + for _, test := range tests { + if f.matchTest(test.Name) { + filtered = append(filtered, test) + } + } + return filtered, nil +} + +func (f *ClusterStateFilter) ShouldApply() bool { + return f.config != nil +} + +// Regular expressions for parsing test labels +var ( + apiGroupRegex = regexp.MustCompile(`\[apigroup:([^]]*)\]`) + featureGateRegex = 
regexp.MustCompile(`\[OCPFeatureGate:([^]]*)\]`) +) + +// matchTest implements the cluster-based test matching logic +func (f *ClusterStateFilter) matchTest(name string) bool { + // Check skip conditions + for _, skip := range f.skips { + if strings.Contains(name, skip) { + logrus.WithField("test", name).WithField("skip", skip).Debug("Skipping test") + return false + } + } + + // Check API groups + requiredAPIGroups := []string{} + matches := apiGroupRegex.FindAllStringSubmatch(name, -1) + for _, match := range matches { + if len(match) < 2 { + panic(fmt.Errorf("regexp match %v is invalid: len(match) < 2 for %v", match, name)) + } + apigroup := match[1] + requiredAPIGroups = append(requiredAPIGroups, apigroup) + } + + if len(requiredAPIGroups) > 0 && (f.config.APIGroups == nil || !f.config.APIGroups.HasAll(requiredAPIGroups...)) { + available := "none" + if f.config.APIGroups != nil { + available = strings.Join(f.config.APIGroups.UnsortedList(), ",") + } + + logrus.WithField("test", name). + WithField("requiredAPIGroups", requiredAPIGroups). + WithField("availableGroups", available). + Debug("Skipping test") + return false + } + + // Apply feature gate filtering - keep this last + featureGates := []string{} + matches = featureGateRegex.FindAllStringSubmatch(name, -1) + for _, match := range matches { + if len(match) < 2 { + panic(fmt.Errorf("regexp match %v is invalid: len(match) < 2 for %v", match, name)) + } + featureGate := match[1] + featureGates = append(featureGates, featureGate) + } + + if len(featureGates) == 0 { + return true + } + + // If any of the required feature gates are disabled, skip the test + if f.config.DisabledFeatureGates != nil && f.config.DisabledFeatureGates.HasAny(featureGates...) { + logrus.WithField("test", name). + WithField("disabledFeatureGates", f.config.DisabledFeatureGates.UnsortedList()). + WithField("requiredFeatureGates", featureGates). + Debug("Skipping test") + return false + } + + // It is important that we always return true if we don't know the status of the gate. + // This generally means we have no opinion on whether the feature is on or off. + // We expect the default case to be on, as this is what would happen after a feature is promoted, + // and the gate is removed. + // + // Therefore, if there are any feature gates defined at all in the cluster, we should + // run the tests as long as none of the required gates are explicitly disabled. + // If there are no feature gates at all in the cluster, we should not run feature gate tests. 
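+	//
+	// Illustrative outcomes for a test requiring [OCPFeatureGate:FeatureA]
+	// (gate sets hypothetical):
+	//   enabled={FeatureA}, disabled={}         -> run
+	//   enabled={},         disabled={FeatureA} -> skip (handled above)
+	//   enabled={},         disabled={}         -> skip (no gate data at all)
+	//   enabled={FeatureB}, disabled={}         -> run (unknown gate assumed on)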
+ hasAnyFeatureGates := (f.config.EnabledFeatureGates != nil && f.config.EnabledFeatureGates.Len() > 0) || + (f.config.DisabledFeatureGates != nil && f.config.DisabledFeatureGates.Len() > 0) + return hasAnyFeatureGates +} diff --git a/pkg/test/filters/cluster_state_test.go b/pkg/test/filters/cluster_state_test.go new file mode 100644 index 000000000000..669ae8a3ba19 --- /dev/null +++ b/pkg/test/filters/cluster_state_test.go @@ -0,0 +1,251 @@ +package filters + +import ( + "context" + "testing" + + "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/util/sets" + + "github.com/openshift/origin/pkg/clioptions/clusterdiscovery" + "github.com/openshift/origin/pkg/test/extensions" +) + +func TestClusterStateFilter(t *testing.T) { + config := &clusterdiscovery.ClusterConfiguration{ + ProviderName: "aws", + NetworkPlugin: "OVNKubernetes", + HasIPv4: true, + HasIPv6: false, + EnabledFeatureGates: sets.New[string](), + DisabledFeatureGates: sets.New[string](), + APIGroups: sets.New[string](), + } + filter := NewClusterStateFilter(config) + + tests := extensions.ExtensionTestSpecs{ + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "normal test"}}, + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test [Skipped:aws]"}}, + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test [Feature:Networking-IPv6]"}}, + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test [Feature:Networking-IPv4]"}}, + } + + result, err := filter.Filter(context.Background(), tests) + + require.NoError(t, err) + assert.Len(t, result, 2) // Should filter out aws-skipped and IPv6 tests + assert.Equal(t, "normal test", result[0].Name) + assert.Equal(t, "test [Feature:Networking-IPv4]", result[1].Name) +} + +// Test data for comprehensive cluster state filter testing +var e2eTestNames = map[string]string{ + "everyone": "[Skipped:Wednesday]", + "not-gce": "[Skipped:gce]", + "not-aws": "[Skipped:aws]", + "not-sdn": "[Skipped:Network/OpenShiftSDN]", + "not-multitenant": "[Skipped:Network/OpenShiftSDN/Multitenant]", + "online": "[Skipped:Disconnected]", + "ipv4": "[Feature:Networking-IPv4]", + "ipv6": "[Feature:Networking-IPv6]", + "dual-stack": "[Feature:IPv6DualStackAlpha]", + "sctp": "[Feature:SCTPConnectivity]", + "requires-optional-cap": "[Skipped:NoOptionalCapabilities]", + "apigroup-apps": "[apigroup:apps]", + "apigroup-missing": "[apigroup:missing]", + "featuregate-enabled": "[OCPFeatureGate:FeatureA]", + "featuregate-missing": "[OCPFeatureGate:MissingFeature]", + "featuregate-disabled": "[OCPFeatureGate:DisabledFeature]", +} + +// Helper function to create test specs from test names +func createTestSpecs(testNames map[string]string) extensions.ExtensionTestSpecs { + specs := make(extensions.ExtensionTestSpecs, 0, len(testNames)) + for name, tags := range testNames { + fullName := name + " " + tags + specs = append(specs, &extensions.ExtensionTestSpec{ + ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: fullName}, + }) + } + return specs +} + +// Helper function to extract test names from filtered results +func extractTestNames(specs extensions.ExtensionTestSpecs, testNames map[string]string) sets.Set[string] { + result := sets.New[string]() + for _, spec := range specs { + for name, tags := range testNames { + fullName := 
name + " " + tags + if spec.Name == fullName { + result.Insert(name) + break + } + } + } + return result +} + +func TestClusterStateFilterComprehensive(t *testing.T) { + testCases := []struct { + name string + config *clusterdiscovery.ClusterConfiguration + runTests sets.Set[string] + }{ + { + name: "simple GCE", + config: &clusterdiscovery.ClusterConfiguration{ + ProviderName: "gce", + NetworkPlugin: "OpenShiftSDN", + HasIPv4: true, + HasIPv6: false, + HasNoOptionalCapabilities: false, + EnabledFeatureGates: sets.New[string](), + DisabledFeatureGates: sets.New[string](), + APIGroups: sets.New[string](), + }, + runTests: sets.New("everyone", "not-aws", "not-multitenant", "online", "ipv4", "requires-optional-cap"), + }, + { + name: "GCE multitenant", + config: &clusterdiscovery.ClusterConfiguration{ + ProviderName: "gce", + NetworkPlugin: "OpenShiftSDN", + NetworkPluginMode: "Multitenant", + HasIPv4: true, + HasIPv6: false, + HasNoOptionalCapabilities: false, + EnabledFeatureGates: sets.New[string](), + DisabledFeatureGates: sets.New[string](), + APIGroups: sets.New[string](), + }, + runTests: sets.New("everyone", "not-aws", "online", "ipv4", "requires-optional-cap"), + }, + { + name: "simple non-cloud", + config: &clusterdiscovery.ClusterConfiguration{ + ProviderName: "skeleton", + NetworkPlugin: "OpenShiftSDN", + HasIPv4: true, + HasIPv6: false, + HasNoOptionalCapabilities: false, + EnabledFeatureGates: sets.New[string](), + DisabledFeatureGates: sets.New[string](), + APIGroups: sets.New[string](), + }, + runTests: sets.New("everyone", "not-gce", "not-aws", "not-multitenant", "online", "ipv4", "requires-optional-cap"), + }, + { + name: "complex override dual-stack", + config: &clusterdiscovery.ClusterConfiguration{ + ProviderName: "aws", + NetworkPlugin: "OVNKubernetes", + HasIPv4: true, + HasIPv6: true, + HasNoOptionalCapabilities: false, + EnabledFeatureGates: sets.New[string](), + DisabledFeatureGates: sets.New[string](), + APIGroups: sets.New[string](), + }, + runTests: sets.New("everyone", "not-gce", "not-sdn", "not-multitenant", "online", "ipv4", "ipv6", "dual-stack", "requires-optional-cap"), + }, + { + name: "disconnected", + config: &clusterdiscovery.ClusterConfiguration{ + ProviderName: "none", + NetworkPlugin: "OVNKubernetes", + HasIPv4: true, + HasIPv6: true, + Disconnected: true, + HasNoOptionalCapabilities: false, + EnabledFeatureGates: sets.New[string](), + DisabledFeatureGates: sets.New[string](), + APIGroups: sets.New[string](), + }, + runTests: sets.New("everyone", "not-gce", "not-aws", "not-sdn", "not-multitenant", "ipv4", "ipv6", "dual-stack", "requires-optional-cap"), + }, + { + name: "override network plugin with SCTP", + config: &clusterdiscovery.ClusterConfiguration{ + ProviderName: "aws", + NetworkPlugin: "Calico", + HasIPv4: false, + HasIPv6: true, + HasSCTP: true, + HasNoOptionalCapabilities: false, + EnabledFeatureGates: sets.New[string](), + DisabledFeatureGates: sets.New[string](), + APIGroups: sets.New[string](), + }, + runTests: sets.New("everyone", "not-gce", "not-sdn", "not-multitenant", "online", "ipv6", "sctp", "requires-optional-cap"), + }, + { + name: "no optional capabilities", + config: &clusterdiscovery.ClusterConfiguration{ + ProviderName: "gce", + NetworkPlugin: "OpenShiftSDN", + HasIPv4: true, + HasIPv6: false, + HasNoOptionalCapabilities: true, + EnabledFeatureGates: sets.New[string](), + DisabledFeatureGates: sets.New[string](), + APIGroups: sets.New[string](), + }, + runTests: sets.New("everyone", "not-aws", "not-multitenant", "online", 
"ipv4"), + }, + { + name: "API group filtering", + config: &clusterdiscovery.ClusterConfiguration{ + ProviderName: "gce", + NetworkPlugin: "OpenShiftSDN", + HasIPv4: true, + HasIPv6: false, + APIGroups: sets.New("apps", "extensions"), + EnabledFeatureGates: sets.New[string](), + DisabledFeatureGates: sets.New[string](), + }, + runTests: sets.New("everyone", "not-aws", "not-multitenant", "online", "ipv4", "requires-optional-cap", "apigroup-apps"), + }, + { + name: "Feature gate filtering", + config: &clusterdiscovery.ClusterConfiguration{ + ProviderName: "gce", + NetworkPlugin: "OpenShiftSDN", + HasIPv4: true, + HasIPv6: false, + APIGroups: sets.New[string](), + EnabledFeatureGates: sets.New("FeatureA", "FeatureB"), + DisabledFeatureGates: sets.New("DisabledFeature"), + }, + runTests: sets.New("everyone", "not-aws", "not-multitenant", "online", "ipv4", "requires-optional-cap", "featuregate-enabled", "featuregate-missing"), + }, + { + name: "Feature gate filtering - only disabled gates", + config: &clusterdiscovery.ClusterConfiguration{ + ProviderName: "gce", + NetworkPlugin: "OpenShiftSDN", + HasIPv4: true, + HasIPv6: false, + APIGroups: sets.New[string](), + EnabledFeatureGates: sets.New[string](), + DisabledFeatureGates: sets.New("DisabledFeature"), + }, + runTests: sets.New("everyone", "not-aws", "not-multitenant", "online", "ipv4", "requires-optional-cap", "featuregate-enabled", "featuregate-missing"), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + filter := NewClusterStateFilter(tc.config) + testSpecs := createTestSpecs(e2eTestNames) + + result, err := filter.Filter(context.Background(), testSpecs) + + require.NoError(t, err) + runTests := extractTestNames(result, e2eTestNames) + assert.True(t, runTests.Equal(tc.runTests), + "Expected tests: %v, got: %v", tc.runTests.UnsortedList(), runTests.UnsortedList()) + }) + } +} diff --git a/pkg/test/filters/disabled.go b/pkg/test/filters/disabled.go new file mode 100644 index 000000000000..ceee32fcf610 --- /dev/null +++ b/pkg/test/filters/disabled.go @@ -0,0 +1,75 @@ +package filters + +import ( + "context" + "regexp" + "strings" + "time" + + "github.com/openshift/origin/pkg/test/extensions" +) + +// DisabledTestsFilter filters out disabled tests +type DisabledTestsFilter struct{} + +func (f *DisabledTestsFilter) Name() string { + return "disabled-tests" +} + +func (f *DisabledTestsFilter) Filter(ctx context.Context, tests extensions.ExtensionTestSpecs) (extensions.ExtensionTestSpecs, error) { + enabled := make(extensions.ExtensionTestSpecs, 0, len(tests)) + for _, test := range tests { + if isDisabled(test.Name) { + continue + } + enabled = append(enabled, test) + } + return enabled, nil +} + +func (f *DisabledTestsFilter) ShouldApply() bool { + return true +} + +// isDisabled checks if a test is disabled based on its name +func isDisabled(name string) bool { + if strings.Contains(name, "[Disabled") { + return true + } + + return shouldSkipUntil(name) +} + +// shouldSkipUntil allows a test to be skipped with a time limit. +// the test should be annotated with the 'SkippedUntil' tag, as shown below. +// +// [SkippedUntil:05092022:blocker-bz/123456] +// +// - the specified date should conform to the 'MMDDYYYY' format. +// - a valid blocker BZ must be specified +// if the specified date in the tag has not passed yet, the test +// will be skipped by the runner. 
+func shouldSkipUntil(name string) bool {
+	re, err := regexp.Compile(`\[SkippedUntil:(\d{8}):blocker-bz\/([a-zA-Z0-9]+)\]`)
+	if err != nil {
+		// this can only happen through programmer error, and the unit
+		// tests will catch that before it ships
+		return false
+	}
+	matches := re.FindStringSubmatch(name)
+	if len(matches) != 3 {
+		return false
+	}
+
+	dateStr := matches[1]
+	// bzNumber := matches[2] // not used for now
+
+	// parse the date
+	date, err := time.Parse("01022006", dateStr)
+	if err != nil {
+		return false
+	}
+
+	// if the date has passed, don't skip the test
+	return time.Now().Before(date)
+}
diff --git a/pkg/testsuites/standard_suites_test.go b/pkg/test/filters/disabled_test.go
similarity index 99%
rename from pkg/testsuites/standard_suites_test.go
rename to pkg/test/filters/disabled_test.go
index 4443459753b9..38770b71714c 100644
--- a/pkg/testsuites/standard_suites_test.go
+++ b/pkg/test/filters/disabled_test.go
@@ -1,4 +1,4 @@
-package testsuites
+package filters
 
 import (
 	"fmt"
diff --git a/pkg/test/filters/kube_rebase.go b/pkg/test/filters/kube_rebase.go
new file mode 100644
index 000000000000..c781b24dea98
--- /dev/null
+++ b/pkg/test/filters/kube_rebase.go
@@ -0,0 +1,72 @@
+package filters
+
+import (
+	"context"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/rest"
+
+	"github.com/openshift/origin/pkg/test/extensions"
+)
+
+// KubeRebaseTestsFilter filters out tests during a k8s rebase
+type KubeRebaseTestsFilter struct {
+	restConfig *rest.Config
+}
+
+func NewKubeRebaseTestsFilter(restConfig *rest.Config) *KubeRebaseTestsFilter {
+	return &KubeRebaseTestsFilter{
+		restConfig: restConfig,
+	}
+}
+
+func (f *KubeRebaseTestsFilter) Name() string {
+	return "kube-rebase-tests"
+}
+
+func (f *KubeRebaseTestsFilter) Filter(ctx context.Context, tests extensions.ExtensionTestSpecs) (extensions.ExtensionTestSpecs, error) {
+	if f.restConfig == nil {
+		return tests, nil
+	}
+
+	discoveryClient, err := discovery.NewDiscoveryClientForConfig(f.restConfig)
+	if err != nil {
+		return nil, err
+	}
+	serverVersion, err := discoveryClient.ServerVersion()
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO: this version, along with the exclusion list below, needs to be
+	// updated for the rebase in progress.
+	if !strings.HasPrefix(serverVersion.Minor, "31") {
+		return tests, nil
+	}
+
+	// The list below should only be filled in while we're trying to land a k8s rebase.
+	// Don't pile them up!
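+	// Entries are matched by substring against the full test name (via
+	// strings.Contains below), so the fully qualified name is the safest form.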
+ exclusions := []string{ + // affected by the available controller split https://github.com/kubernetes/kubernetes/pull/126149 + `[sig-api-machinery] health handlers should contain necessary checks`, + } + + matches := make(extensions.ExtensionTestSpecs, 0, len(tests)) +outerLoop: + for _, test := range tests { + for _, excl := range exclusions { + if strings.Contains(test.Name, excl) { + logrus.Infof("Skipping %q due to rebase in-progress", test.Name) + continue outerLoop + } + } + matches = append(matches, test) + } + return matches, nil +} + +func (f *KubeRebaseTestsFilter) ShouldApply() bool { + return f.restConfig != nil +} diff --git a/pkg/test/filters/kube_rebase_test.go b/pkg/test/filters/kube_rebase_test.go new file mode 100644 index 000000000000..8f72f80e5a73 --- /dev/null +++ b/pkg/test/filters/kube_rebase_test.go @@ -0,0 +1,29 @@ +package filters + +import ( + "context" + "testing" + + "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/openshift/origin/pkg/test/extensions" +) + +func TestKubeRebaseFilter(t *testing.T) { + // Test with nil config (should pass all tests through) + filter := NewKubeRebaseTestsFilter(nil) + + tests := extensions.ExtensionTestSpecs{ + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "normal test"}}, + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "[sig-api-machinery] health handlers should contain necessary checks"}}, + } + + result, err := filter.Filter(context.Background(), tests) + + require.NoError(t, err) + assert.Len(t, result, 2) // All tests should pass through when restConfig is nil + assert.Equal(t, "normal test", result[0].Name) + assert.Equal(t, "[sig-api-machinery] health handlers should contain necessary checks", result[1].Name) +} diff --git a/pkg/test/filters/match_function.go b/pkg/test/filters/match_function.go new file mode 100644 index 000000000000..4604b39c8156 --- /dev/null +++ b/pkg/test/filters/match_function.go @@ -0,0 +1,39 @@ +package filters + +import ( + "context" + + "github.com/openshift/origin/pkg/test/extensions" +) + +// MatchFnFilter applies a test matching function +type MatchFnFilter struct { + matchFn func(name string) bool +} + +func NewMatchFnFilter(matchFn func(name string) bool) *MatchFnFilter { + return &MatchFnFilter{matchFn: matchFn} +} + +func (f *MatchFnFilter) Name() string { + return "match-function" +} + +func (f *MatchFnFilter) Filter(ctx context.Context, tests extensions.ExtensionTestSpecs) (extensions.ExtensionTestSpecs, error) { + if f.matchFn == nil { + return tests, nil + } + + matches := make(extensions.ExtensionTestSpecs, 0, len(tests)) + for _, test := range tests { + if !f.matchFn(test.Name) { + continue + } + matches = append(matches, test) + } + return matches, nil +} + +func (f *MatchFnFilter) ShouldApply() bool { + return f.matchFn != nil +} diff --git a/pkg/test/filters/match_function_test.go b/pkg/test/filters/match_function_test.go new file mode 100644 index 000000000000..b5796520a6f9 --- /dev/null +++ b/pkg/test/filters/match_function_test.go @@ -0,0 +1,29 @@ +package filters + +import ( + "context" + "testing" + + "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/openshift/origin/pkg/test/extensions" +) + +func TestSuiteMatcherFilter(t *testing.T) { + 
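+	// A nil match function makes MatchFnFilter a no-op: Filter returns its input
+	// unchanged, and ShouldApply reports false so a filter chain would normally
+	// skip it entirely.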
// Test with nil matcher (should pass all tests through)
+	filter := NewMatchFnFilter(nil)
+
+	tests := extensions.ExtensionTestSpecs{
+		&extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test1"}},
+		&extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test2"}},
+	}
+
+	result, err := filter.Filter(context.Background(), tests)
+
+	require.NoError(t, err)
+	assert.Len(t, result, 2) // All tests should pass through when matchFn is nil
+	assert.Equal(t, "test1", result[0].Name)
+	assert.Equal(t, "test2", result[1].Name)
+}
diff --git a/pkg/test/filters/qualifiers.go b/pkg/test/filters/qualifiers.go
new file mode 100644
index 000000000000..fd74c7522e9d
--- /dev/null
+++ b/pkg/test/filters/qualifiers.go
@@ -0,0 +1,42 @@
+package filters
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/openshift/origin/pkg/test/extensions"
+)
+
+// QualifiersFilter filters tests based on qualifiers (CEL expressions)
+type QualifiersFilter struct {
+	qualifiers []string
+}
+
+func NewQualifiersFilter(qualifiers []string) *QualifiersFilter {
+	return &QualifiersFilter{
+		qualifiers: qualifiers,
+	}
+}
+
+func (f *QualifiersFilter) Name() string {
+	return "suite-qualifiers"
+}
+
+// Filter filters tests based on suite-qualifying CEL expressions.
+func (f *QualifiersFilter) Filter(ctx context.Context, tests extensions.ExtensionTestSpecs) (extensions.ExtensionTestSpecs, error) {
+	if len(f.qualifiers) == 0 {
+		return tests, nil
+	}
+
+	// Apply qualifier filtering directly to the ExtensionTestSpecs
+	filteredSpecs, err := extensions.FilterWrappedSpecs(tests, f.qualifiers)
+	if err != nil {
+		return nil, fmt.Errorf("failed to filter tests by qualifiers: %w", err)
+	}
+
+	return filteredSpecs, nil
+}
+
+func (f *QualifiersFilter) ShouldApply() bool {
+	return len(f.qualifiers) > 0
+}
diff --git a/pkg/test/filters/qualifiers_test.go b/pkg/test/filters/qualifiers_test.go
new file mode 100644
index 000000000000..09151ab737df
--- /dev/null
+++ b/pkg/test/filters/qualifiers_test.go
@@ -0,0 +1,141 @@
+package filters
+
+import (
+	"context"
+	"testing"
+
+	"github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/openshift/origin/pkg/test/extensions"
+)
+
+func TestQualifiersFilter(t *testing.T) {
+	testCases := []struct {
+		name       string
+		qualifiers []string
+		tests      extensions.ExtensionTestSpecs
+		expected   []string
+	}{
+		{
+			name:       "no qualifiers - pass all tests through",
+			qualifiers: nil,
+			tests: extensions.ExtensionTestSpecs{
+				&extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test1"}},
+				&extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test2"}},
+				&extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test3"}},
+			},
+			expected: []string{"test1", "test2", "test3"},
+		},
+		{
+			name:       "empty qualifiers - pass all tests through",
+			qualifiers: []string{},
+			tests: extensions.ExtensionTestSpecs{
+				&extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test1"}},
+				&extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test2"}},
+			},
+			expected: []string{"test1", "test2"},
+		},
+		{
+			name:       "filter by name contains",
+			qualifiers: []string{`name.contains("api")`},
+			tests: extensions.ExtensionTestSpecs{
+				&extensions.ExtensionTestSpec{ExtensionTestSpec:
&extensiontests.ExtensionTestSpec{Name: "test api functionality"}}, + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test ui functionality"}}, + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "api server test"}}, + }, + expected: []string{"test api functionality", "api server test"}, + }, + { + name: "filter by suite tag", + qualifiers: []string{`name.contains("[Suite:openshift/conformance")`}, + tests: extensions.ExtensionTestSpecs{ + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test1 [Suite:openshift/conformance/parallel]"}}, + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test2 [Suite:openshift/disruptive]"}}, + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test3 [Suite:openshift/conformance/serial]"}}, + }, + expected: []string{"test1 [Suite:openshift/conformance/parallel]", "test3 [Suite:openshift/conformance/serial]"}, + }, + { + name: "filter by early tests", + qualifiers: []string{`name.contains("[Early]")`}, + tests: extensions.ExtensionTestSpecs{ + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test1 [Early] [Suite:openshift/conformance]"}}, + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test2 [Late] [Suite:openshift/conformance]"}}, + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test3 [Suite:openshift/conformance]"}}, + }, + expected: []string{"test1 [Early] [Suite:openshift/conformance]"}, + }, + { + name: "qualifiers are OR'd", + qualifiers: []string{`name.contains("[Early]")`, `name.contains("[Late]")`}, + tests: extensions.ExtensionTestSpecs{ + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test1 [Early] [Suite:openshift/conformance]"}}, + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test2 [Late] [Suite:openshift/conformance]"}}, + &extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test3 [Suite:openshift/conformance]"}}, + }, + expected: []string{"test1 [Early] [Suite:openshift/conformance]", "test2 [Late] [Suite:openshift/conformance]"}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + filter := NewQualifiersFilter(tc.qualifiers) + + result, err := filter.Filter(context.Background(), tc.tests) + + require.NoError(t, err) + assert.Len(t, result, len(tc.expected)) + for i, expectedName := range tc.expected { + assert.Equal(t, expectedName, result[i].Name) + } + }) + } +} + +func TestQualifiersFilterShouldApply(t *testing.T) { + testCases := []struct { + name string + qualifiers []string + expected bool + }{ + { + name: "nil qualifiers", + qualifiers: nil, + expected: false, + }, + { + name: "empty qualifiers", + qualifiers: []string{}, + expected: false, + }, + { + name: "with qualifiers", + qualifiers: []string{`name.contains("test")`}, + expected: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + filter := NewQualifiersFilter(tc.qualifiers) + assert.Equal(t, tc.expected, filter.ShouldApply()) + }) + } +} + +func TestQualifiersFilterErrorHandling(t *testing.T) { + // Test with invalid CEL expression + filter := NewQualifiersFilter([]string{`invalid.cel.expression(`}) + + tests := extensions.ExtensionTestSpecs{ + 
&extensions.ExtensionTestSpec{ExtensionTestSpec: &extensiontests.ExtensionTestSpec{Name: "test1"}}, + } + + result, err := filter.Filter(context.Background(), tests) + + assert.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), "failed to filter tests by qualifiers") +} diff --git a/pkg/test/ginkgo/cmd_runsuite.go b/pkg/test/ginkgo/cmd_runsuite.go index 02ce82c4f67f..0f8413d92d11 100644 --- a/pkg/test/ginkgo/cmd_runsuite.go +++ b/pkg/test/ginkgo/cmd_runsuite.go @@ -18,28 +18,25 @@ import ( "time" "github.com/onsi/ginkgo/v2" + "github.com/openshift-eng/openshift-tests-extension/pkg/extension" configv1 "github.com/openshift/api/config/v1" - clientconfigv1 "github.com/openshift/client-go/config/clientset/versioned" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/pflag" "golang.org/x/mod/semver" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/client-go/discovery" - "k8s.io/client-go/rest" e2e "k8s.io/kubernetes/test/e2e/framework" "github.com/openshift/origin/pkg/clioptions/clusterdiscovery" "github.com/openshift/origin/pkg/clioptions/clusterinfo" - "github.com/openshift/origin/pkg/clioptions/kubeconfig" "github.com/openshift/origin/pkg/defaultmonitortests" "github.com/openshift/origin/pkg/monitor" monitorserialization "github.com/openshift/origin/pkg/monitor/serialization" "github.com/openshift/origin/pkg/monitortestframework" "github.com/openshift/origin/pkg/riskanalysis" "github.com/openshift/origin/pkg/test/extensions" + "github.com/openshift/origin/pkg/test/filters" "github.com/openshift/origin/pkg/test/ginkgo/junitapi" ) @@ -89,6 +86,7 @@ type GinkgoRunSuiteOptions struct { ExactMonitorTests []string DisableMonitorTests []string + Extension *extension.Extension } func NewGinkgoRunSuiteOptions(streams genericclioptions.IOStreams) *GinkgoRunSuiteOptions { @@ -147,94 +145,101 @@ func max(a, b int) int { return b } -func (o *GinkgoRunSuiteOptions) Run(suite *TestSuite, junitSuiteName string, monitorTestInfo monitortestframework.MonitorTestInitializationInfo, - upgrade bool) error { - ctx := context.Background() +// shouldRetryTest determines if a failed test should be retried based on retry policies. +// It returns true if the test is eligible for retry, false otherwise. +func shouldRetryTest(ctx context.Context, test *testCase, permittedRetryImageTags []string) bool { + // Internal tests (no binary) are eligible for retry, we shouldn't really have any of these + // now that origin is also an extension. + if test.binary == nil { + return true + } + + tlog := logrus.WithField("test", test.name) - tests, err := testsForSuite() + // Get extension info to check if it's from a permitted image + info, err := test.binary.Info(ctx) if err != nil { - return fmt.Errorf("failed reading origin test suites: %w", err) + tlog.WithError(err). + Debug("Failed to get binary info, skipping retry") + return false } + // Check if the test's source image is in the permitted retry list + for _, permittedTag := range permittedRetryImageTags { + if strings.Contains(info.Source.SourceImage, permittedTag) { + tlog.WithField("image", info.Source.SourceImage). + Debug("Permitting retry") + return true + } + } + + tlog.WithField("image", info.Source.SourceImage). 
+ Debug("Test not eligible for retry based on image tag") + return false +} + +func (o *GinkgoRunSuiteOptions) Run(suite *TestSuite, clusterConfig *clusterdiscovery.ClusterConfiguration, junitSuiteName string, monitorTestInfo monitortestframework.MonitorTestInitializationInfo, + upgrade bool) error { + ctx := context.Background() var sharder Sharder switch o.ShardStrategy { default: sharder = &HashSharder{} } - logrus.WithField("suite", suite.Name).Infof("Found %d internal tests in openshift-tests binary", len(tests)) + defaultBinaryParallelism := 10 - var fallbackSyntheticTestResult []*junitapi.JUnitTestCase - var externalTestCases []*testCase - if len(os.Getenv("OPENSHIFT_SKIP_EXTERNAL_TESTS")) == 0 { - // Extract all test binaries - extractionContext, extractionContextCancel := context.WithTimeout(context.Background(), 30*time.Minute) - defer extractionContextCancel() - cleanUpFn, externalBinaries, err := extensions.ExtractAllTestBinaries(extractionContext, 10) - if err != nil { - return err - } - defer cleanUpFn() + // Extract all test binaries + extractionContext, extractionContextCancel := context.WithTimeout(context.Background(), 30*time.Minute) + defer extractionContextCancel() + cleanUpFn, allBinaries, err := extensions.ExtractAllTestBinaries(extractionContext, defaultBinaryParallelism) + if err != nil { + return err + } + defer cleanUpFn() - defaultBinaryParallelism := 10 + // Learn about the extension binaries available + infoContext, infoContextCancel := context.WithTimeout(context.Background(), 30*time.Minute) + defer infoContextCancel() + logrus.Infof("Fetching info from %d extension binaries", len(allBinaries)) + extensionsInfo, err := allBinaries.Info(infoContext, defaultBinaryParallelism) + if err != nil { + logrus.Errorf("Failed to fetch extension info: %v", err) + return fmt.Errorf("failed to fetch extension info: %w", err) + } - // Learn about the extension binaries available - // TODO(stbenjam): we'll eventually use this information to get suite information -- but not yet in this iteration - infoContext, infoContextCancel := context.WithTimeout(context.Background(), 30*time.Minute) - defer infoContextCancel() - extensionsInfo, err := externalBinaries.Info(infoContext, defaultBinaryParallelism) - if err != nil { - return err - } - logrus.Infof("Discovered %d extensions", len(extensionsInfo)) - for _, e := range extensionsInfo { - id := fmt.Sprintf("%s:%s:%s", e.Component.Product, e.Component.Kind, e.Component.Name) - logrus.Infof("Extension %s found in %s:%s using API version %s", id, e.Source.SourceImage, e.Source.SourceBinary, e.APIVersion) - } + logrus.Infof("Discovered %d extensions", len(extensionsInfo)) + for _, e := range extensionsInfo { + id := fmt.Sprintf("%s:%s:%s", e.Component.Product, e.Component.Kind, e.Component.Name) + logrus.Infof("Extension %s found in %s:%s using API version %s", id, e.Source.SourceImage, e.Source.SourceBinary, e.APIVersion) + } - // List tests from all available binaries and convert them to origin's testCase format - listContext, listContextCancel := context.WithTimeout(context.Background(), 10*time.Minute) - defer listContextCancel() + // List tests from all available binaries and convert them to origin's testCase format + listContext, listContextCancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer listContextCancel() - envFlags, err := determineEnvironmentFlags(ctx, upgrade, o.DryRun) - if err != nil { - return fmt.Errorf("could not determine environment flags: %w", err) - } - 
logrus.WithFields(envFlags.LogFields()).Infof("Determined all potential environment flags") + envFlags, err := determineEnvironmentFlags(ctx, upgrade, o.DryRun) + if err != nil { + return fmt.Errorf("could not determine environment flags: %w", err) + } + logrus.WithFields(envFlags.LogFields()).Infof("Determined all potential environment flags") - externalTestSpecs, err := externalBinaries.ListTests(listContext, defaultBinaryParallelism, envFlags) - if err != nil { - return err - } - externalTestCases = externalBinaryTestsToOriginTestCases(externalTestSpecs) - - var filteredTests []*testCase - for _, test := range tests { - // tests contains all the tests "registered" in openshift-tests binary, - // this also includes vendored k8s tests, since this path assumes we're - // using external binary to run these tests we need to remove them - // from the final lists, which contains: - // 1. origin tests, only - // 2. k8s tests, coming from external binary - if !strings.Contains(test.name, "[Suite:k8s]") { - filteredTests = append(filteredTests, test) - } - } - logrus.Infof("Discovered %d internal tests, %d external tests - %d total unique tests", - len(tests), len(externalTestCases), len(filteredTests)+len(externalTestCases)) - tests = append(filteredTests, externalTestCases...) - } else { - logrus.Infof("Using built-in tests only due to OPENSHIFT_SKIP_EXTERNAL_TESTS being set") + specs, err := allBinaries.ListTests(listContext, defaultBinaryParallelism, envFlags) + if err != nil { + return err } + logrus.Infof("Discovered %d total tests", len(specs)) + // Temporarily check for the presence of the [Skipped:xyz] annotation in the test names, once this synthetic test // begins to pass we can remove the annotation logic var annotatedSkipped []string - for _, t := range externalTestCases { - if strings.Contains(t.name, "[Skipped") { - annotatedSkipped = append(annotatedSkipped, t.name) + for _, t := range specs { + if strings.Contains(t.Name, "[Skipped") { + annotatedSkipped = append(annotatedSkipped, t.Name) } } + var fallbackSyntheticTestResult []*junitapi.JUnitTestCase var skippedAnnotationSyntheticTestResults []*junitapi.JUnitTestCase skippedAnnotationSyntheticTestResult := junitapi.JUnitTestCase{ Name: "[sig-trt] Skipped annotations present", @@ -248,19 +253,38 @@ func (o *GinkgoRunSuiteOptions) Run(suite *TestSuite, junitSuiteName string, mon // If this fails, this additional run will make it flake skippedAnnotationSyntheticTestResults = append(skippedAnnotationSyntheticTestResults, &junitapi.JUnitTestCase{Name: skippedAnnotationSyntheticTestResult.Name}) + // skip tests due to newer k8s + restConfig, err := clusterinfo.GetMonitorRESTConfig() + if err != nil { + return err + } + + // Apply all test filters using the filter chain -- origin previously filtered tests a ton + // of places, and co-mingled suite, annotation, and cluster state filters in odd ways. This filter + // chain is the ONLY place tests should be filtered down for determining the final execution set. + testFilterChain := filters.NewFilterChain(logrus.WithField("component", "test-filter")). + AddFilter(filters.NewQualifiersFilter(suite.Qualifiers)). + AddFilter(filters.NewKubeRebaseTestsFilter(restConfig)). + AddFilter(&filters.DisabledTestsFilter{}). + AddFilter(filters.NewMatchFnFilter(suite.SuiteMatcher)). 
// used for file- or regexp-based CLI filtering of test names
+		AddFilter(filters.NewClusterStateFilter(clusterConfig))
+
+	specs, err = testFilterChain.Apply(ctx, specs)
+	if err != nil {
+		return err
+	}
+
+	tests, err := extensionTestSpecsToOriginTestCases(specs)
+	if err != nil {
+		return errors.WithMessage(err, "could not convert test specs to origin test cases")
+	}
+
	// this ensures the tests are always run in random order to avoid
	// any intra-tests dependencies
	suiteConfig, _ := ginkgo.GinkgoConfiguration()
	r := rand.New(rand.NewSource(suiteConfig.RandomSeed))
	r.Shuffle(len(tests), func(i, j int) { tests[i], tests[j] = tests[j], tests[i] })

-	tests = suite.Filter(tests)
-	if len(tests) == 0 {
-		return fmt.Errorf("suite %q does not contain any tests", suite.Name)
-	}
-
-	logrus.Infof("Found %d filtered tests", len(tests))
-
	count := o.Count
	if count == 0 {
		count = suite.Count
@@ -292,17 +316,6 @@ func (o *GinkgoRunSuiteOptions) Run(suite *TestSuite, junitSuiteName string, mon
		return nil
	}

-	restConfig, err := clusterinfo.GetMonitorRESTConfig()
-	if err != nil {
-		return err
-	}
-
-	// skip tests due to newer k8s
-	tests, err = o.filterOutRebaseTests(restConfig, tests)
-	if err != nil {
-		return err
-	}
-
	if len(o.JUnitDir) > 0 {
		if _, err := os.Stat(o.JUnitDir); err != nil {
			if !os.IsNotExist(err) {
@@ -503,29 +516,28 @@ func (o *GinkgoRunSuiteOptions) Run(suite *TestSuite, junitSuiteName string, mon

	pass, fail, skip, failing := summarizeTests(tests)

-	// attempt to retry failures to do flake detection
+	// Determine if we should retry any tests for flake detection.
+	// Don't add more here without discussion with OCP architects; we should be
+	// moving toward having no flakes at all.
+	permittedRetryImageTags := []string{"tests"} // tests = openshift-tests image
	if fail > 0 && fail <= suite.MaximumAllowedFlakes {
		var retries []*testCase
-		// Make a copy of the all failing tests (subject to the max allowed flakes) so we can have
-		// a list of tests to retry.
-		failedExtensionTestCount := 0
+		failedUnretriableTestCount := 0
		for _, test := range failing {
-			// Do not retry extension tests -- we also want to remove retries from origin-sourced
-			// tests, but extensions is where we can start.
-			if test.binary != nil {
-				failedExtensionTestCount++
-				continue
-			}
-
-			retry := test.Retry()
-			retries = append(retries, retry)
-			if len(retries) > suite.MaximumAllowedFlakes {
-				break
+			if shouldRetryTest(ctx, test, permittedRetryImageTags) {
+				retry := test.Retry()
+				retries = append(retries, retry)
+				if len(retries) > suite.MaximumAllowedFlakes {
+					break
+				}
+			} else if test.binary != nil {
+				// Extension tests from images that aren't in the permitted list above
+				// are never retried; long term we want to remove retries entirely.
+				failedUnretriableTestCount++
			}
		}

-		logrus.Warningf("%d tests failed, %d origin-sourced tests will be retried; %d extension tests will not", len(failing), len(retries), failedExtensionTestCount)
+		logrus.Warningf("%d tests failed, %d tests permitted to be retried; %d failures are terminal non-retryable failures", len(failing), len(retries), failedUnretriableTestCount)

		// Run the tests in the retries list.
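		// A test that fails and then passes on retry counts as a flake rather than a
		// hard failure; that is the flake detection referenced above, bounded by
		// suite.MaximumAllowedFlakes.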
q := newParallelTestQueue(testRunnerContext) @@ -729,42 +741,6 @@ func writeExtensionTestResults(tests []*testCase, dir, filePrefix, fileSuffix st return nil } -func (o *GinkgoRunSuiteOptions) filterOutRebaseTests(restConfig *rest.Config, tests []*testCase) ([]*testCase, error) { - discoveryClient, err := discovery.NewDiscoveryClientForConfig(restConfig) - if err != nil { - return nil, err - } - serverVersion, err := discoveryClient.ServerVersion() - if err != nil { - return nil, err - } - // TODO: this version along with below exclusions lists needs to be updated - // for the rebase in-progress. - if !strings.HasPrefix(serverVersion.Minor, "31") { - return tests, nil - } - - // Below list should only be filled in when we're trying to land k8s rebase. - // Don't pile them up! - exclusions := []string{ - // affected by the available controller split https://github.com/kubernetes/kubernetes/pull/126149 - `[sig-api-machinery] health handlers should contain necessary checks`, - } - - matches := make([]*testCase, 0, len(tests)) -outerLoop: - for _, test := range tests { - for _, excl := range exclusions { - if strings.Contains(test.name, excl) { - fmt.Fprintf(o.Out, "Skipping %q due to rebase in-progress\n", test.name) - continue outerLoop - } - } - matches = append(matches, test) - } - return matches, nil -} - func determineEnvironmentFlags(ctx context.Context, upgrade bool, dryRun bool) (extensions.EnvironmentFlags, error) { restConfig, err := e2e.LoadConfig(true) if err != nil { @@ -805,31 +781,11 @@ func determineEnvironmentFlags(ctx context.Context, upgrade bool, dryRun bool) ( envFlagBuilder.AddTopology(&singleReplicaTopology) } - clientConfig, err := clientconfigv1.NewForConfig(restConfig) - if err != nil { - return nil, err - } - - discoveryClient, err := kubeconfig.NewDiscoveryGetter(restConfig).GetDiscoveryClient() - if err != nil { - return nil, err - } - apiGroups, err := determineEnabledAPIGroups(discoveryClient) - if err != nil { - return nil, errors.WithMessage(err, "couldn't determine api groups") - } - envFlagBuilder.AddAPIGroups(apiGroups.UnsortedList()...) - - if apiGroups.Has("config.openshift.io") { - featureGates, err := determineEnabledFeatureGates(ctx, clientConfig) - if err != nil { - return nil, errors.WithMessage(err, "couldn't determine feature gates") - } - envFlagBuilder.AddFeatureGates(featureGates...) - } - //Additional flags can only be determined if we are able to obtain the clusterState if clusterState != nil { + envFlagBuilder.AddAPIGroups(clusterState.APIGroups.UnsortedList()...). + AddFeatureGates(clusterState.EnabledFeatureGates.UnsortedList()...) 
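+		// The API groups and feature gates above come from the pre-discovered
+		// clusterState, replacing the separate discovery-client and FeatureGate
+		// API lookups this function used to perform.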
+ upgradeType := "None" if upgrade { upgradeType = determineUpgradeType(clusterState.Version.Status) @@ -884,55 +840,3 @@ func determineExternalConnectivity(clusterConfig *clusterdiscovery.ClusterConfig } return "Direct" } - -func determineEnabledAPIGroups(discoveryClient discovery.AggregatedDiscoveryInterface) (sets.Set[string], error) { - groups, err := discoveryClient.ServerGroups() - if err != nil { - return nil, fmt.Errorf("unable to retrieve served resources: %v", err) - } - apiGroups := sets.New[string]() - for _, apiGroup := range groups.Groups { - // ignore the empty group - if apiGroup.Name == "" { - continue - } - apiGroups.Insert(apiGroup.Name) - } - - return apiGroups, nil -} - -func determineEnabledFeatureGates(ctx context.Context, configClient clientconfigv1.Interface) ([]string, error) { - featureGate, err := configClient.ConfigV1().FeatureGates().Get(ctx, "cluster", metav1.GetOptions{}) - if err != nil { - return nil, err - } - clusterVersion, err := configClient.ConfigV1().ClusterVersions().Get(ctx, "version", metav1.GetOptions{}) - if err != nil { - return nil, err - } - - desiredVersion := clusterVersion.Status.Desired.Version - if len(desiredVersion) == 0 && len(clusterVersion.Status.History) > 0 { - desiredVersion = clusterVersion.Status.History[0].Version - } - - ret := sets.NewString() - found := false - for _, featureGateValues := range featureGate.Status.FeatureGates { - if featureGateValues.Version != desiredVersion { - continue - } - found = true - for _, enabled := range featureGateValues.Enabled { - ret.Insert(string(enabled.Name)) - } - break - } - if !found { - logrus.Warning("no feature gates found") - return nil, nil - } - - return ret.List(), nil -} diff --git a/pkg/test/ginkgo/cmd_runtest.go b/pkg/test/ginkgo/cmd_runtest.go deleted file mode 100644 index d1f30ad3c63b..000000000000 --- a/pkg/test/ginkgo/cmd_runtest.go +++ /dev/null @@ -1,195 +0,0 @@ -package ginkgo - -import ( - "context" - "fmt" - "github.com/sirupsen/logrus" - "os" - "regexp" - "strings" - "time" - - "github.com/openshift/origin/pkg/clioptions/clusterinfo" - - "github.com/openshift/origin/pkg/monitortestframework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/ginkgo/v2/types" - "github.com/openshift/origin/pkg/defaultmonitortests" - "github.com/openshift/origin/pkg/monitor" - "github.com/openshift/origin/pkg/test/ginkgo/result" - "k8s.io/cli-runtime/pkg/genericclioptions" -) - -type ExitError struct { - Code int -} - -func (e ExitError) Error() string { - return fmt.Sprintf("exit with code %d", e.Code) -} - -// TestOptions handles running a single test. -type TestOptions struct { - // EnableMonitor is an easy way to enable monitor gathering for a single e2e test. - // TODO if this is useful enough for general users, we can extend this into an arg, this just ensures the plumbing. 
- EnableMonitor bool - - DryRun bool - genericclioptions.IOStreams - - ExactMonitorTests []string - DisableMonitorTests []string -} - -var _ ginkgo.GinkgoTestingT = &TestOptions{} - -func NewTestOptions(streams genericclioptions.IOStreams) *TestOptions { - return &TestOptions{ - IOStreams: streams, - } -} - -func (o *TestOptions) Run(args []string) error { - ctx := context.TODO() - - if len(args) != 1 { - return fmt.Errorf("only a single test name may be passed") - } - - start := time.Now() - - // Ignore the upstream suite behavior within test execution - ginkgo.GetSuite().ClearBeforeAndAfterSuiteNodes() - tests, err := testsForSuite() - if err != nil { - return err - } - var test *testCase - for _, t := range tests { - if t.name == args[0] { - test = t - break - } - } - if test == nil { - return fmt.Errorf("no test exists with that name: %s", args[0]) - } - - if o.DryRun { - fmt.Fprintf(o.Out, "Running test (dry-run)\n") - return nil - } - - restConfig, err := clusterinfo.GetMonitorRESTConfig() - if err != nil { - return err - } - monitorTestInfo := monitortestframework.MonitorTestInitializationInfo{ - ClusterStabilityDuringTest: monitortestframework.Stable, - ExactMonitorTests: o.ExactMonitorTests, - DisableMonitorTests: o.DisableMonitorTests, - } - var m monitor.Interface - if o.EnableMonitor { - // individual tests are always stable, it's the jobs that aren't. - monitorTests, err := defaultmonitortests.NewMonitorTestsFor(monitorTestInfo) - if err != nil { - logrus.Errorf("Error getting monitor tests: %v", err) - } - - monitorEventRecorder := monitor.NewRecorder() - m = monitor.NewMonitor( - monitorEventRecorder, - restConfig, - "", - monitorTests, - ) - if err := m.Start(ctx); err != nil { - return err - } - } - - suiteConfig, reporterConfig := ginkgo.GinkgoConfiguration() - suiteConfig.FocusStrings = []string{fmt.Sprintf("^ %s$", regexp.QuoteMeta(test.name))} - - // These settings are matched to upstream's ginkgo configuration. See: - // https://github.com/kubernetes/kubernetes/blob/v1.25.0/test/e2e/framework/test_context.go#L354-L355 - // Randomize specs as well as suites - suiteConfig.RandomizeAllSpecs = true - // https://github.com/kubernetes/kubernetes/blob/v1.25.0/hack/ginkgo-e2e.sh#L172-L173 - suiteConfig.Timeout = 24 * time.Hour - reporterConfig.NoColor = true - reporterConfig.Verbose = true - - ginkgo.SetReporterConfig(reporterConfig) - - cwd, err := os.Getwd() - if err != nil { - return err - } - ginkgo.GetSuite().RunSpec(test.spec, ginkgo.Labels{}, "OpenShift e2e suite", cwd, ginkgo.GetFailer(), ginkgo.GetWriter(), suiteConfig, reporterConfig) - - if m != nil { - // ignore the resultstate of the monitor tests because we're only focused on a single one. 
- if _, err := m.Stop(ctx); err != nil { - return err - } - - timeSuffix := fmt.Sprintf("_%s", start.UTC().Format("20060102-150405")) - if err := m.SerializeResults(ctx, "missing-junit-suite", timeSuffix); err != nil { - fmt.Fprintf(o.ErrOut, "error: Failed to serialize run-data: %v\n", err) - } - } - - var summary types.SpecReport - for _, report := range ginkgo.GetSuite().GetReport().SpecReports { - if report.NumAttempts > 0 { - summary = report - } - } - - switch { - case summary.State == types.SpecStatePassed: - if s, ok := result.LastFlake(); ok { - fmt.Fprintf(o.ErrOut, "flake: %s\n", s) - return ExitError{Code: 4} - } - case summary.State == types.SpecStateSkipped: - if len(summary.Failure.Message) > 0 { - fmt.Fprintf(o.ErrOut, "skip [%s:%d]: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.Message) - } - if len(summary.Failure.ForwardedPanic) > 0 { - fmt.Fprintf(o.ErrOut, "skip [%s:%d]: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.ForwardedPanic) - } - return ExitError{Code: 3} - case summary.State == types.SpecStateFailed, summary.State == types.SpecStatePanicked, summary.State == types.SpecStateInterrupted: - if len(summary.Failure.ForwardedPanic) > 0 { - if len(summary.Failure.Location.FullStackTrace) > 0 { - fmt.Fprintf(o.ErrOut, "\n%s\n", summary.Failure.Location.FullStackTrace) - } - fmt.Fprintf(o.ErrOut, "fail [%s:%d]: Test Panicked: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.ForwardedPanic) - return ExitError{Code: 1} - } - fmt.Fprintf(o.ErrOut, "fail [%s:%d]: %s\n", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.Message) - return ExitError{Code: 1} - default: - return fmt.Errorf("unrecognized test case outcome: %#v", summary) - } - return nil -} - -func (o *TestOptions) Fail() { - // this function allows us to pass TestOptions as the first argument, - // it's empty becase we have failure check mechanism implemented above. -} - -func lastFilenameSegment(filename string) string { - if parts := strings.Split(filename, "/vendor/"); len(parts) > 1 { - return parts[len(parts)-1] - } - if parts := strings.Split(filename, "/src/"); len(parts) > 1 { - return parts[len(parts)-1] - } - return filename -} diff --git a/pkg/test/ginkgo/test_runner.go b/pkg/test/ginkgo/test_runner.go index 74852cbbef77..791cec0554bc 100644 --- a/pkg/test/ginkgo/test_runner.go +++ b/pkg/test/ginkgo/test_runner.go @@ -7,12 +7,12 @@ import ( "fmt" "io" "os" - "os/exec" "strings" "sync" - "syscall" "time" + "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests" + "github.com/openshift/origin/pkg/test/extensions" "k8s.io/kubernetes/test/e2e/framework" @@ -306,75 +306,40 @@ func (c *commandContext) RunTestInNewProcess(ctx context.Context, test *testCase ret.start = time.Now() testEnv := append(os.Environ(), updateEnvVars(c.env)...) 
-	if test.binary != nil {
-		results := test.binary.RunTests(ctx, c.timeout, testEnv, test.name)
-		if len(results) != 1 {
-			fmt.Fprintf(os.Stderr, "warning: expected 1 result from external binary; received %d", len(results))
-		}
-		switch results[0].Result {
-		case extensions.ResultFailed:
-			ret.testState = TestFailed
-			ret.testOutputBytes = []byte(fmt.Sprintf("%s\n%s", results[0].Output, results[0].Error))
-		case extensions.ResultPassed:
-			ret.testState = TestSucceeded
-		case extensions.ResultSkipped:
-			ret.testState = TestSkipped
-			ret.testOutputBytes = []byte(results[0].Output)
-		}
-		ret.start = extensions.Time(results[0].StartTime)
-		ret.end = extensions.Time(results[0].EndTime)
-		ret.extensionTestResult = results[0]
-		return ret
+	// Everything has been migrated to OTE, including origin itself, so every test spec must have a binary set
+	if test.binary == nil {
+		ret.testState = TestFailed
+		ret.testOutputBytes = []byte("test has no binary configured; this should not be possible")
+		return ret
	}

-	testName := test.rawName
-	if testName == "" {
-		testName = test.name
-	}
-
-	command := exec.Command(os.Args[0], "run-test", testName)
-	command.Env = testEnv
	timeout := c.timeout
-	if test.testTimeout != 0 {
+	if test.testTimeout > 0 {
		timeout = test.testTimeout
	}

-	testOutputBytes, err := runWithTimeout(ctx, command, timeout)
-	ret.end = time.Now()
-
-	ret.testOutputBytes = testOutputBytes
-	if err == nil {
-		ret.testState = TestSucceeded
-		return ret
+	results := test.binary.RunTests(ctx, timeout, testEnv, test.name)
+	if len(results) != 1 {
+		fmt.Fprintf(os.Stderr, "warning: expected 1 result from external binary; received %d", len(results))
	}
-
-	if ctx.Err() != nil {
-		ret.testState = TestSkipped
+	if len(results) == 0 {
+		ret.testState = TestFailed
+		ret.testOutputBytes = []byte("no results from external binary")
		return ret
	}

-	if exitErr, ok := err.(*exec.ExitError); ok {
-		switch exitErr.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() {
-		case 1:
-			// failed
-			ret.testState = TestFailed
-		case 2:
-			// timeout (ABRT is an exit code 2)
-			ret.testState = TestFailedTimeout
-		case 3:
-			// skipped
-			ret.testState = TestSkipped
-		case 4:
-			// flaky, do not retry
-			ret.testState = TestFlaked
-		default:
-			ret.testState = TestUnknown
-		}
-		return ret
+	switch results[0].Result {
+	case extensiontests.ResultFailed:
+		ret.testState = TestFailed
+		ret.testOutputBytes = []byte(fmt.Sprintf("%s\n%s", results[0].Output, results[0].Error))
+	case extensiontests.ResultPassed:
+		ret.testState = TestSucceeded
+	case extensiontests.ResultSkipped:
+		ret.testState = TestSkipped
+		ret.testOutputBytes = []byte(results[0].Output)
	}
-
-	ret.testState = TestFailed
+	ret.start = extensions.Time(results[0].StartTime)
+	ret.end = extensions.Time(results[0].EndTime)
+	ret.extensionTestResult = results[0]
	return ret
}

@@ -398,35 +363,5 @@ func updateEnvVars(envs []string) []string {
	}
	provider, _ := json.Marshal(config)
	result = append(result, fmt.Sprintf("TEST_PROVIDER=%s", provider))
-	// TODO: do we need to inject KUBECONFIG?
- // result = append(result, "KUBECONFIG=%s", ) return result } - -func runWithTimeout(ctx context.Context, c *exec.Cmd, timeout time.Duration) ([]byte, error) { - if timeout > 0 { - go func() { - select { - // interrupt tests after timeout, and abort if they don't complete quick enough - case <-time.After(timeout): - if c.Process != nil { - c.Process.Signal(syscall.SIGINT) - } - // if the process appears to be hung a significant amount of time after the timeout - // send an ABRT so we get a stack dump - select { - case <-time.After(time.Minute): - if c.Process != nil { - c.Process.Signal(syscall.SIGABRT) - } - } - case <-ctx.Done(): - if c.Process != nil { - c.Process.Signal(syscall.SIGINT) - } - } - - }() - } - return c.CombinedOutput() -} diff --git a/pkg/test/ginkgo/test_suite.go b/pkg/test/ginkgo/test_suite.go index 96eb970dae0c..216680e9e813 100644 --- a/pkg/test/ginkgo/test_suite.go +++ b/pkg/test/ginkgo/test_suite.go @@ -4,80 +4,38 @@ import ( "regexp" "time" - "github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2/types" - - "k8s.io/apimachinery/pkg/util/errors" - - k8sgenerated "k8s.io/kubernetes/openshift-hack/e2e/annotate/generated" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" "github.com/openshift/origin/pkg/test/extensions" - origingenerated "github.com/openshift/origin/test/extended/util/annotate/generated" ) -func testsForSuite() ([]*testCase, error) { - var tests []*testCase - var errs []error - - // Don't build the tree multiple times, it results in multiple initing of tests - if !ginkgo.GetSuite().InPhaseBuildTree() { - ginkgo.GetSuite().BuildTree() - } - - ginkgo.GetSuite().WalkTests(func(name string, spec types.TestSpec) { - // we need to ensure the default path always annotates both - // origin and k8s tests accordingly, since each of these - // currently have their own annotations which are not - // merged anywhere else but applied here - if append, ok := origingenerated.Annotations[name]; ok { - spec.AppendText(append) - } - if append, ok := k8sgenerated.Annotations[name]; ok { - spec.AppendText(append) - } - tc, err := newTestCaseFromGinkgoSpec(spec) - if err != nil { - errs = append(errs, err) - } - tests = append(tests, tc) - }) - if len(errs) > 0 { - return nil, errors.NewAggregate(errs) - } - return tests, nil -} - var re = regexp.MustCompile(`.*\[Timeout:(.[^\]]*)\]`) -func externalBinaryTestsToOriginTestCases(specs extensions.ExtensionTestSpecs) []*testCase { +func extensionTestSpecsToOriginTestCases(specs extensions.ExtensionTestSpecs) ([]*testCase, error) { var tests []*testCase for _, spec := range specs { - tests = append(tests, &testCase{ + tc := &testCase{ name: spec.Name, rawName: spec.Name, binary: spec.Binary, - }) - } - return tests -} - -func newTestCaseFromGinkgoSpec(spec types.TestSpec) (*testCase, error) { - name := spec.Text() - tc := &testCase{ - name: name, - locations: spec.CodeLocations(), - spec: spec, - } + } - if match := re.FindStringSubmatch(name); match != nil { - testTimeOut, err := time.ParseDuration(match[1]) - if err != nil { - return nil, err + // Override timeout from suite with `[Timeout:X]` duration + if match := re.FindStringSubmatch(tc.name); match != nil { + testTimeOut, err := time.ParseDuration(match[1]) + if err != nil { + return nil, errors.WithMessage(err, "failed to parse test timeout") + } + logrus.WithField("test", tc.name).Debugf("Overriding test timeout to %s", testTimeOut) + tc.testTimeout = testTimeOut } - tc.testTimeout = testTimeOut + + tests = append(tests, tc) } - return tc, nil + return tests, 
nil } type testCase struct { @@ -145,30 +103,46 @@ var ( Disruptive ClusterStabilityDuringTest = "Disruptive" ) +type Kind int + +const ( + KindInternal Kind = iota + KindExternal +) + type TestSuite struct { - Name string - Description string + Name string `json:"name"` + Description string `json:"description"` + Kind Kind `json:"kind"` - Matches TestMatchFunc + SuiteMatcher TestMatchFunc `json:"-"` // The number of times to execute each test in this suite. - Count int + Count int `json:"count,omitempty"` // The maximum parallelism of this suite. - Parallelism int + Parallelism int `json:"parallelism,omitempty"` // The number of flakes that may occur before this test is marked as a failure. - MaximumAllowedFlakes int + MaximumAllowedFlakes int `json:"maximumAllowedFlakes,omitempty"` - ClusterStabilityDuringTest ClusterStabilityDuringTest + ClusterStabilityDuringTest ClusterStabilityDuringTest `json:"clusterStabilityDuringTest,omitempty"` - TestTimeout time.Duration + TestTimeout time.Duration `json:"testTimeout,omitempty"` + + // OTE + Qualifiers []string `json:"qualifiers,omitempty"` + Extension *extensions.Extension `json:"-"` } type TestMatchFunc func(name string) bool func (s *TestSuite) Filter(tests []*testCase) []*testCase { + if s.SuiteMatcher == nil { + return tests + } + matches := make([]*testCase, 0, len(tests)) for _, test := range tests { - if !s.Matches(test.name) { + if !s.SuiteMatcher(test.name) { continue } matches = append(matches, test) @@ -180,13 +154,13 @@ func (s *TestSuite) AddRequiredMatchFunc(matchFn TestMatchFunc) { if matchFn == nil { return } - if s.Matches == nil { - s.Matches = matchFn + if s.SuiteMatcher == nil { + s.SuiteMatcher = matchFn return } - originalMatchFn := s.Matches - s.Matches = func(name string) bool { + originalMatchFn := s.SuiteMatcher + s.SuiteMatcher = func(name string) bool { return originalMatchFn(name) && matchFn(name) } } diff --git a/pkg/testsuites/cni.go b/pkg/testsuites/cni.go deleted file mode 100644 index a5984b83f975..000000000000 --- a/pkg/testsuites/cni.go +++ /dev/null @@ -1,30 +0,0 @@ -package testsuites - -import ( - "strings" -) - -// Determines whether a test should be run for third-party network plugin conformance testing -func inCNISuite(name string) bool { - if strings.Contains(name, "[Suite:k8s]") && strings.Contains(name, "[sig-network]") { - // Run all upstream sig-network conformance tests - if strings.Contains(name, "[Conformance]") { - return true - } - - // Run all upstream NetworkPolicy tests except named port tests. (Neither - // openshift-sdn nor ovn-kubernetes supports named ports in NetworkPolicy, - // so we don't require third party tests to support them either.) - if strings.Contains(name, "NetworkPolicy") && !strings.Contains(name, "named port") { - return true - } - - // Include dual-stack tests in the test suite; they will automatically get - // filtered out if the cluster is single-stack. 
- if strings.Contains(name, "[Feature:IPv6DualStack]") { - return true - } - } - - return false -} diff --git a/pkg/testsuites/filters.go b/pkg/testsuites/filters.go index 10a33ff8998e..d1efa25efcf3 100644 --- a/pkg/testsuites/filters.go +++ b/pkg/testsuites/filters.go @@ -3,9 +3,7 @@ package testsuites import ( "bytes" "fmt" - "regexp" "strings" - "time" "github.com/openshift/origin/pkg/test/ginkgo" ) @@ -16,49 +14,37 @@ func SuitesString(suites []*ginkgo.TestSuite, prefix string) string { buf := &bytes.Buffer{} fmt.Fprintf(buf, prefix) for _, suite := range suites { - fmt.Fprintf(buf, "%s\n %s\n\n", suite.Name, suite.Description) - } - return buf.String() -} - -func isDisabled(name string) bool { - if strings.Contains(name, "[Disabled") { - return true - } - - return shouldSkipUntil(name) -} + fmt.Fprintf(buf, "%s\n", suite.Name) -// shouldSkipUntil allows a test to be skipped with a time limit. -// the test should be annotated with the 'SkippedUntil' tag, as shown below. -// -// [SkippedUntil:05092022:blocker-bz/123456] -// -// - the specified date should conform to the 'MMDDYYYY' format. -// - a valid blocker BZ must be specified -// if the specified date in the tag has not passed yet, the test -// will be skipped by the runner. -func shouldSkipUntil(name string) bool { - re, err := regexp.Compile(`\[SkippedUntil:(\d{8}):blocker-bz\/([a-zA-Z0-9]+)\]`) - if err != nil { - // it should only happen with a programmer error and unit - // test will prevent that - return false - } - matches := re.FindStringSubmatch(name) - if len(matches) != 3 { - return false - } + // Add source information + if suite.Extension != nil { + fmt.Fprintf(buf, " Source: Extension (%s:%s:%s)\n", suite.Extension.Component.Product, suite.Extension.Component.Kind, suite.Extension.Component.Name) + if suite.Extension.Source.SourceImage != "" { + fmt.Fprintf(buf, " Image: %s\n", suite.Extension.Source.SourceImage) + } + if suite.Extension.Source.SourceURL != "" { + fmt.Fprintf(buf, " URL: %s\n", suite.Extension.Source.SourceURL) + } + } else { + fmt.Fprintf(buf, " Source: Internal\n") + } - skipUntil, err := time.Parse("01022006", matches[1]) - if err != nil { - return false - } + // Add description with proper indentation + if suite.Description != "" { + // Split description into lines and indent each line + lines := strings.Split(strings.TrimSpace(suite.Description), "\n") + fmt.Fprintf(buf, " Description:\n") + for _, line := range lines { + trimmedLine := strings.TrimSpace(line) + if trimmedLine != "" { + fmt.Fprintf(buf, " %s\n", trimmedLine) + } + } + } - if skipUntil.After(time.Now()) { - return true + fmt.Fprintf(buf, "\n") } - return false + return buf.String() } // isStandardEarlyTest returns true if a test is considered part of the normal @@ -78,3 +64,29 @@ func isStandardEarlyOrLateTest(name string) bool { } return strings.Contains(name, "[Suite:openshift/conformance/parallel") } + +// withStandardEarlyOrLateTests combines a CEL expression with the standard early/late test logic. +// It returns a CEL expression that matches tests that either satisfy the provided expression +// OR are standard early/late tests. +func withStandardEarlyOrLateTests(expr string) string { + earlyLateExpr := `(name.contains("[Early]") || name.contains("[Late]")) && name.contains("[Suite:openshift/conformance/parallel")` + + if expr == "" { + return earlyLateExpr + } + + return fmt.Sprintf("(%s) || (%s)", expr, earlyLateExpr) +} + +// withStandardEarlyTests combines a CEL expression with the standard early test logic. 
+// It returns a CEL expression that matches tests that either satisfy the provided expression +// OR are standard early tests. +func withStandardEarlyTests(expr string) string { + earlyExpr := `name.contains("[Early]") && name.contains("[Suite:openshift/conformance/parallel")` + + if expr == "" { + return earlyExpr + } + + return fmt.Sprintf("(%s) || (%s)", expr, earlyExpr) +} diff --git a/pkg/testsuites/minimal.go b/pkg/testsuites/minimal.go deleted file mode 100644 index c6d360bda417..000000000000 --- a/pkg/testsuites/minimal.go +++ /dev/null @@ -1,2684 +0,0 @@ -package testsuites - -// This file is updated via the output from https://sippy.ci.openshift.org/api/canary?release=$VERSION -var ( - minimal = map[string]struct{}{ - "user.openshift.io~v1~Group.Kubernetes resource CRUD operations user.openshift.io~v1~Group search view displays created resource instance": {}, - "user.openshift.io~v1~Group.Kubernetes resource CRUD operations user.openshift.io~v1~Group edits the resource instance": {}, - "user.openshift.io~v1~Group.Kubernetes resource CRUD operations user.openshift.io~v1~Group displays detail view for newly created resource instance": {}, - "user.openshift.io~v1~Group.Kubernetes resource CRUD operations user.openshift.io~v1~Group displays a list view for the resource": {}, - "user.openshift.io~v1~Group.Kubernetes resource CRUD operations user.openshift.io~v1~Group deletes the resource instance": {}, - "user.openshift.io~v1~Group.Kubernetes resource CRUD operations user.openshift.io~v1~Group creates the resource instance": {}, - "upgrade": {}, - "storageClass related feature.Setting mountOptions for StorageClass (outline example : | vsphere | thin |)": {}, - "storageClass related feature.Setting mountOptions for StorageClass (outline example : | gce | standard |)": {}, - "storageClass related feature.Setting mountOptions for StorageClass (outline example : | ebs | gp2 |)": {}, - "storageClass related feature.PVC with storage class will not provision io1 pv with wrong parameters for aws ebs volume": {}, - "storage security check.secret volume security check": {}, - "storage security check.[origin_infra_20] volume security testing (outline example : | gcePersistentDisk | pdName | gce |)": {}, - "storage security check.[origin_infra_20] volume security testing (outline example : | cinder | volumeID | cinder |)": {}, - "storage security check.[origin_infra_20] volume security testing (outline example : | awsElasticBlockStore | volumeID | ebs |)": {}, - "storage security check.Run pod with specific user/group by using securityContext (outline example : | runAsUser |)": {}, - "storage security check.Run pod with specific user/group by using securityContext (outline example : | fsGroup |)": {}, - "storage security check.Run pod with specific SELinux by using seLinuxOptions in securityContext": {}, - "stibuild.feature.Trigger s2i/docker/custom build using additional imagestream (outline example : | ocp12041-s2i.json |)": {}, - "stibuild.feature.Mount source secret and configmap to builder container- sourcestrategy": {}, - "stibuild.feature.Image source extraction w/ symlink should success when running a build": {}, - "stibuild.feature.Creates a new application based on the source code in a private remote repository": {}, - "stibuild.feature.Create a build configuration based on a private remote git repository": {}, - "statefulsets.Add storage is applicable for all workloads statefulsets create a statefulsets resource and adds storage to it": {}, - 
"snapshot.storage.k8s.io~v1~VolumeSnapshotContent.Kubernetes resource CRUD operations snapshot.storage.k8s.io~v1~VolumeSnapshotContent search view displays created resource instance": {}, - "snapshot.storage.k8s.io~v1~VolumeSnapshotContent.Kubernetes resource CRUD operations snapshot.storage.k8s.io~v1~VolumeSnapshotContent edits the resource instance": {}, - "snapshot.storage.k8s.io~v1~VolumeSnapshotContent.Kubernetes resource CRUD operations snapshot.storage.k8s.io~v1~VolumeSnapshotContent displays detail view for newly created resource instance": {}, - "snapshot.storage.k8s.io~v1~VolumeSnapshotContent.Kubernetes resource CRUD operations snapshot.storage.k8s.io~v1~VolumeSnapshotContent displays a list view for the resource": {}, - "snapshot.storage.k8s.io~v1~VolumeSnapshotContent.Kubernetes resource CRUD operations snapshot.storage.k8s.io~v1~VolumeSnapshotContent deletes the resource instance": {}, - "snapshot.storage.k8s.io~v1~VolumeSnapshotContent.Kubernetes resource CRUD operations snapshot.storage.k8s.io~v1~VolumeSnapshotContent creates the resource instance": {}, - "snapshot.storage.k8s.io~v1~VolumeSnapshotClass.Kubernetes resource CRUD operations snapshot.storage.k8s.io~v1~VolumeSnapshotClass search view displays created resource instance": {}, - "snapshot.storage.k8s.io~v1~VolumeSnapshotClass.Kubernetes resource CRUD operations snapshot.storage.k8s.io~v1~VolumeSnapshotClass edits the resource instance": {}, - "snapshot.storage.k8s.io~v1~VolumeSnapshotClass.Kubernetes resource CRUD operations snapshot.storage.k8s.io~v1~VolumeSnapshotClass displays detail view for newly created resource instance": {}, - "snapshot.storage.k8s.io~v1~VolumeSnapshotClass.Kubernetes resource CRUD operations snapshot.storage.k8s.io~v1~VolumeSnapshotClass displays a list view for the resource": {}, - "snapshot.storage.k8s.io~v1~VolumeSnapshotClass.Kubernetes resource CRUD operations snapshot.storage.k8s.io~v1~VolumeSnapshotClass deletes the resource instance": {}, - "snapshot.storage.k8s.io~v1~VolumeSnapshotClass.Kubernetes resource CRUD operations snapshot.storage.k8s.io~v1~VolumeSnapshotClass creates the resource instance": {}, - "snapshot.storage.k8s.io~v1~VolumeSnapshot.Kubernetes resource CRUD operations snapshot.storage.k8s.io~v1~VolumeSnapshot search view displays created resource instance": {}, - "snapshot.storage.k8s.io~v1~VolumeSnapshot.Kubernetes resource CRUD operations snapshot.storage.k8s.io~v1~VolumeSnapshot edits the resource instance": {}, - "snapshot.storage.k8s.io~v1~VolumeSnapshot.Kubernetes resource CRUD operations snapshot.storage.k8s.io~v1~VolumeSnapshot displays detail view for newly created resource instance": {}, - "snapshot.storage.k8s.io~v1~VolumeSnapshot.Kubernetes resource CRUD operations snapshot.storage.k8s.io~v1~VolumeSnapshot displays a list view for the resource": {}, - "snapshot.storage.k8s.io~v1~VolumeSnapshot.Kubernetes resource CRUD operations snapshot.storage.k8s.io~v1~VolumeSnapshot deletes the resource instance": {}, - "snapshot.storage.k8s.io~v1~VolumeSnapshot.Kubernetes resource CRUD operations snapshot.storage.k8s.io~v1~VolumeSnapshot creates the resource instance": {}, - "secrets related scenarios.Consume secret via volume plugin with multiple volumes": {}, - "scaling related scenarios.[openshift-sme]When rolling deployments the pod should shutdown gracefully": {}, - "resouces related scenarios.Return description of resources with cli describe": {}, - "replicationcontrollers.Add storage is applicable for all workloads replicationcontrollers create a 
replicationcontrollers resource and adds storage to it": {}, - "replicasets.Add storage is applicable for all workloads replicasets create a replicasets resource and adds storage to it": {}, - "release-openshift-origin-installer-e2e-gcp-shared-vpc-4.10.Pod": {}, - "release-openshift-origin-installer-e2e-azure-shared-vpc-4.10.Pod": {}, - "release-openshift-origin-installer-e2e-aws-upgrade-4.7-to-4.8-to-4.9-to-4.10-ci.Pod": {}, - "release-openshift-origin-installer-e2e-aws-shared-vpc-4.10.Pod": {}, - "release-openshift-origin-installer-e2e-aws-shared-vpc-4.10.Overall": {}, - "release-openshift-origin-installer-e2e-aws-disruptive-4.10.Pod": {}, - "release-openshift-ocp-osd-gcp-nightly-4.10.Pod": {}, - "release-openshift-ocp-osd-aws-nightly-4.10.Pod": {}, - "release-openshift-ocp-installer-e2e-metal-serial-4.10.Pod": {}, - "release-openshift-ocp-installer-e2e-metal-compact-4.10.Pod": {}, - "release-openshift-ocp-installer-e2e-metal-4.10.Pod": {}, - "release-openshift-ocp-installer-e2e-gcp-serial-4.10.Pod": {}, - "release-openshift-ocp-installer-e2e-azure-serial-4.10.Pod": {}, - "release-openshift-ocp-installer-e2e-aws-upi-4.10.Pod": {}, - "release-openshift-ocp-installer-e2e-aws-mirrors-4.10.Pod": {}, - "release-openshift-ocp-installer-e2e-aws-csi-4.10.Pod": {}, - "promote-release-openshift-machine-os-content-e2e-aws-4.10.Pod": {}, - "promote-release-openshift-machine-os-content-e2e-aws-4.10-s390x.Pod": {}, - "promote-release-openshift-machine-os-content-e2e-aws-4.10-ppc64le.Pod": {}, - "projects related features via cli.User can get node selector from a project": {}, - "projects related features via cli.Could remove user and group from the current project": {}, - "periodic-ci-openshift-verification-tests-master-ocp-4.10-e2e-vsphere-cucushift-ipi.Pod": {}, - "periodic-ci-openshift-verification-tests-master-ocp-4.10-e2e-gcp-cucushift-upi.Pod": {}, - "periodic-ci-openshift-verification-tests-master-ocp-4.10-e2e-gcp-cucushift-upi.Overall": {}, - "periodic-ci-openshift-verification-tests-master-ocp-4.10-e2e-gcp-cucushift-ipi.Pod": {}, - "periodic-ci-openshift-verification-tests-master-ocp-4.10-e2e-aws-cucushift-ipi.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-upgrade-from-stable-4.9-e2e-metal-ipi-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-upgrade-from-stable-4.9-e2e-aws-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-upgrade-from-stable-4.8-e2e-aws-upgrade-paused.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-vsphere.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-vsphere-upi.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-vsphere-upi-serial.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-vsphere-techpreview.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-vsphere-techpreview-serial.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-vsphere-serial.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-vsphere-ovn.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-ovirt.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-ovirt-ovn.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-openstack-proxy.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-openstack-fips.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-openstack-fips.Overall": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-openstack-az.Pod": {}, - 
"periodic-ci-openshift-release-master-nightly-4.10-e2e-metal-single-node-live-iso.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-metal-ipi.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-metal-ipi-virtualmedia.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-metal-ipi-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-metal-ipi-serial-ipv4.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-metal-ipi-ovn-ipv6.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-metal-ipi-ovn-dualstack.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-metal-ipi-ovn-dualstack-local-gateway.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-metal-ipi-compact.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-metal-ipi-compact.Overall": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-metal-assisted.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-metal-assisted-ipv6.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-gcp.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-gcp-rt.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-gcp-libvirt-cert-rotation.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-gcp-fips.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-gcp-fips-serial.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-azure.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-azure-upgrade-cnv.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-azure-fips.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-azure-fips-serial.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-azure-deploy-cnv.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-aws.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-aws-workers-rhel8.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-aws-workers-rhel7.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-aws-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-aws-single-node.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-aws-single-node-serial.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-aws-serial.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-aws-proxy.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-aws-ovn-local-gateway.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-aws-ovn-local-gateway.Overall": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-aws-fips.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-aws-fips-serial.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-aws-fips-serial.Overall": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-aws-cgroupsv2.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-e2e-aws-canary.Pod": {}, - "periodic-ci-openshift-release-master-nightly-4.10-console-aws.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-upgrade-from-stable-4.9-from-stable-4.8-e2e-aws-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-upgrade-from-stable-4.9-e2e-vsphere-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-upgrade-from-stable-4.9-e2e-ovirt-upgrade.Pod": {}, - 
"periodic-ci-openshift-release-master-ci-4.10-upgrade-from-stable-4.9-e2e-openstack-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-upgrade-from-stable-4.9-e2e-gcp-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-upgrade-from-stable-4.9-e2e-gcp-ovn-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-upgrade-from-stable-4.9-e2e-azure-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-upgrade-from-stable-4.9-e2e-azure-ovn-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-upgrade-from-stable-4.9-e2e-aws-uwm.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-upgrade-from-stable-4.9-e2e-aws-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-upgrade-from-stable-4.9-e2e-aws-upgrade-rollback.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-upgrade-from-stable-4.9-e2e-aws-ovn-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-upgrade-from-stable-4.9-e2e-aws-ovn-upgrade-rollback.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-upgrade-from-stable-4.9-e2e-aws-compact-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-openstack-techpreview-serial.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-openstack-techpreview-parallel.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-openstack-serial.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-openstack-parallel.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-openstack-ovn.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-openstack-kuryr.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-network-migration.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-network-migration-rollback.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-gcp.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-gcp-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-gcp-techpreview.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-gcp-techpreview-serial.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-gcp-serial.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-gcp-ovn.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-gcp-compact.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-gcp-compact-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-gcp-compact-serial.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-azure.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-azure-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-azure-upgrade-single-node.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-azure-techpreview.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-azure-techpreview-serial.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-azure-serial.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-azure-ovn.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-azure-ovn-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-azure-compact.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-azure-compact-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-azure-compact-serial.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-azure-cilium.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-aws.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-aws-upgrade.Pod": 
{}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-aws-upgrade-single-node.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-aws-upgrade-rollback.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-aws-techpreview.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-aws-techpreview-serial.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-aws-serial.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-aws-sdn-multitenant.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-aws-ovn.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-aws-ovn-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-aws-ovn-network-stress.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-aws-network-stress.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-aws-hypershift.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-aws-compact.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-aws-compact-upgrade.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-aws-compact-serial.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-aws-cgroupsv2.Pod": {}, - "periodic-ci-openshift-release-master-ci-4.10-e2e-aws-calico.Pod": {}, - "periodic-ci-openshift-multiarch-master-nightly-4.10-ocp-installer-remote-libvirt-s390x.Pod": {}, - "periodic-ci-openshift-multiarch-master-nightly-4.10-ocp-installer-remote-libvirt-ppc64le.Pod": {}, - "periodic-ci-openshift-multiarch-master-nightly-4.10-ocp-e2e-aws-arm64.Pod": {}, - "operator.Tag the image machine-os-content into the image stream tag stable:machine-os-content": {}, - "operator.Run template e2e-gcp - e2e-gcp container setup": {}, - "operator.Run template e2e-azure - e2e-azure container setup": {}, - "operator.Run template e2e-aws-upgrade - e2e-aws-upgrade container setup": {}, - "operator.Run template e2e-aws-disruptive - e2e-aws-disruptive container setup": {}, - "operator.Run template e2e-aws - e2e-aws container setup": {}, - "operator.Run multi-stage test e2e-vsphere-upi-serial - e2e-vsphere-upi-serial-upi-install-vsphere container test": {}, - "operator.Run multi-stage test e2e-vsphere-upi - e2e-vsphere-upi-upi-install-vsphere container test": {}, - "operator.Run multi-stage test e2e-vsphere-cucushift-ipi - e2e-vsphere-cucushift-ipi-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-vsphere - e2e-vsphere-ipi-install-vsphere-registry container test": {}, - "operator.Run multi-stage test e2e-ovirt-ovn - e2e-ovirt-ovn-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-openstack-upgrade - e2e-openstack-upgrade-ipi-install-rbac container test": {}, - "operator.Run multi-stage test e2e-openstack-upgrade - e2e-openstack-upgrade-ipi-install-monitoringpvc-annotate container test": {}, - "operator.Run multi-stage test e2e-openstack-upgrade - e2e-openstack-upgrade-ipi-install-monitoringpvc container test": {}, - "operator.Run multi-stage test e2e-openstack-upgrade - e2e-openstack-upgrade-ipi-install-install-stableinitial container test": {}, - "operator.Run multi-stage test e2e-openstack-upgrade - e2e-openstack-upgrade-ipi-install-hosted-loki container test": {}, - "operator.Run multi-stage test e2e-openstack-techpreview-serial - e2e-openstack-techpreview-serial-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-openstack-techpreview-parallel - e2e-openstack-techpreview-parallel-ipi-install-install container test": {}, - "operator.Run multi-stage test 
e2e-openstack-proxy - e2e-openstack-proxy-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-openstack-parallel - e2e-openstack-parallel-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-openstack-ovn - e2e-openstack-ovn-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-openstack-kuryr - e2e-openstack-kuryr-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-openstack-fips - e2e-openstack-fips-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-openstack-az - e2e-openstack-az-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-network-migration-rollback - e2e-network-migration-rollback-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-network-migration - e2e-network-migration-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-metal-ipi-virtualmedia - e2e-metal-ipi-virtualmedia-baremetalds-devscripts-setup container test": {}, - "operator.Run multi-stage test e2e-metal-ipi-upgrade - e2e-metal-ipi-upgrade-baremetalds-devscripts-setup container test": {}, - "operator.Run multi-stage test e2e-metal-ipi-ovn-dualstack-local-gateway - e2e-metal-ipi-ovn-dualstack-local-gateway-baremetalds-devscripts-setup container test": {}, - "operator.Run multi-stage test e2e-metal-ipi-ovn-dualstack - e2e-metal-ipi-ovn-dualstack-baremetalds-devscripts-setup container test": {}, - "operator.Run multi-stage test e2e-metal-ipi-compact - e2e-metal-ipi-compact-baremetalds-packet-teardown container test": {}, - "operator.Run multi-stage test e2e-metal-ipi - e2e-metal-ipi-baremetalds-devscripts-setup container test": {}, - "operator.Run multi-stage test e2e-metal-assisted-ipv6 - e2e-metal-assisted-ipv6-baremetalds-packet-setup container test": {}, - "operator.Run multi-stage test e2e-gcp-ovn - e2e-gcp-ovn-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-gcp-fips - e2e-gcp-fips-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-gcp-cucushift-ipi - e2e-gcp-cucushift-ipi-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-azure-upgrade-single-node - e2e-azure-upgrade-single-node-ipi-install-install-stableinitial container test": {}, - "operator.Run multi-stage test e2e-azure-upgrade - e2e-azure-upgrade-ipi-conf-azure container test": {}, - "operator.Run multi-stage test e2e-azure-fips-serial - e2e-azure-fips-serial-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-azure-fips - e2e-azure-fips-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-azure-deploy-cnv - e2e-azure-deploy-cnv-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-azure-compact-serial - e2e-azure-compact-serial-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-azure-compact - e2e-azure-compact-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-azure-cilium - e2e-azure-cilium-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-aws-workers-rhel7 - e2e-aws-workers-rhel7-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-aws-upgrade-single-node - e2e-aws-upgrade-single-node-ipi-install-install-stableinitial container test": {}, - "operator.Run multi-stage test e2e-aws-upgrade-paused - e2e-aws-upgrade-paused-ipi-install-install-stableinitial container test": {}, - "operator.Run 
multi-stage test e2e-aws-techpreview-serial - e2e-aws-techpreview-serial-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-aws-techpreview - e2e-aws-techpreview-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-aws-ovn-network-stress - e2e-aws-ovn-network-stress-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-aws-ovn-local-gateway - e2e-aws-ovn-local-gateway-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-aws-ovn - e2e-aws-ovn-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-aws-network-stress - e2e-aws-network-stress-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-aws-hypershift - e2e-aws-hypershift-ipi-install-rbac container test": {}, - "operator.Run multi-stage test e2e-aws-fips-serial - e2e-aws-fips-serial-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-aws-fips - e2e-aws-fips-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-aws-compact-upgrade - e2e-aws-compact-upgrade-ipi-install-install-stableinitial container test": {}, - "operator.Run multi-stage test e2e-aws-compact-serial - e2e-aws-compact-serial-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-aws-compact - e2e-aws-compact-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-aws-cgroupsv2 - e2e-aws-cgroupsv2-ipi-install-install container test": {}, - "operator.Run multi-stage test e2e-aws-canary - e2e-aws-canary-ipi-install-install container test": {}, - "operator.Run multi-stage test console-aws - console-aws-ipi-install-install container test": {}, - "operator.Find the input image upi-installer and tag it into the pipeline": {}, - "operator.Find the input image root and tag it into the pipeline": {}, - "operator.Find the input image origin-centos-8 and tag it into the pipeline": {}, - "operator.Find the input image openstack-installer and tag it into the pipeline": {}, - "operator.Find the input image ocp-cli-jq-latest and tag it into the pipeline": {}, - "operator.Find the input image ocp-4.5-upi-installer and tag it into the pipeline": {}, - "operator.Find the input image machine-os-content and tag it into the pipeline": {}, - "operator.Find the input image libvirt-installer and tag it into the pipeline": {}, - "operator.Find the input image hypershift-hypershift-latest and tag it into the pipeline": {}, - "operator.Find the input image dev-scripts and tag it into the pipeline": {}, - "operator.Find the input image console-tests and tag it into the pipeline": {}, - "operator.Find the input image cnv-ci-src-upgrade and tag it into the pipeline": {}, - "operator.Find the input image cnv-ci-src and tag it into the pipeline": {}, - "operator.Find the input image ci-verification-tests-latest and tag it into the pipeline": {}, - "operator.Find the input image assisted-test-infra and tag it into the pipeline": {}, - "operator.Find the input image assisted-service and tag it into the pipeline": {}, - "operator.Find the input image assisted-installer-controller and tag it into the pipeline": {}, - "operator.Find the input image assisted-installer-agent and tag it into the pipeline": {}, - "operator.Find the input image assisted-installer and tag it into the pipeline": {}, - "operator.Find the input image ansible and tag it into the pipeline": {}, - "operator.Find all of the input images from ocp/4.7:${component} and tag them into the output image 
stream": {}, - "operator.Find all of the input images from ocp/4.10:${component} and tag them into the output image stream": {}, - "operator.Create the release image \"latest\" containing all images built by this job": {}, - "operator.Create the release image \"initial\" containing all images built by this job": {}, - "operator.All images are built and tagged into stable": {}, - "operator install page": {}, - "operator conditions storage": {}, - "operator conditions service-ca": {}, - "operator conditions page": {}, - "operator conditions operator-lifecycle-manager-packageserver": {}, - "operator conditions operator-lifecycle-manager-catalog": {}, - "operator conditions operator-lifecycle-manager": {}, - "operator conditions openshift-samples": {}, - "operator conditions openshift-controller-manager": {}, - "operator conditions node-tuning": {}, - "operator conditions marketplace": {}, - "operator conditions machine-approver": {}, - "operator conditions kube-scheduler": {}, - "operator conditions kube-apiserver": {}, - "operator conditions insights": {}, - "operator conditions dns": {}, - "operator conditions csi-snapshot-controller": {}, - "operator conditions console": {}, - "operator conditions config-operator": {}, - "operator conditions cluster-autoscaler": {}, - "operator conditions cloud-credential": {}, - "operator conditions cloud-controller-manager": {}, - "operator conditions baremetal": {}, - "oc_login.feature.Warning should be displayed when login failed via oc login": {}, - "oc_expose.feature.Access app througth secure service and regenerate service serving certs if it about to expire": {}, - "oc new-app related scenarios.oc new-app/new-build should respect ImageStream hidden tag": {}, - "oc import-image related feature.Tags should be added to ImageStream if image repository is from an external docker registry": {}, - "oc import-image related feature.Do not create tags for ImageStream if image repository does not have tags": {}, - "oc import-image related feature.Could not import the tag when reference is true": {}, - "oc idle.CLI - Idle service with dry-run": {}, - "oc idle.CLI - Idle service from file": {}, - "oc idle.CLI - Idle service by label": {}, - "oc idle.CLI - Idle all the service in the same project": {}, - "oc debug related scenarios.oc should be able to debug init container": {}, - "jenkins.feature.new-app/new-build support for pipeline buildconfigs": {}, - "jenkins.feature.Using nodejs slave when do jenkinspipeline strategy (outline example : | 2 |)": {}, - "deployments.Add storage is applicable for all workloads deployments create a deployments resource and adds storage to it": {}, - "deploymentconfigs.Add storage is applicable for all workloads deploymentconfigs create a deploymentconfigs resource and adds storage to it": {}, - "daemonsets.Add storage is applicable for all workloads daemonsets create a daemonsets resource and adds storage to it": {}, - "build 'apps' with CLI.Handle build naming collisions": {}, - "build 'apps' with CLI.Create applications only with multiple db images": {}, - "build 'apps' with CLI.Cannot create secret from local file and with same name via oc new-build": {}, - "[sig-storage] [sig-api-machinery] configmap-upgrade": {}, - "[sig-storage] [Serial] Volume metrics should create volume metrics with the correct FilesystemMode PVC ref [Suite:openshift/conformance/serial] [Suite:k8s]": {}, - "[sig-storage] [Serial] Volume metrics should create volume metrics with the correct BlockMode PVC ref [Suite:openshift/conformance/serial] 
[Suite:k8s]": {}, - "[sig-storage] Volumes NFSv4 should be mountable for NFSv4 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] Volumes NFSv3 should be mountable for NFSv3 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] Volumes ConfigMap should be mountable [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] Subpath Container restart should verify that container can restart successfully after configmaps modified [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] Subpath Atomic writer volumes should support subpaths with secret pod [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Subpath Atomic writer volumes should support subpaths with projected pod [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Subpath Atomic writer volumes should support subpaths with downward pod [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Secrets should be immutable if `immutable` field is set [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Secrets should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Secrets should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Secrets should be consumable from pods in volume with mappings [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Secrets should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Secrets should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Secrets should be consumable from pods in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Secrets should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected secret should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected secret should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected secret should be consumable from pods in volume with mappings [NodeConformance] [Conformance] 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected secret should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected secret should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected secret should be consumable from pods in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected secret should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] Projected secret optional updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected downwardAPI should update labels on modification [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected downwardAPI should update annotations on modification [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected downwardAPI should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected downwardAPI should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected downwardAPI should provide podname only [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected downwardAPI should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] Projected downwardAPI should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] Projected downwardAPI should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected downwardAPI should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected downwardAPI should provide container's memory request [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected downwardAPI should provide container's memory limit [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected downwardAPI should provide container's cpu request [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected downwardAPI should provide container's cpu limit [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected configMap updates should be reflected in volume 
[NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected configMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected configMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected configMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected configMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected configMap should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] Projected configMap should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] Projected configMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected configMap should be consumable from pods in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] Projected combined should project all components that make up the projection API [Projection][NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: dir] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: dir] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] 
[Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: dir-link] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: dir-link] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: block] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: block] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local [Volume type: block] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] 
[Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local Stress with local volumes [Serial] should be able to process many pods and reuse local volumes [Suite:openshift/conformance/serial] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local Pods sharing a single local PV [Serial] all pods should be running [Suite:openshift/conformance/serial] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes-local Pod with node different from PV's NodeAffinity should fail scheduling due to different NodeAffinity [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 3 PVs and 3 PVCs: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 2 PVs and 4 PVCs: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs should create a non-pre-bound PV and PVC: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PVC and non-pre-bound PV: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV and a pre-bound PVC: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PersistentVolumes GCEPD should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PV Protection Verify that PV bound to a PVC is not removed immediately [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] PV Protection Verify \"immediate\" deletion of a PV that is not bound to a PVC [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] Mounted volume expand Should verify mounted devices can be resized [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: 
Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: 
Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: 
local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume 
[Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: 
hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a 
pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] HostPath should support subPath [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] HostPath should support r/w [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] HostPath should give a volume the correct mode [LinuxOnly] [NodeConformance] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : secret [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : configmap [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] EmptyDir wrapper volumes should not conflict [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] EmptyDir wrapper volumes should not cause race condition when used for configmaps [Serial] [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] volume on tmpfs should have the correct mode using FSGroup [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] volume on default medium should have the correct mode using FSGroup [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] nonexistent volume subPath should have the correct mode and owner using FSGroup [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is root [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] files with FSGroup ownership should support (root,0644,tmpfs) [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes pod should support shared volumes between containers [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes pod should support memory backed volumes of specified size [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] Downward API volume should update labels on modification [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] Downward API volume should update annotations on modification [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] Downward API volume should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] Downward API volume should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] Downward API volume should provide podname only [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] Downward API volume should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] Downward API volume should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] Downward API volume should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] Downward API volume should provide container's memory request [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] Downward API volume should provide container's memory limit [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] Downward API volume should provide container's cpu request [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] Downward API volume should provide container's cpu limit [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] ConfigMap updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] ConfigMap should be immutable if `immutable` field is set [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] ConfigMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] ConfigMap should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] ConfigMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] ConfigMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] ConfigMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] ConfigMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] ConfigMap should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] ConfigMap should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] ConfigMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] ConfigMap should be consumable from pods in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] ConfigMap binary data should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume storage capacity unlimited [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume storage capacity exhausted, late binding, with topology [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume storage capacity exhausted, late binding, no topology [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume storage capacity exhausted, immediate binding [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSIStorageCapacity CSIStorageCapacity used, no capacity [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSIStorageCapacity CSIStorageCapacity used, have capacity [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSIStorageCapacity CSIStorageCapacity disabled [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSIServiceAccountToken token should not be plumbed down when csiServiceAccountTokenEnabled=false [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSIServiceAccountToken token should not be plumbed down when CSIDriver is not deployed [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSIServiceAccountToken token should be plumbed down when csiServiceAccountTokenEnabled=true [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI workload information using mock driver should not be passed when podInfoOnMount=false [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI workload information using mock driver should be passed when podInfoOnMount=true [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI online volume expansion should expand volume without restarting pod if attach=off, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI attach test using mock driver should require VolumeAttach for drivers with attachment [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI attach test using mock driver should preserve attachment policy when no CSIDriver present [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI attach test using mock driver should not require VolumeAttach for drivers without attachment [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI Volume Snapshots secrets [Feature:VolumeSnapshotDataSource] volume snapshot create/delete with secrets [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI FSGroupPolicy [LinuxOnly] should not modify fsGroup if fsGroupPolicy=None [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI FSGroupPolicy [LinuxOnly] should modify fsGroup if fsGroupPolicy=default [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI FSGroupPolicy [LinuxOnly] should modify fsGroup if fsGroupPolicy=File [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which share the same volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which share the same volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-scheduling] SchedulerPriorities [Serial] PodTopologySpread Scoring validates pod should be preferably scheduled to node which makes the matching pods more evenly distributed [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-scheduling] SchedulerPriorities [Serial] Pod should be scheduled to node that don't match the PodAntiAffinity terms [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-scheduling] SchedulerPriorities [Serial] Pod should be preferably scheduled to nodes pod can tolerate [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-scheduling] SchedulerPreemption [Serial] validates lower priority pod preemption by critical pod [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {},
- "[sig-scheduling] SchedulerPreemption [Serial] validates basic preemption works [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {},
- "[sig-scheduling] SchedulerPreemption [Serial] PriorityClass endpoints verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {},
- "[sig-scheduling] SchedulerPreemption [Serial] PreemptionExecutionPath runs ReplicaSets to verify preemption running path [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {},
- "[sig-scheduling] SchedulerPreemption [Serial] PodTopologySpread Preemption validates proper pods are preempted [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-scheduling] SchedulerPredicates [Serial] validates that there is no conflict between pods with same hostPort but different hostIP and protocol [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-scheduling] SchedulerPredicates [Serial] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance] [Slow] [Suite:k8s]": {},
- "[sig-scheduling] SchedulerPredicates [Serial] validates that taints-tolerations is respected if not matching [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-scheduling] SchedulerPredicates [Serial] validates that taints-tolerations is respected if matching [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-scheduling] SchedulerPredicates [Serial] validates that required NodeAffinity setting is respected if matching [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if not matching [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {},
- "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if matching [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {},
- "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeAffinity is respected if not matching [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-scheduling] SchedulerPredicates [Serial] validates pod overhead is considered along with resource limits of pods that are allowed to run verify pod overhead is accounted for [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-scheduling] SchedulerPredicates [Serial] PodTopologySpread Filtering validates 4 pods with MaxSkew=1 are evenly distributed into 2 nodes [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-scheduling] LimitRange should create a LimitRange with defaults and ensure pod has those defaults applied. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] pods should not fail on systemd timeouts": {},
- "[sig-node] pods should never transition back to pending": {},
- "[sig-node] overlapping apiserver process detected during kube-apiserver rollout": {},
- "[sig-node] kubelet terminates kube-apiserver gracefully": {},
- "[sig-node] kubelet Clean up pods on node kubelet should be able to delete 10 pods per node in 1m0s. [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-node] [Feature:Example] Downward API should create a pod that prints his name and namespace [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Variable Expansion should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance] [Suite:k8s]": {},
- "[sig-node] Variable Expansion should succeed in writing subpaths in container [Slow] [Conformance] [Suite:k8s]": {},
- "[sig-node] Variable Expansion should fail substituting values in a volume subpath with backticks [Slow] [Conformance] [Suite:k8s]": {},
- "[sig-node] Variable Expansion should fail substituting values in a volume subpath with absolute path [Slow] [Conformance] [Suite:k8s]": {},
- "[sig-node] Variable Expansion should allow substituting values in a volume subpath [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Variable Expansion should allow substituting values in a container's command [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Variable Expansion should allow substituting values in a container's args [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should support unsafe sysctls which are actually allowed [MinimumKubeletVersion:1.21] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should support sysctls [MinimumKubeletVersion:1.21] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should reject invalid sysctls [MinimumKubeletVersion:1.21] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should not launch unsafe, but not explicitly enabled sysctls on the node [MinimumKubeletVersion:1.21] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Security Context when creating containers with AllowPrivilegeEscalation should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Security Context when creating containers with AllowPrivilegeEscalation should allow privilege escalation when true [LinuxOnly] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Security Context when creating containers with AllowPrivilegeEscalation should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Security Context should support seccomp unconfined on the container [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Security Context should support seccomp default which is unconfined [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Security Context should support pod.Spec.SecurityContext.SupplementalGroups [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Security Context should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Security Context should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Security Context should support container.SecurityContext.RunAsUser [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Security Context should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Security Context When creating a pod with readOnlyRootFilesystem should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Security Context When creating a pod with readOnlyRootFilesystem should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Security Context When creating a pod with privileged should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Security Context When creating a pod with privileged should run the container as privileged when true [LinuxOnly] [NodeFeature:HostAccess] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Security Context When creating a container with runAsUser should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Security Context When creating a container with runAsUser should run the container with uid 0 [LinuxOnly] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Security Context When creating a container with runAsNonRoot should run with an image specified user ID [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Security Context When creating a container with runAsNonRoot should run with an explicit non-root user ID [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Security Context When creating a container with runAsNonRoot should not run without a specified user ID [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Security Context When creating a container with runAsNonRoot should not run with an explicit root user ID [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Secrets should patch a secret [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Secrets should fail to create secret due to empty secret key [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Secrets should be consumable from pods in env vars [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] RuntimeClass should support RuntimeClasses API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Probing container with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Probing container with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Probing container should not be ready with an exec readiness probe timeout [MinimumKubeletVersion:1.20] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Probing container should have monotonically increasing restart count [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Probing container should be restarted with an exec liveness probe with timeout [MinimumKubeletVersion:1.20] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Probing container should be restarted with a local redirect http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Probing container should be restarted with a failing exec liveness probe that took longer than the timeout [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Probing container should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Probing container should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Probing container should be restarted startup probe fails [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Probing container should be restarted by liveness probe after startup probe enables it [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Probing container should *not* be restarted with a non-local redirect http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Probing container should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Probing container should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Probing container should *not* be restarted by liveness probe because startup probe delays it [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] PrivilegedPod [NodeConformance] should enable privileged commands [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] PreStop should call prestop when killing a pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] PreStop graceful pod terminated should wait until preStop hook completes the process [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Pods should support retrieving logs from the container over websockets [NodeConformance] [Conformance] [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Pods should support remote command execution over websockets [NodeConformance] [Conformance] [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Pods should support pod readiness gates [NodeFeature:PodReadinessGate] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Pods should run through the lifecycle of Pods and PodStatus [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Pods should get a host IP [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Pods should delete a collection of pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Pods should contain environment variables for services [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Pods should be updated [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Pods should be submitted and removed [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Pods should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Pods Extended Pods Set QOS Class should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Pods Extended Pod Container Status should never report success for a pending container [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Pods Extended Delete Grace Period should be submitted and removed [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] PodTemplates should run the lifecycle of PodTemplates [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] PodTemplates should delete a collection of pod templates [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] NodeLease when the NodeLease feature is enabled the kubelet should report node status infrequently [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] NodeLease when the NodeLease feature is enabled the kubelet should create and update a lease in the kube-node-lease namespace [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] NodeLease when the NodeLease feature is enabled should have OwnerReferences set [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] NoExecuteTaintManager Single Pod [Serial] removing taint cancels eviction [Disruptive] [Conformance] [Skipped:SingleReplicaTopology] [Suite:k8s]": {},
- "[sig-node] NoExecuteTaintManager Single Pod [Serial] evicts pods from tainted nodes [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-node] NoExecuteTaintManager Single Pod [Serial] eventually evict pod with finite tolerations from tainted nodes [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-node] NoExecuteTaintManager Single Pod [Serial] doesn't evict pod with tolerations from tainted nodes [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-node] NoExecuteTaintManager Multiple Pods [Serial] only evicts pods without tolerations from tainted nodes [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-node] NoExecuteTaintManager Multiple Pods [Serial] evicts pods with minTolerationSeconds [Disruptive] [Conformance] [Skipped:SingleReplicaTopology] [Suite:k8s]": {},
- "[sig-node] Lease lease API should be available [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] KubeletManagedEtcHosts should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Kubelet when scheduling a read only busybox container should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Kubelet when scheduling a busybox command that always fails in a pod should have an terminated reason [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Kubelet when scheduling a busybox command that always fails in a pod should be possible to delete [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Kubelet when scheduling a busybox command in a pod should print the output to logs [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Kubelet when scheduling a busybox Pod with hostAliases should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] InitContainer [NodeConformance] should not start app containers if init containers fail on a RestartAlways pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] InitContainer [NodeConformance] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] InitContainer [NodeConformance] should invoke init containers on a RestartNever pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] InitContainer [NodeConformance] should invoke init containers on a RestartAlways pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Downward API should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Downward API should provide pod UID as env vars [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Downward API should provide host IP as an env var [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Downward API should provide host IP and pod IP as an env var if pod uses host network [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Downward API should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Downward API should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Docker Containers should use the image defaults if command and args are blank [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Docker Containers should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Docker Containers should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Container Runtime blackbox test when starting a container that exits should run with the expected status [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Container Runtime blackbox test when running a container with a new image should not be able to pull image from invalid registry [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Container Runtime blackbox test when running a container with a new image should not be able to pull from private registry without secret [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Container Runtime blackbox test when running a container with a new image should be able to pull image [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] if TerminationMessagePath is set [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop http hook properly [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop exec hook properly [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart http hook properly [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart exec hook properly [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] ConfigMap should update ConfigMap successfully [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] ConfigMap should run through a ConfigMap lifecycle [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] ConfigMap should fail to create ConfigMap with empty key [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] ConfigMap should be consumable via the environment [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] ConfigMap should be consumable via environment variable [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] pods should successfully create sandboxes by writing child": {}, - "[sig-network] pods should successfully create sandboxes by reading container": {}, - "[sig-network] [Feature:IPv6DualStack] should have ipv4 and ipv6 internal node ip [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] [Feature:IPv6DualStack] should create service with ipv6,v4 cluster ip [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] [Feature:IPv6DualStack] should create service with ipv6 cluster ip [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] [Feature:IPv6DualStack] should create service with ipv4,v6 cluster ip [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] [Feature:IPv6DualStack] should create service with ipv4 cluster ip [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] [Feature:IPv6DualStack] should create pod, add ipv6 and ipv4 ip to pod ips [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] [Feature:IPv6DualStack] should create a single stack service with cluster ip from primary service range [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] [Feature:IPv6DualStack] should be able to reach pod on ipv4 and ipv6 ip [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should update endpoints: udp [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should update endpoints: http [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for service endpoints using hostNetwork [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for pod-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for pod-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for node-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for node-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for endpoint-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for endpoint-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should be able to handle large requests: udp [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary 
IP Family [LinuxOnly] should be able to handle large requests: http [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Services should test the lifecycle of an Endpoint [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Services should serve multiport endpoints from pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Services should release NodePorts on delete [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Services should provide secure master service [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Services should prevent NodePort collisions [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Services should preserve source pod IP for traffic thru service cluster IP [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Services should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] [Skipped:Network/OVNKubernetes] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Services should have session affinity work for NodePort service [LinuxOnly] [Conformance] [Skipped:Network/OVNKubernetes] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Services should have session affinity timeout work for service with type clusterIP [LinuxOnly] [Conformance] [Skipped:Network/OVNKubernetes] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Services should have session affinity timeout work for NodePort service [LinuxOnly] [Conformance] [Skipped:Network/OVNKubernetes] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Services should find a service from listing all namespaces [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Services should create endpoints for unready pods [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Services should complete a service status lifecycle [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Services should check NodePort out-of-range [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Services should be rejected when no endpoints exist [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Services should be possible to connect to a service via ExternalIP when the external IP is not assigned to a node [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Services should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance] [Skipped:Network/OVNKubernetes] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Services should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance] [Skipped:Network/OVNKubernetes] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Services should be able to create a functioning NodePort service [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Services should be able to change the type from NodePort to ExternalName [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Services should be able to change the type from ExternalName to NodePort 
[Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Services should be able to change the type from ExternalName to ClusterIP [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Services should be able to change the type from ClusterIP to ExternalName [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Services should allow pods to hairpin back to themselves through services [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Service endpoints latency should not be very high [Conformance] [Serial] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {}, - "[sig-network] SCTP [Feature:SCTP] [LinuxOnly] should create a Pod with SCTP HostPort [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] SCTP [Feature:SCTP] [LinuxOnly] should allow creating a basic SCTP service with pod and endpoints [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Proxy version v1 should proxy through a service and a pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Proxy version v1 should proxy logs on node with explicit kubelet port using proxy subresource [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Proxy version v1 A set of valid responses are returned for both pod and service ProxyWithPath [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv4] [Skipped:Disconnected] [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Networking Granular Checks: Services should update endpoints: udp [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Networking Granular Checks: Services should update endpoints: http [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Networking Granular Checks: Services should function for pod-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Networking Granular Checks: Services should function for pod-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Networking Granular Checks: Services should function for node-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Networking Granular Checks: Services should function for client IP based session affinity: udp [LinuxOnly] [Skipped:Network/OVNKubernetes] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Networking Granular Checks: Services should function for client IP based session affinity: http [LinuxOnly] [Skipped:Network/OVNKubernetes] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] Networking Granular Checks: Pods should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: udp [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: http [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] NetworkPolicyLegacy [LinuxOnly] 
NetworkPolicy between server and client should support allow-all policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce policy based on PodSelector with MatchExpressions[Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce policy based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should allow ingress access on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should allow ingress access from updated namespace [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should allow ingress access from namespace on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should allow egress access on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] NetworkPolicy API should support creating NetworkPolicy API operations [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] IngressClass [Feature:Ingress] should prevent Ingress creation if more than 1 IngressClass marked as default [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]": {}, - "[sig-network] IngressClass [Feature:Ingress] should not set default value if no default IngressClass [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]": {}, - "[sig-network] IngressClass API should support creating IngressClass API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Ingress API should support creating Ingress API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] EndpointSliceMirroring should mirror a custom Endpoints resource through create update and delete [Conformance] 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] EndpointSlice should support creating EndpointSlice API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] EndpointSlice should have Endpoints and EndpointSlices pointing to API Server [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] EndpointSlice should create and delete Endpoints and EndpointSlices for a Service with a selector specified [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] EndpointSlice should create Endpoints and EndpointSlices for Pods matching a Service [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] DNS should support configurable pod resolv.conf [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] DNS should support configurable pod DNS nameservers [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] DNS should resolve DNS of partial qualified names for the cluster [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] DNS should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] DNS should provide DNS for the cluster [Provider:GCE] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] DNS should provide DNS for the cluster [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] DNS should provide DNS for services [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] DNS should provide DNS for pods for Subdomain [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] DNS should provide DNS for pods for Hostname [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] DNS should provide DNS for ExternalName services [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] DNS should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a NodePort service [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network-edge][Feature:Idling] Unidling should handle many UDP senders (by continuing to drop all packets on the floor) [Serial] [Suite:openshift/conformance/serial]": {}, - "[sig-network-edge][Feature:Idling] Unidling should handle many TCP connections by possibly dropping those over a certain bound [Serial] [Skipped:Network/OVNKubernetes] [Suite:openshift/conformance/serial]": {}, - "[sig-instrumentation] Events should ensure that an event can be fetched, patched, deleted, and listed [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-instrumentation] Events should delete a collection of events [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-instrumentation] Events API should ensure that an event can be fetched, patched, deleted, and listed [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-instrumentation] Events API should delete a collection of events [Conformance] 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-imageregistry][Feature:ImageTriggers][Serial] ImageStream admission TestImageStreamTagsAdmission [Suite:openshift/conformance/serial]": {}, - "[sig-imageregistry][Feature:ImageTriggers][Serial] ImageStream API TestImageStreamWithoutDockerImageConfig [Suite:openshift/conformance/serial]": {}, - "[sig-imageregistry][Feature:ImageTriggers][Serial] ImageStream API TestImageStreamMappingCreate [Suite:openshift/conformance/serial]": {}, - "[sig-etcd][Feature:DisasterRecovery][Disruptive] [Feature:EtcdRecovery] Cluster should restore itself after quorum loss [Serial]": {}, - "[sig-etcd][Feature:DisasterRecovery][Disruptive] [Feature:EtcdRecovery] Cluster should recover from a backup taken on one node and recovered on another [Serial]": {}, - "[sig-cluster-lifecycle] cluster upgrade should complete in 210.00 minutes": {}, - "[sig-cluster-lifecycle] cluster upgrade should complete in 105.00 minutes": {}, - "[sig-cluster-lifecycle] Cluster version operator acknowledges upgrade": {}, - "[sig-cli] Kubectl client kubectl wait should ignore not found error with --for=delete [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Update Demo should create and stop a replication controller [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Simple pod should support port-forward [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Simple pod should support inline execution and attach [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Simple pod should support exec using resource/name [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Simple pod should support exec through kubectl proxy [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Simple pod should support exec through an HTTP proxy [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Simple pod should support exec [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Simple pod should return command exit codes running a successful command [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a successful command [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a failing command [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Simple pod should contain last line of the log [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Proxy server should support proxy with --port 0 [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Proxy server should support --unix-socket=/path [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl version should check is all data is printed [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl taint [Serial] should update the taint on a node [Suite:openshift/conformance/serial] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl taint [Serial] should remove all the taints with the same key off 
a node [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl server-side dry-run should check if kubectl can dry-run update Pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl run pod should create a pod from an image when restart is Never [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl replace should update a single-container pod's image [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl logs should be able to retrieve and filter logs [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl label should update the label on a resource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl get componentstatuses should get componentstatuses [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl expose should create services for rc [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl diff should check if kubectl diff finds a difference for Deployments [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for rc and pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl create quota should reject quota with invalid scopes [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl create quota should create a quota without scopes [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl create quota should create a quota with scopes [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl copy should copy a file from a running Pod [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes control plane services is included in cluster-info [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl cluster-info dump should check if cluster-info dump succeeds [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl client-side validation should create/apply a valid CR with arbitrary-extra properties for CRD with partially-specified validation schema [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl client-side validation should create/apply a valid CR for CRD with validation schema [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl client-side validation should create/apply a CR with unknown fields for CRD with no validation schema [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl apply should reuse port when apply to an existing SVC [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl apply should apply a new configuration to an existing RC [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl apply apply set/view last-applied 
[Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl client Kubectl api-versions should check if v1 is in available api versions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects NO client request should support a client that connects, sends DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl Port forwarding With a server listening on localhost should support forwarding over websockets [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that connects, sends NO DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that connects, sends DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects NO client request should support a client that connects, sends DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 should support forwarding over websockets [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 1 pod to 2 pods [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-auth][Feature:ProjectAPI][Serial] TestUnprivilegedNewProjectDenied [Suite:openshift/conformance/serial]": {}, - "[sig-auth] [Feature:NodeAuthorizer] Getting an existing secret should exit with the Forbidden error [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-auth] [Feature:NodeAuthorizer] Getting an existing configmap should exit with the Forbidden error [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-auth] [Feature:NodeAuthorizer] Getting a secret for a workload the node has access to should succeed [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-auth] [Feature:NodeAuthorizer] Getting a non-existent secret should exit with the Forbidden error, not a NotFound error [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-auth] [Feature:NodeAuthorizer] Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-auth] [Feature:NodeAuthorizer] A node shouldn't be able to delete another node [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-auth] [Feature:NodeAuthorizer] A node shouldn't be able to create another node [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-auth] [Feature:NodeAuthenticator] The kubelet's main port 10250 should reject requests with no credentials [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-auth] [Feature:NodeAuthenticator] The 
kubelet can delegate ServiceAccount tokens to the API server [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-auth] ServiceAccounts should run through the lifecycle of a ServiceAccount [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-auth] ServiceAccounts should mount projected service account token [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-auth] ServiceAccounts should guarantee kube-root-ca.crt exist in any namespace [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-auth] ServiceAccounts should allow opting out of API token automount [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-auth] ServiceAccounts ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-auth] Certificates API [Privileged:ClusterAdmin] should support building a client with a CSR [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-auth] Certificates API [Privileged:ClusterAdmin] should support CSR API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-architecture] platform pods should not fail to start": {}, - "[sig-architecture] platform pods should not exit more than once with a non-zero exit code": {}, - "[sig-architecture] platform pods should not be force deleted with gracePeriod 0": {}, - "[sig-arch] events should not repeat pathologically in e2e namespaces": {}, - "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should validate Statefulset Status endpoints [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should list, patch and delete a collection of StatefulSets [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should implement legacy replacement when the update strategy is OnDelete [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should have a working scale subresource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Should recreate evicted statefulset [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] [Suite:k8s]": {}, - "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] [Suite:k8s]": {}, - "[sig-apps] ReplicationController should test the lifecycle of a ReplicationController [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] ReplicationController should surface a failure condition on a common issue like exceeded quota [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] ReplicationController should serve a basic image on each replica with a public image [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": 
{}, - "[sig-apps] ReplicationController should serve a basic image on each replica with a private image [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] ReplicationController should release no longer matching pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] ReplicaSet should validate Replicaset Status endpoints [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] ReplicaSet should surface a failure condition on a common issue like exceeded quota [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] ReplicaSet should serve a basic image on each replica with a public image [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] ReplicaSet should serve a basic image on each replica with a private image [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] ReplicaSet should adopt matching pods on creation and release no longer matching pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] ReplicaSet Replicaset should have a working scale subresource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] Job should run a job to completion when tasks succeed [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] Job should run a job to completion when tasks sometimes fail and are locally restarted [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] Job should remove pods when job is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] Job should not create pods when created in suspend state [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] Job should fail when exceeds active deadline [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] Job should fail to exceed backoffLimit [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] Job should delete pods when suspended [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] Job should delete a job [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] Job should create pods for an Indexed job with completion indexes and specified hostname [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] Job should adopt matching orphans and release non-matching pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] DisruptionController should update/patch PodDisruptionBudget status [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] DisruptionController should observe PodDisruptionBudget status updated [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] DisruptionController should create a PodDisruptionBudget [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] DisruptionController evictions: too few pods, replicaSet, percentage => should not allow an eviction [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]": {}, - "[sig-apps] DisruptionController evictions: too few pods, absolute => should not allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] DisruptionController evictions: no PDB => should allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] 
DisruptionController evictions: maxUnavailable deny evictions, integer => should not allow an eviction [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]": {}, - "[sig-apps] DisruptionController evictions: maxUnavailable allow single eviction, percentage => should allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] DisruptionController evictions: enough pods, absolute => should allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] DisruptionController Listing PodDisruptionBudgets for all namespaces should list and delete a collection of PodDisruptionBudgets [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] Deployment test Deployment ReplicaSet orphaning and adoption regarding controllerRef [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] Deployment should validate Deployment Status endpoints [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] Deployment should run the lifecycle of a Deployment [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] Deployment iterative rollouts should eventually progress [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] Deployment deployment should support rollover [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] Deployment deployment should delete old replica sets [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] Deployment deployment reaping should cascade to its replica sets and pods [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] Deployment RollingUpdateDeployment should delete old pods and create new ones [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] Deployment RecreateDeployment should delete old pods and create new ones [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] Deployment Deployment should have a working scale subresource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] Daemon set [Serial] should verify changes to a daemon set status [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {}, - "[sig-apps] Daemon set [Serial] should update pod when spec was updated and update strategy is RollingUpdate [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {}, - "[sig-apps] Daemon set [Serial] should run and stop simple daemon [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {}, - "[sig-apps] Daemon set [Serial] should run and stop complex daemon with node affinity [Suite:openshift/conformance/serial] [Suite:k8s]": {}, - "[sig-apps] Daemon set [Serial] should run and stop complex daemon [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {}, - "[sig-apps] Daemon set [Serial] should rollback without unnecessary restarts [Conformance] [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {}, - "[sig-apps] Daemon set [Serial] should retry creating failed daemon pods [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {}, - "[sig-apps] Daemon set [Serial] should not update pod when spec was updated and update strategy is OnDelete [Suite:openshift/conformance/serial] [Suite:k8s]": {}, - "[sig-apps] Daemon set [Serial] should list and 
delete a collection of DaemonSets [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {}, - "[sig-apps] CronJob should support CronJob API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] CronJob should schedule multiple jobs concurrently [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] CronJob should replace jobs when ReplaceConcurrent [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-apps] CronJob should remove from active list jobs that have been deleted [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] CronJob should not schedule new jobs when ForbidConcurrent [Slow] [Conformance] [Suite:k8s]": {}, - "[sig-apps] CronJob should not schedule jobs when suspended [Slow] [Conformance] [Suite:k8s]": {}, - "[sig-apps] CronJob should not emit unexpected warnings [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] CronJob should delete successful finished jobs with limit of one successful job [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] CronJob should delete failed finished jobs with limit of one job [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-apps] CronJob should be able to schedule after more than 100 missed schedule [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] server version should find the server version [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-api-machinery] openshift-apiserver-reused-connection should be available": {}, - "[sig-api-machinery] openshift-apiserver-new-connection should be available": {}, - "[sig-api-machinery] oauth-apiserver-reused-connection should be available": {}, - "[sig-api-machinery] oauth-apiserver-new-connection should be available": {}, - "[sig-api-machinery] kube-apiserver-reused-connection should be available": {}, - "[sig-api-machinery] kube-apiserver-new-connection should be available": {}, - "[sig-api-machinery] kube-apiserver terminates within graceful termination period": {}, - "[sig-api-machinery] health handlers should contain necessary checks [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/vnd.kubernetes.protobuf,application/json\" [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/vnd.kubernetes.protobuf\" [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/json,application/vnd.kubernetes.protobuf\" [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/json\" [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] Watchers should observe an object deletion if it stops meeting the requirements of the selector [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-api-machinery] Watchers should observe add, update, and delete watch notifications on configmaps [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-api-machinery] Watchers should be able to start watching from a specific resource version [Conformance] 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-api-machinery] Watchers should be able to restart watching from the last resource version observed by the previous watch [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-api-machinery] Servers with support for Table transformation should return pod details [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] Servers with support for Table transformation should return generic metadata details across all namespaces for nodes [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] Servers with support for Table transformation should return chunks of table results for list calls [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] Servers with support for Table transformation should return a 406 for a backend which does not implement metadata [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-api-machinery] Servers with support for API chunking should return chunks of results for list calls [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] ServerSideApply should work for subresources [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] ServerSideApply should work for CRDs [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] ServerSideApply should remove a field if it is owned but removed in the apply request [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] ServerSideApply should not remove a field if an owner unsets the field but other managers still have ownership of the field [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] ServerSideApply should ignore conflict errors if force apply is used [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] ServerSideApply should give up ownership of a field if forced applied by a controller [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] ServerSideApply should create an applied object if it does not already exist [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] Server request timeout the request should be served with a default timeout if the specified timeout in the request URL exceeds maximum allowed [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] Server request timeout should return HTTP status code 400 if the user specifies an invalid timeout in the request URL [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] Server request timeout default timeout should be used if the specified timeout in the request URL is 0s [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] ResourceQuota should verify ResourceQuota with terminating scopes. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-api-machinery] ResourceQuota should verify ResourceQuota with cross namespace pod affinity scope using scope-selectors. [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-api-machinery] ResourceQuota should verify ResourceQuota with best effort scope. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {}, - "[sig-api-machinery] ResourceQuota should create a ResourceQuota and ensure its status is promptly calculated. 
[Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a service. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replication controller. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replica set. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a pod. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim with a storage class [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a custom resource. [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a configMap. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] ResourceQuota should be able to update and delete ResourceQuota. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] ResourceQuota [Feature:ScopeSelectors] should verify ResourceQuota with terminating scopes through scope selectors. [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] ResourceQuota [Feature:ScopeSelectors] should verify ResourceQuota with best effort scope using scope-selectors. [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class. [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn). [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists). [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class. [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class. [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class. [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes. [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] Namespaces [Serial] should patch a Namespace [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] Namespaces [Serial] should ensure that all services are removed when a namespace is deleted [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] Namespaces [Serial] should ensure that all pods are removed when a namespace is deleted [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] Namespaces [Serial] should delete fast enough (90 percent of 100 namespaces in 150 seconds) [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-api-machinery] Namespaces [Serial] should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining] [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-api-machinery] Generated clientset should create v1 cronJobs, delete cronJobs, watch cronJobs [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] Generated clientset should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] Garbage collector should support orphan deletion of custom resources [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] Garbage collector should support cascading deletion of custom resources [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] Garbage collector should orphan pods created by rc if deleteOptions.OrphanDependents is nil [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] Garbage collector should orphan pods created by rc if delete options say so [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] Garbage collector should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] Garbage collector should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] Garbage collector should not be blocked by dependency circle [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] Garbage collector should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] Garbage collector should delete pods created by rc when not orphaning [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] Garbage collector should delete jobs and pods created by cronjob [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] Garbage collector should delete RS created by deployment when not orphaning [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] Discovery should validate PreferredVersion for each APIGroup [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] Discovery Custom resource should have storage version hash [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] should include custom resource definition resources in discovery documents [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] custom resource defaulting for requests and from storage works [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition listing custom resource definition objects works [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition getting/updating/patching custom resource definition status sub-resource works [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition creating/deleting custom resource definition objects works [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] CustomResourceDefinition Watch watch on custom resource definition objects [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert from CR v1 to CR v2 [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert a non homogeneous list of CRs [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] Aggregator Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should unconditionally reject operations on fail closed webhook [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate pod and apply defaults after mutation [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with pruning [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with different stored version [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate configmap [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should include webhook resources in discovery documents [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should honor timeout [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny pod and configmap creation [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny custom resource creation, update and deletion [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny attaching pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a validating webhook should work [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a mutating webhook should work [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing validating webhooks should work [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing mutating webhooks should work [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] API priority and fairness should ensure that requests can be classified by adding FlowSchema and PriorityLevelConfiguration [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] API data in etcd should be stored at the correct location and version for all resources [Serial] [Suite:openshift/conformance/serial]": {},
- "[install] [Suite: service-definition] [OSD] regularuser validating webhook regularuser validating webhook Privledged users allowed to create autoscalers and delete clusterversion objects": {},
- "[install] [Suite: service-definition] [OSD] Privileged Containers Privileged containers are not allowed privileged container should not get created": {},
- "[install] [Suite: service-definition] [OSD] NodeLabels Modifying nodeLabels is not allowed node-label cannot be added": {},
- "[install] [Suite: service-definition] [OSD] DaemonSets DaemonSets are not allowed worker node daemonset should get created": {},
- "[install] [Suite: service-definition] [OSD] DaemonSets DaemonSets are not allowed infra node daemonset should get created": {},
- "[install] [Suite: service-definition] [OSD] DaemonSets DaemonSets are not allowed empty node-label daemonset should get created": {},
- "[install] [Suite: operators] [OSD] Splunk Forwarder Operator splunkforwarders admin should be able to manage SplunkForwarders CR": {},
- "[install] [Suite: operators] [OSD] Splunk Forwarder Operator deployment should have all desired replicas ready": {},
- "[install] [Suite: operators] [OSD] Splunk Forwarder Operator deployment should exist": {},
- "[install] [Suite: operators] [OSD] Splunk Forwarder Operator configmaps should exist": {},
- "[install] [Suite: operators] [OSD] Splunk Forwarder Operator clusterServiceVersion openshift-splunk-forwarder-operator/splunk-forwarder-operator should be present and in succeeded state": {},
- "[install] [Suite: operators] [OSD] Splunk Forwarder Operator clusterRoles should exist": {},
- "[install] [Suite: operators] [OSD] Splunk Forwarder Operator clusterRoleBindings should exist": {},
- "[install] [Suite: operators] [OSD] Splunk Forwarder Operator Operator Upgrade should upgrade from the replaced version": {},
- "[install] [Suite: operators] [OSD] RBAC Operator deployment should have all desired replicas ready": {},
- "[install] [Suite: operators] [OSD] RBAC Operator deployment should exist": {},
- "[install] [Suite: operators] [OSD] RBAC Operator configmaps should exist": {},
- "[install] [Suite: operators] [OSD] RBAC Operator clusterServiceVersion openshift-rbac-permissions/rbac-permissions-operator should be present and in succeeded state": {},
- "[install] [Suite: operators] [OSD] RBAC Operator clusterRoles should exist": {},
- "[install] [Suite: operators] [OSD] RBAC Operator Operator Upgrade should upgrade from the replaced version": {},
- "[install] [Suite: operators] [OSD] RBAC Dedicated Admins SubjectPermission SubjectPermission should have the expected ClusterRoles, ClusterRoleBindings and RoleBindinsg": {},
- "[install] [Suite: operators] [OSD] Prune jobs pruner jobs should works deployments-pruner should run successfully": {},
- "[install] [Suite: operators] [OSD] Prune jobs pruner jobs should works builds-pruner should run successfully": {},
- "[install] [Suite: operators] [OSD] OSD Metrics Exporter Basic Test service should exist": {},
- "[install] [Suite: operators] [OSD] OSD Metrics Exporter Basic Test deployment should have all desired replicas ready": {},
- "[install] [Suite: operators] [OSD] OSD Metrics Exporter Basic Test deployment should exist": {},
- "[install] [Suite: operators] [OSD] OSD Metrics Exporter Basic Test clusterServiceVersion openshift-osd-metrics/osd-metrics-exporter should be present and in succeeded state": {},
- "[install] [Suite: operators] [OSD] OSD Metrics Exporter Basic Test clusterRoles should exist": {},
- "[install] [Suite: operators] [OSD] OSD Metrics Exporter Basic Test clusterRoleBindings should exist": {},
- "[install] [Suite: operators] [OSD] OSD Metrics Exporter Basic Test Operator Upgrade should upgrade from the replaced version": {},
- "[install] [Suite: operators] [OSD] Must Gather Operator deployment should have all desired replicas ready": {},
- "[install] [Suite: operators] [OSD] Must Gather Operator deployment should exist": {},
- "[install] [Suite: operators] [OSD] Must Gather Operator configmaps should exist": {},
- "[install] [Suite: operators] [OSD] Must Gather Operator clusterServiceVersion openshift-must-gather-operator/must-gather-operator should be present and in succeeded state": {},
- "[install] [Suite: operators] [OSD] Must Gather Operator clusterRoles should exist": {},
- "[install] [Suite: operators] [OSD] Must Gather Operator as an elevated SRE can manage MustGather CRs in openshift-must-gather-operator namespace": {},
- "[install] [Suite: operators] [OSD] Must Gather Operator as Members of CEE can manage MustGather CRs in openshift-must-gather-operator namespace": {},
- "[install] [Suite: operators] [OSD] Must Gather Operator Operator Upgrade should upgrade from the replaced version": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero backups should be complete": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be forbidden to edit VolumeSnapshotLocations": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be forbidden to edit ServerStatusRequests": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be forbidden to edit Schedules": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be forbidden to edit RestricRepository": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be forbidden to edit Restore": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be forbidden to edit PodVolumeRestores": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be forbidden to edit PodVolumeBackups": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be forbidden to edit DownloadRequests": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be forbidden to edit DeleteBackupRequests": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be forbidden to edit Backups": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be forbidden to edit BackupStorageLocations": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be allowed to edit VolumeSnapshotLocations": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be allowed to edit ServerStatusRequests": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be allowed to edit Schedules": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be allowed to edit RestricRepository": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be allowed to edit Restore": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be allowed to edit PodVolumeRestores": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be allowed to edit PodVolumeBackups": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be allowed to edit DownloadRequests": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be allowed to edit DeleteBackupRequests": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be allowed to edit Backups": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator velero Access should be allowed to edit BackupStorageLocations": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator roles should exist": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator roleBindings should exist": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator deployment should have all desired replicas ready": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator deployment should exist": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator configmaps should exist": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator clusterRoles should exist": {},
- "[install] [Suite: operators] [OSD] Managed Velero Operator clusterRoleBindings should exist": {},
- "[install] [Suite: operators] [OSD] Custom Domains Operator Should allow dedicated-admins to create custom domains Should be resolvable by external services": {},
- "[install] [Suite: operators] [OSD] Configure AlertManager Operator serviceAccounts should exist": {},
- "[install] [Suite: operators] [OSD] Configure AlertManager Operator roles with prefix should exist": {},
- "[install] [Suite: operators] [OSD] Configure AlertManager Operator deployment should have all desired replicas ready": {},
- "[install] [Suite: operators] [OSD] Configure AlertManager Operator deployment should exist": {},
- "[install] [Suite: operators] [OSD] Configure AlertManager Operator configmaps should exist": {},
- "[install] [Suite: operators] [OSD] Configure AlertManager Operator clusterServiceVersion openshift-monitoring/configure-alertmanager-operator should be present and in succeeded state": {},
- "[install] [Suite: operators] [OSD] Configure AlertManager Operator clusterRoles should exist": {},
- "[install] [Suite: operators] [OSD] Configure AlertManager Operator clusterRoleBindings should exist": {},
- "[install] [Suite: operators] [OSD] Configure AlertManager Operator Operator Upgrade should upgrade from the replaced version": {},
- "[install] [Suite: operators] [OSD] Certman Operator certificate secret should be applied when cluster installed certificate secret should be applied to apiserver object": {},
- "[install] [Suite: operators] [OSD] Certman Operator certificate secret should be applied when cluster installed certificate secret exist under openshift-config namespace": {},
- "[install] [Suite: operators] CloudIngressOperator rh-api-test hostname should resolve": {},
- "[install] [Suite: operators] CloudIngressOperator rh-api-test cidr block changes should updated the service": {},
- "[install] [Suite: operators] CloudIngressOperator publishingstrategies dedicated admin should not be allowed to manage publishingstrategies CR": {},
- "[install] [Suite: operators] CloudIngressOperator publishingstrategies cluster admin should be allowed to manage publishingstrategies CR": {},
- "[install] [Suite: operators] CloudIngressOperator deployment should have all desired replicas ready": {},
- "[install] [Suite: operators] CloudIngressOperator deployment should exist": {},
- "[install] [Suite: operators] CloudIngressOperator apischeme dedicated admin should not be allowed to manage apischemes CR": {},
- "[install] [Suite: operators] CloudIngressOperator apischeme cluster admin should be allowed to manage apischemes CR": {},
- "[install] [Suite: operators] CloudIngressOperator apischeme apischemes CR instance must be present on cluster": {},
- "[install] [Suite: operators] AlertmanagerInhibitions should exist": {},
- "[install] [Suite: operators] AlertmanagerInhibitions inhibits ClusterOperatorDegraded": {},
- "[install] [Suite: e2e] [OSD] namespace validating webhook namespace validating webhook dedicated admins cannot manage privileged namespaces": {},
- "[install] [Suite: e2e] [OSD] namespace validating webhook namespace validating webhook Privileged users can manage all namespaces": {},
- "[install] [Suite: e2e] [OSD] namespace validating webhook namespace validating webhook Non-privileged users cannot manage privileged namespaces": {},
- "[install] [Suite: e2e] [OSD] RBAC Dedicated Admins SCC permissions scc-test new SCC does not break pods": {},
- "[install] [Suite: e2e] [OSD] RBAC Dedicated Admins SCC permissions Dedicated Admin permissions should include nonroot": {},
- "[install] [Suite: e2e] [OSD] RBAC Dedicated Admins SCC permissions Dedicated Admin permissions should include anyuid": {},
- "[install] [Suite: e2e] [OSD] RBAC Dedicated Admins SCC permissions Dedicated Admin permissions can create pods with SCCs": {},
- "[install] [Suite: e2e] [OSD] Prometheus Exporters should exist and be running in the cluster": {},
- "[install] [Suite: e2e] [OSD] OCM Quay Fallback uses a quay mirror when quay is unavailable": {},
- "[install] [Suite: e2e] [OSD] OCM Metrics do exist and are not empty": {},
- "[install] [Suite: e2e] [OSD] HTTP Strict Transport Security Validating HTTP strict transport security should be set for openshift-monitoring OSD managed routes": {},
- "[install] [Suite: e2e] [OSD] HTTP Strict Transport Security Validating HTTP strict transport security should be set for openshift-console OSD managed routes": {},
- "[install] [Suite: e2e] Workload (redmine) should get created in the cluster": {},
- "[install] [Suite: e2e] Workload (guestbook) should get created in the cluster": {},
- "[install] [Suite: e2e] Validation Webhook should exist and be running in the cluster": {},
- "[install] [Suite: e2e] Storage storage create PVCs": {},
- "[install] [Suite: e2e] Storage sc-list should be able to be expanded": {},
- "[install] [Suite: e2e] Routes should be functioning for oauth": {},
- "[install] [Suite: e2e] Routes should be functioning for Console": {},
- "[install] [Suite: e2e] Routes should be created for oauth": {},
- "[install] [Suite: e2e] Routes should be created for Console": {},
- "[install] [Suite: e2e] Pods should not be Failed": {},
- "[install] [Suite: e2e] Pods should be Running or Succeeded": {},
- "[install] [Suite: e2e] MachineHealthChecks worker MHC should exist": {},
- "[install] [Suite: e2e] MachineHealthChecks should replace unhealthy nodes": {},
- "[install] [Suite: e2e] MachineHealthChecks infra MHC should exist": {},
- "[install] [Suite: e2e] ImageStreams should exist in the cluster": {},
- "[install] [Suite: e2e] Encrypted Storage in GCP clusters can be created by dedicated admins": {},
- "[install] [Suite: e2e] Cluster state should include Prometheus data": {},
- "[install] [Suite: e2e] Cluster state should have no alerts": {},
- "[install] [Suite: app-builds] OpenShift Application Build E2E should get created in the cluster": {},
- "[bz-service-ca] clusteroperator/service-ca should not change condition/Degraded": {},
- "[bz-service-ca] clusteroperator/service-ca should not change condition/Available": {},
- "[bz-openshift-controller-manager] clusteroperator/openshift-controller-manager should not change condition/Degraded": {},
- "[bz-openshift-controller-manager] clusteroperator/openshift-controller-manager should not change condition/Available": {},
- "[bz-openshift-apiserver] clusteroperator/openshift-apiserver should not change condition/Degraded": {},
- "[bz-openshift-apiserver] clusteroperator/openshift-apiserver should not change condition/Available": {},
- "[bz-kube-storage-version-migrator] clusteroperator/kube-storage-version-migrator should not change condition/Degraded": {},
- "[bz-kube-storage-version-migrator] clusteroperator/kube-storage-version-migrator should not change condition/Available": {},
- "[bz-kube-scheduler] clusteroperator/kube-scheduler should not change condition/Degraded": {},
- "[bz-kube-scheduler] clusteroperator/kube-scheduler should not change condition/Available": {},
- "[bz-kube-controller-manager] clusteroperator/kube-controller-manager should not change condition/Degraded": {},
- "[bz-kube-controller-manager] clusteroperator/kube-controller-manager should not change condition/Available": {},
- "[bz-kube-apiserver] clusteroperator/kube-apiserver should not change condition/Degraded": {},
- "[bz-kube-apiserver] clusteroperator/kube-apiserver should not change condition/Available": {},
- "[bz-config-operator] clusteroperator/config-operator should not change condition/Degraded": {},
- "[bz-config-operator] clusteroperator/config-operator should not change condition/Available": {},
- "[bz-cluster-api] clusteroperator/cluster-api should not change condition/Degraded": {},
- "[bz-cluster-api] clusteroperator/cluster-api should not change condition/Available": {},
- "[bz-cloud-controller-manager] clusteroperator/cloud-controller-manager should not change condition/Degraded": {},
- "[bz-cloud-controller-manager] clusteroperator/cloud-controller-manager should not change condition/Available": {},
- "[bz-baremetal] clusteroperator/baremetal should not change condition/Degraded": {},
- "[bz-baremetal] clusteroperator/baremetal should not change condition/Available": {},
- "[bz-apiserver-auth] clusteroperator/authentication should not change condition/Degraded": {},
- "[bz-apiserver-auth] clusteroperator/authentication should not change condition/Available": {},
- "[bz-Storage] clusteroperator/storage should not change condition/Degraded": {},
- "[bz-Storage] clusteroperator/storage should not change condition/Available": {},
- "[bz-Storage] clusteroperator/csi-snapshot-controller should not change condition/Degraded": {},
- "[bz-Storage] clusteroperator/csi-snapshot-controller should not change condition/Available": {},
- "[bz-Samples] clusteroperator/openshift-samples should not change condition/Degraded": {},
- "[bz-Samples] clusteroperator/openshift-samples should not change condition/Available": {},
- "[bz-Routing] clusteroperator/ingress should not change condition/Degraded": {},
- "[bz-Routing] clusteroperator/ingress should not change condition/Available": {},
- "[bz-OLM] clusteroperator/operator-lifecycle-manager-packageserver should not change condition/Degraded": {},
- "[bz-OLM] clusteroperator/operator-lifecycle-manager-packageserver should not change condition/Available": {},
- "[bz-OLM] clusteroperator/operator-lifecycle-manager-catalog should not change condition/Degraded": {},
- "[bz-OLM] clusteroperator/operator-lifecycle-manager-catalog should not change condition/Available": {},
- "[bz-OLM] clusteroperator/operator-lifecycle-manager should not change condition/Degraded": {},
- "[bz-OLM] clusteroperator/operator-lifecycle-manager should not change condition/Available": {},
- "[bz-OLM] clusteroperator/marketplace should not change condition/Degraded": {},
- "[bz-OLM] clusteroperator/marketplace should not change condition/Available": {},
- "[bz-Node Tuning Operator] clusteroperator/node-tuning should not change condition/Degraded": {},
- "[bz-Node Tuning Operator] clusteroperator/node-tuning should not change condition/Available": {},
- "[bz-Networking] clusteroperator/network should not change condition/Degraded": {},
- "[bz-Networking] clusteroperator/network should not change condition/Available": {},
- "[bz-Monitoring] clusteroperator/monitoring should not change condition/Degraded": {},
- "[bz-Monitoring] clusteroperator/monitoring should not change condition/Available": {},
- "[bz-Management Console] clusteroperator/console should not change condition/Degraded": {},
- "[bz-Management Console] clusteroperator/console should not change condition/Available": {},
- "[bz-Machine Config Operator] clusteroperator/machine-config should not change condition/Degraded": {},
- "[bz-Machine Config Operator] clusteroperator/machine-config should not change condition/Available": {},
- "[bz-Insights Operator] clusteroperator/insights should not change condition/Degraded": {},
- "[bz-Insights Operator] clusteroperator/insights should not change condition/Available": {},
- "[bz-Image Registry] clusteroperator/image-registry should not change condition/Degraded": {},
- "[bz-Image Registry] clusteroperator/image-registry should not change condition/Available": {},
- "[bz-Etcd] clusteroperator/etcd should not change condition/Degraded": {},
- "[bz-Etcd] clusteroperator/etcd should not change condition/Available": {},
- "[bz-DNS] clusteroperator/dns should not change condition/Degraded": {},
- "[bz-DNS] clusteroperator/dns should not change condition/Available": {},
- "[bz-Cloud Credential Operator] clusteroperator/cloud-credential should not change condition/Degraded": {},
- "[bz-Cloud Credential Operator] clusteroperator/cloud-credential should not change condition/Available": {},
- "[bz-Cloud Compute] clusteroperator/machine-approver should not change condition/Degraded": {},
- "[bz-Cloud Compute] clusteroperator/machine-approver should not change condition/Available": {},
- "[bz-Cloud Compute] clusteroperator/machine-api should not change condition/Degraded": {},
- "[bz-Cloud Compute] clusteroperator/machine-api should not change condition/Available": {},
- "[bz-Cloud Compute] clusteroperator/cluster-autoscaler should not change condition/Degraded": {},
- "[bz-Cloud Compute] clusteroperator/cluster-autoscaler should not change condition/Available": {},
- "[Conformance][sig-sno][Serial] Cluster should allow a fast rollout of kube-apiserver [Suite:openshift/conformance/serial/minimal]": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: settings cluster": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: search all-namespaces?kind=config.openshift.io~v1~Console": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: monitoring query-browser": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: k8s ns openshift-monitoring monitoring.coreos.com~v1~Alertmanager main": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: k8s ns openshift-machine-api machine.openshift.io~v1beta1~MachineSet": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: k8s ns openshift-machine-api machine.openshift.io~v1beta1~MachineHealthCheck": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: k8s ns openshift-machine-api machine.openshift.io~v1beta1~Machine": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: k8s ns openshift-machine-api autoscaling.openshift.io~v1beta1~MachineAutoscaler": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: k8s cluster user.openshift.io~v1~User": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: k8s cluster nodes": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: k8s cluster machineconfiguration.openshift.io~v1~MachineConfigPool": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: k8s cluster machineconfiguration.openshift.io~v1~MachineConfig": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: k8s cluster clusterroles view": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: k8s all-namespaces monitoring.coreos.com~v1~Alertmanager": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: k8s all-namespaces import": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: k8s all-namespaces events": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: api-resource ns default core~v1~Pod schema": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: api-resource ns default core~v1~Pod instances": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: api-resource ns default core~v1~Pod access": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: api-resource ns default core~v1~Pod": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: api-explorer": {},
- "Visiting other routes.Visiting other routes successfully displays view for route: ": {},
- "Using OLM descriptor components.Using OLM descriptor components successfully creates operand using form": {},
- "Using OLM descriptor components.Using OLM descriptor components pre-populates Select field": {},
- "Using OLM descriptor components.Using OLM descriptor components pre-populates Password field": {},
- "Using OLM descriptor components.Using OLM descriptor components pre-populates Number field": {},
- "Using OLM descriptor components.Using OLM descriptor components pre-populates Name field": {},
- "Using OLM descriptor components.Using OLM descriptor components pre-populates Labels field": {},
- "Using OLM descriptor components.Using OLM descriptor components pre-populates Field Group": {},
- "Using OLM descriptor components.Using OLM descriptor components pre-populates Array Field Group": {},
- "Using OLM descriptor components.Using OLM descriptor components does not render hidden field group": {},
- "Using OLM descriptor components.Using OLM descriptor components does not display status descriptor for Hidden": {},
- "Using OLM descriptor components.Using OLM descriptor components does not display spec descriptor for Hidden": {},
- "Using OLM descriptor components.Using OLM descriptor components displays status descriptor for W3 Link": {},
- "Using OLM descriptor components.Using OLM descriptor components displays status descriptor for Text": {},
- "Using OLM descriptor components.Using OLM descriptor components displays status descriptor for Prometheus Endpoint": {},
- "Using OLM descriptor components.Using OLM descriptor components displays status descriptor for Pod Statuses": {},
- "Using OLM descriptor components.Using OLM descriptor components displays status descriptor for Pod Count": {},
- "Using OLM descriptor components.Using OLM descriptor components displays status descriptor for Password": {},
- "Using OLM descriptor components.Using OLM descriptor components displays status descriptor for K8s Phase Reason": {},
- "Using OLM descriptor components.Using OLM descriptor components displays status descriptor for K8s Phase": {},
- "Using OLM descriptor components.Using OLM descriptor components displays spec descriptor for Update Strategy": {},
- "Using OLM descriptor components.Using OLM descriptor components displays spec descriptor for Text": {},
- "Using OLM descriptor components.Using OLM descriptor components displays spec descriptor for Resource Requirements": {},
- "Using OLM descriptor components.Using OLM descriptor components displays spec descriptor for Pod Count": {},
- "Using OLM descriptor components.Using OLM descriptor components displays spec descriptor for Pod Anti Affinity": {},
- "Using OLM descriptor components.Using OLM descriptor components displays spec descriptor for Pod Affinity": {},
- "Using OLM descriptor components.Using OLM descriptor components displays spec descriptor for Password": {},
- "Using OLM descriptor components.Using OLM descriptor components displays spec descriptor for Number": {},
- "Using OLM descriptor components.Using OLM descriptor components displays spec descriptor for Node Affinity": {},
- "Using OLM descriptor components.Using OLM descriptor components displays spec descriptor for Namespace Selector": {},
- "Using OLM descriptor components.Using OLM descriptor components displays spec descriptor for Label": {},
- "Using OLM descriptor components.Using OLM descriptor components displays spec descriptor for Image Pull Policy": {},
- "Using OLM descriptor components.Using OLM descriptor components displays spec descriptor for Field Dependency": {},
- "Using OLM descriptor components.Using OLM descriptor components displays spec descriptor for Endpoint List": {},
- "Using OLM descriptor components.Using OLM descriptor components displays spec descriptor for Checkbox": {},
- "Using OLM descriptor components.Using OLM descriptor components displays spec descriptor for Boolean Switch": {},
- "Using OLM descriptor components.Using OLM descriptor components displays spec descriptor for Advanced": {},
- "Using OLM descriptor components.Using OLM descriptor components displays list containing operands": {},
- "Using OLM descriptor components.Using OLM descriptor components displays form for creating operand": {},
- "Using OLM descriptor components.Using OLM descriptor components displays detail view for operand": {},
- "Using OLM descriptor components.Using OLM descriptor components deletes operand": {},
- "Tests Suite.[sig-compute]Subresource Api [rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component] VirtualMachine subresource with a restart endpoint [test_id:2265][posneg:negative] should return an error when VM has not been found but VMI is running": {},
- "Tests Suite.[sig-compute]Subresource Api [rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component] VirtualMachine subresource with a restart endpoint [test_id:1305][posneg:negative] should return an error when VM is not running": {},
- "Tests Suite.[sig-compute]Subresource Api [rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component] VirtualMachine subresource with a restart endpoint [test_id:1304] should restart a VM": {},
- "Tests Suite.[sig-compute]Subresource Api [rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component] VirtualMachine subresource With manual RunStrategy [test_id:3175]Should restart when VM is running": {},
- "Tests Suite.[sig-compute]Subresource Api [rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component] VirtualMachine subresource With manual RunStrategy [test_id:3174]Should not restart when VM is not running": {},
- "Tests Suite.[sig-compute]Subresource Api [rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component] VirtualMachine subresource With RunStrategy RerunOnFailure [test_id:3176]Should restart the VM": {},
- "Tests Suite.[rfe_id:588][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]ContainerDisk [rfe_id:273][crit:medium][vendor:cnv-qe@redhat.com][level:component]Starting with virtio-win with virtio-win as secondary disk [test_id:1467]should boot and have the virtio as sata CDROM": {},
- "Tests Suite.[rfe_id:588][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]ContainerDisk [rfe_id:273][crit:medium][vendor:cnv-qe@redhat.com][level:component]Starting multiple VMIs with ephemeral registry disk [test_id:1465]should success": {},
- "Tests Suite.[rfe_id:588][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]ContainerDisk [rfe_id:273][crit:medium][vendor:cnv-qe@redhat.com][level:component]Starting from custom image location with disk at /custom-disk/downloaded [test_id:1466]should boot normally": {},
- "Tests Suite.[rfe_id:588][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]ContainerDisk [rfe_id:273][crit:medium][vendor:cnv-qe@redhat.com][level:component]Starting and stopping the same VirtualMachineInstance with ephemeral registry disk [test_id:1463][Conformance] should success multiple times": {},
- "Tests Suite.[rfe_id:588][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]ContainerDisk [rfe_id:273][crit:medium][vendor:cnv-qe@redhat.com][level:component]Starting a VirtualMachineInstance with ephemeral registry disk [test_id:1464]should not modify the spec on status update": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Get a VirtualMachineInstance when that not exist [test_id:1649]should return 404": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Delete a VirtualMachineInstance's Pod [test_id:1650]should result in the VirtualMachineInstance moving to a finalized state": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Delete a VirtualMachineInstance with grace period greater than 0 [test_id:1655]should run graceful shutdown": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Delete a VirtualMachineInstance with an active pod. [test_id:1651]should result in pod being terminated": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Delete a VirtualMachineInstance with ACPI and some grace period seconds [rfe_id:273][crit:medium][vendor:cnv-qe@redhat.com][level:component]should result in vmi status succeeded [test_id:1654]with default grace period seconds": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Delete a VirtualMachineInstance with ACPI and some grace period seconds [rfe_id:273][crit:medium][vendor:cnv-qe@redhat.com][level:component]should result in vmi status succeeded [test_id:1653]with set grace period seconds": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance with user-data without k8s secret [test_id:1630]should log warning and proceed once the secret is there": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance with user-data without k8s secret [test_id:1629][posneg:negative]should not be able to start virt-launcher pod": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance with non default namespace [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]should log libvirt start and stop lifecycle events of the domain [test_id:1642]kubevirt-test-alternative": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance with non default namespace [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]should log libvirt start and stop lifecycle events of the domain [test_id:1641]kubevirt-test-default": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance with boot order [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]should be able to boot from selected disk [test_id:1628]Cirros as first boot": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance with boot order [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]should be able to boot from selected disk [test_id:1627]Alpine as first boot": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance with affinity [test_id:1638]the vmi with node affinity and anti-pod affinity should not be scheduled": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance with affinity [test_id:1637]the vmi with node affinity and no conflicts should be scheduled": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance when virt-launcher crashes [Serial][test_id:1631]should be stopped and have Failed phase": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance when name is longer than 63 characters [test_id:1625]should start it": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance when it already exist [test_id:1626]should be rejected": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [test_id:6095]should start in paused state if start strategy set to paused": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [test_id:3197]should log libvirtd debug logs when enabled": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [test_id:3196]should carry kubernetes and kubevirt annotations to pod": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [test_id:3195]should carry annotations to pod": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [test_id:1624]should reject PATCH if schema is invalid": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [test_id:1623]should reject POST if validation webhook deems the spec invalid": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [test_id:1622]should log libvirtd logs": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [test_id:1621]should attach virt-launcher to it": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [test_id:1620]should start it": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [test_id:1619]should success": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [Serial]with node tainted [test_id:1636]the vmi without tolerations should not be scheduled": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [Serial]with node tainted [test_id:1635]the vmi with tolerations should be scheduled": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [Serial]with node feature discovery the vmi with HyperV feature matching a nfd label on a node should be scheduled": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [Serial]with node feature discovery the vmi with EVMCS HyperV feature should have correct hyperv and cpu features auto filled": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [Serial]with node feature discovery [test_id:3204]the vmi with cpu.feature policy 'forbid' should not be scheduled on a node with that cpu feature label": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [Serial]with node feature discovery [test_id:3202]the vmi with cpu.features matching nfd labels on a node should be scheduled": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [Serial]with node feature discovery [test_id:1640]the vmi with cpu.model that cannot match an nfd label on node should not be scheduled": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [Serial]with node feature discovery [test_id:1639]the vmi with cpu.model matching a nfd label on a node should be scheduled": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [Serial]with default cpu model [test_id:3200]should not set default cpu model when vmi has it set": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [Serial]with default cpu model [test_id:3199]should set default cpu model when vmi doesn't have it set": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [Serial]with default cpu model [sig-compute][test_id:3201]should not set cpu model when vmi does not have it set and default cpu model is not set": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [Serial]when virt-handler is responsive [test_id:3198]device plugins should re-register if the kubelet restarts": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [Serial]when virt-handler is responsive [test_id:1633]should indicate that a node is ready for vmis": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [Serial]when virt-handler is not responsive [test_id:1634]the node controller should mark the node as unschedulable when the virt-handler heartbeat has timedout": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance [Serial]when virt-handler crashes [test_id:1632]should recover and continue management": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance VM Accelerated Mode [test_id:1648]Should provide KVM via plugin framework": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance VM Accelerated Mode [test_id:1647]should not enable emulation in virt-launcher": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance VM Accelerated Mode [test_id:1646]should request a KVM and TUN device": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [Serial][rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Killed VirtualMachineInstance [test_id:1657]should be left alone by virt-handler": {},
- "Tests Suite.[rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]VMIlifecycle [Serial][rfe_id:273][crit:high][vendor:cnv-qe@redhat.com][level:component]Killed VirtualMachineInstance [test_id:1656]should be in Failed phase": {},
- "Tests Suite.[rfe_id:151][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]CloudInit UserData [rfe_id:151][crit:medium][vendor:cnv-qe@redhat.com][level:component]A new VirtualMachineInstance with cloudInitNoCloud userDataBase64 source with injected ssh-key [test_id:1616]should have ssh-key under authorized keys": {},
- "Tests Suite.[rfe_id:151][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]CloudInit UserData [rfe_id:151][crit:medium][vendor:cnv-qe@redhat.com][level:component]A new VirtualMachineInstance with cloudInitNoCloud userDataBase64 source [test_id:1615]should have cloud-init data": {},
- "Tests Suite.[rfe_id:151][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]CloudInit UserData [rfe_id:151][crit:medium][vendor:cnv-qe@redhat.com][level:component]A new VirtualMachineInstance with cloudInitNoCloud userData source [test_id:1617]should process provided cloud-init data": {},
- "Tests Suite.[rfe_id:151][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]CloudInit UserData [rfe_id:151][crit:medium][vendor:cnv-qe@redhat.com][level:component]A new VirtualMachineInstance with cloudInitNoCloud networkData [test_id:3183]should have cloud-init network-config from k8s secret": {},
- "Tests Suite.[rfe_id:151][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]CloudInit UserData [rfe_id:151][crit:medium][vendor:cnv-qe@redhat.com][level:component]A new VirtualMachineInstance with cloudInitNoCloud networkData [test_id:3182]should have cloud-init network-config with NetworkDataBase64 source": {},
- "Tests Suite.[rfe_id:151][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]CloudInit UserData [rfe_id:151][crit:medium][vendor:cnv-qe@redhat.com][level:component]A new VirtualMachineInstance with cloudInitNoCloud networkData [test_id:3181]should have cloud-init network-config with NetworkData source": {},
- "Tests Suite.[rfe_id:151][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]CloudInit UserData [rfe_id:151][crit:medium][vendor:cnv-qe@redhat.com][level:component]A new VirtualMachineInstance with cloudInitConfigDrive userDataBase64 source with injected ssh-key [test_id:3178]should have ssh-key under authorized keys": {},
- "Tests Suite.[rfe_id:151][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]CloudInit UserData [rfe_id:151][crit:medium][vendor:cnv-qe@redhat.com][level:component]A new VirtualMachineInstance with cloudInitConfigDrive userDataBase64 source [test_id:3178]should have cloud-init data": {},
- "Tests Suite.[rfe_id:151][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]CloudInit UserData [rfe_id:151][crit:medium][vendor:cnv-qe@redhat.com][level:component]A new VirtualMachineInstance with cloudInitConfigDrive userData source [test_id:3180]should process provided cloud-init data": {},
- "Tests Suite.[rfe_id:151][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]CloudInit UserData [rfe_id:151][crit:medium][vendor:cnv-qe@redhat.com][level:component]A new VirtualMachineInstance with cloudInitConfigDrive networkData [test_id:4622]should have cloud-init meta_data with tagged devices": {},
- "Tests Suite.[rfe_id:151][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]CloudInit UserData [rfe_id:151][crit:medium][vendor:cnv-qe@redhat.com][level:component]A new VirtualMachineInstance with cloudInitConfigDrive networkData [test_id:3187]should have cloud-init userdata and network-config from separate k8s secrets with lowercase labels": {},
- "Tests Suite.[rfe_id:151][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]CloudInit UserData [rfe_id:151][crit:medium][vendor:cnv-qe@redhat.com][level:component]A new VirtualMachineInstance with cloudInitConfigDrive networkData [test_id:3187]should have cloud-init userdata and network-config from separate k8s secrets with camelCase labels": {},
- "Tests Suite.[rfe_id:151][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]CloudInit UserData [rfe_id:151][crit:medium][vendor:cnv-qe@redhat.com][level:component]A new VirtualMachineInstance with cloudInitConfigDrive networkData [test_id:3186]should have cloud-init network-config from k8s secret": {},
- "Tests Suite.[rfe_id:151][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]CloudInit UserData [rfe_id:151][crit:medium][vendor:cnv-qe@redhat.com][level:component]A new VirtualMachineInstance with cloudInitConfigDrive networkData [test_id:3185]should have cloud-init network-config with NetworkDataBase64 source": {},
- "Tests Suite.[rfe_id:151][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]CloudInit UserData [rfe_id:151][crit:medium][vendor:cnv-qe@redhat.com][level:component]A new VirtualMachineInstance with cloudInitConfigDrive networkData [test_id:3184]should have cloud-init network-config with NetworkData source": {},
- "Tests Suite.[rfe_id:151][crit:high][vendor:cnv-qe@redhat.com][level:component][sig-compute]CloudInit UserData [rfe_id:151][crit:medium][vendor:cnv-qe@redhat.com][level:component]A new VirtualMachineInstance [test_id:1618]should take user-data from k8s secret": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine [rfe_id:273]with oc/kubectl as ordinary OCP user trough test service account should succeed with right rights [test_id:2839]should create VM via command line": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine [rfe_id:273]with oc/kubectl as ordinary OCP user trough test service account should fail without right rights [test_id:2914]should create VM via command line": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine [rfe_id:273]with oc/kubectl [test_id:264]should create and delete via command line": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine [rfe_id:273]with oc/kubectl [test_id:243][posneg:negative]should create VM only once": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine [rfe_id:273]with oc/kubectl [test_id:233][posneg:negative]should fail when deleting nonexistent VM": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine [rfe_id:273]with oc/kubectl [test_id:232]should create same manifest twice via command line": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine [rfe_id:273]with oc/kubectl [release-blocker][test_id:299]should create VM via command line using all supported API versions with v1alpha3 api": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine [rfe_id:273]with oc/kubectl [release-blocker][test_id:299]should create VM via command line using all supported API versions with v1 api": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine [Serial]A mutated VirtualMachine given [test_id:3312]should set the default MachineType when created without explicit value": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine [Serial]A mutated VirtualMachine given [test_id:3311]should keep the supplied MachineType when created": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine An invalid VirtualMachine given with a PVC from a Datavolume [sig-storage][test_id:4643]should NOT be rejected when VM template lists a DataVolume, but VM lists PVC VolumeSource": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine An invalid VirtualMachine given [test_id:1519]should reject POST if validation webhoook deems the spec is invalid": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine An invalid VirtualMachine given [test_id:1518]should be rejected on POST": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given cpu/memory in requests/limits should allow int type": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given cpu/memory in requests/limits should allow float type": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given [test_id:4645]should set the Ready condition on VM": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given [test_id:3162]should ignore kubernetes and kubevirt annotations to VMI": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given [test_id:3161]should carry annotations to VMI": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given [test_id:1528]should survive guest shutdown, multiple times": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given [test_id:1527]should not update the VirtualMachineInstance spec if Running": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given [test_id:1526]should start and stop VirtualMachineInstance multiple times": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given [test_id:1525]should stop VirtualMachineInstance if running set to false with ContainerDisk": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given [test_id:1524]should recreate VirtualMachineInstance if the VirtualMachineInstance's pod gets deleted": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given [test_id:1523]should recreate VirtualMachineInstance if it gets deleted": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given [test_id:1522]should remove owner references on the VirtualMachineInstance if it is orphan deleted": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given [test_id:1521]should remove VirtualMachineInstance once the VM is marked for deletion with ContainerDisk": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given [test_id:1520]should update VirtualMachine once VMIs are up with ContainerDisk": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given Using virtctl interface [test_id:3007]Should force restart a VM with terminationGracePeriodSeconds>0": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given Using virtctl interface [test_id:1529]should start a VirtualMachineInstance once": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given Using virtctl interface Using RunStrategyRerunOnFailure [test_id:2188] should not remove a succeeded VMI": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given Using virtctl interface Using RunStrategyRerunOnFailure [test_id:2187] should restart a running VM": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given Using virtctl interface Using RunStrategyRerunOnFailure [test_id:2186] should stop a running VM": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given Using virtctl interface Using RunStrategyManual [test_id:2190] should not remove a succeeded VMI": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given Using virtctl interface Using RunStrategyManual [test_id:2189] should stop": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given Using virtctl interface Using RunStrategyManual [test_id:2036] should start": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given Using virtctl interface Using RunStrategyManual [test_id:2035] should restart": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given Using virtctl interface Using RunStrategyHalted [test_id:2037] should start a stopped VM": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given Using virtctl interface Using RunStrategyAlways [test_id:4119]should migrate a running VM": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given Using virtctl interface Using RunStrategyAlways [test_id:3165]should restart a succeeded VMI": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given Using virtctl interface Using RunStrategyAlways [test_id:3164]should restart a running VM": {},
- "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine
given Using virtctl interface Using RunStrategyAlways [test_id:3163]should stop a running VM": {}, - "Tests Suite.[rfe_id:1177][crit:medium][vendor:cnv-qe@redhat.com][level:component][sig-compute]VirtualMachine A valid VirtualMachine given Using virtctl interface Should force stop a VMI": {}, - "Testing uninstall of Red Hat CodeReady Workspaces Operator.Testing uninstall of Red Hat CodeReady Workspaces Operator successfully uninstalls Operator and deletes all Operands": {}, - "Testing uninstall of Red Hat CodeReady Workspaces Operator.Testing uninstall of Red Hat CodeReady Workspaces Operator installs Red Hat CodeReady Workspaces Operator and CodeReady Workspaces Cluster Instance, then navigates to Operator details page": {}, - "Testing uninstall of Red Hat CodeReady Workspaces Operator.Testing uninstall of Red Hat CodeReady Workspaces Operator attempts to uninstall the Operator, shows 'Error uninstalling Operator' alert": {}, - "Testing uninstall of Red Hat CodeReady Workspaces Operator.Testing uninstall of Red Hat CodeReady Workspaces Operator attempts to uninstall the Operator, shows 'Cannot load Operands' alert": {}, - "Testing uninstall of Red Hat CodeReady Workspaces Operator.Testing uninstall of Red Hat CodeReady Workspaces Operator attempts to uninstall the Operator and delete all Operand Instances, shows 'Error Deleting Operands' alert": {}, - "Testing haproxy router.haproxy config information should be clean when changing the service to another route": {}, - "Testing haproxy router.can set cookie name for reencrypt routes by annotation": {}, - "Testing haproxy router.The router pod should have default resource limits": {}, - "Testing haproxy router.The backend health check interval of reencrypt route can be set by annotation": {}, - "Testing haproxy router.The backend health check interval of passthrough route can be set by annotation": {}, - "Testing haproxy router.The 'headerBufferBytes' and 'headerBufferMaxRewriteBytes' strictly honours the default minimum values during ingrescontroller deployment": {}, - "Testing haproxy router.The \"tune.maxrewrite\" value for haproxy router can be modified with \"headerBufferMaxRewriteBytes\" ingresscontroller parameter": {}, - "Testing haproxy router.The \"tune.bufsize\" value for haproxy router can be modified with \"headerBufferBytes\" ingresscontroller parameter": {}, - "Testing haproxy router.The \"tune.bufsize\" and \"tune.maxwrite\" values can be defined per haproxy router basis": {}, - "Testing haproxy router.Protect from ddos by limiting TCP concurrent connection for route": {}, - "Testing haproxy router.No health check when there is only one endpoint for a route": {}, - "Testing haproxy router.Health check when there are multi service and each service has one backend": {}, - "Testing haproxy router.Disable haproxy hash based sticky session for edge termination routes": {}, - "Testing haproxy router.\"ingress.operator.openshift.io/hard-stop-after\" annotation can be applied on per ingresscontroller basis": {}, - "Testing abrouting.The passthrough route with multiple service will set load balance policy to RoundRobin by default": {}, - "Testing abrouting.Set max backends weight for ab routing": {}, - "Testing abrouting.Endpoint will end up weight 1 when scaled weight per endpoint is less than 1": {}, - "Testing abrouting.Each endpoint gets weight/numberOfEndpoints portion of the requests - edge route": {}, - "Testing abrouting.Could not set more than 3 additional backends for route": {}, - "Testing HTTP Headers related 
scenarios.HTTP request header cases adjustments only gets applied for routes with \"haproxy.router.openshift.io/h1-adjust-case=true\" annotation": {}, - "Testing HTTP Headers related scenarios.HTTP header case using \"headerNameCaseAdjustments\" parameter cannot be modified for the passthrough routes.": {}, - "Testing HTTP Headers related scenarios.HTTP header case can be modified with \"headerNameCaseAdjustments\" ingresscontroller parameter for unsecure/REEN/Edge routes": {}, - "Testing DNS features.integrate DNS operator metrics with Prometheus": {}, - "Testing DNS features.Integrate coredns metrics with monitoring component": {}, - "Testing DNS features.Hostname lookup does not delay when master node down": {}, - "Testing DNS features.DNS can resolve the external domain": {}, - "Testing DNS features.DNS can resolve the ExternalName services": {}, - "Testing DNS features.DNS can resolve the ClusterIP services": {}, - "Testing DNS features.CoreDNS has been upgraded to v1.8.z for OCP4.8 or higher": {}, - "Testing DNS features.CoreDNS cache should use 900s for positive responses and 30s for negative responses": {}, - "Test perspective query parameters.Test perspective query parameters tests Developer query parameter": {}, - "Test perspective query parameters.Test perspective query parameters tests Administrator query parameter": {}, - "Symptom Detection.Undiagnosed panic detected in journal": {}, - "Symptom Detection.Infrastructure - GCP quota exceeded (route to forum-gcp)": {}, - "Symptom Detection.Infrastructure - AWS simulate policy rate-limit": {}, - "Symptom Detection.Bug 1812261: iptables is segfaulting": {}, - "StorageClass.Kubernetes resource CRUD operations StorageClass search view displays created resource instance": {}, - "StorageClass.Kubernetes resource CRUD operations StorageClass edits the resource instance": {}, - "StorageClass.Kubernetes resource CRUD operations StorageClass displays detail view for newly created resource instance": {}, - "StorageClass.Kubernetes resource CRUD operations StorageClass displays a list view for the resource": {}, - "StorageClass.Kubernetes resource CRUD operations StorageClass deletes the resource instance": {}, - "StorageClass.Kubernetes resource CRUD operations StorageClass creates the resource instance": {}, - "StatefulSets.Visiting Overview page StatefulSets shows statefulset details sidebar when item is clicked": {}, - "StatefulSets.Visiting Overview page StatefulSets displays a statefulset in the overview list page": {}, - "StatefulSet.Kubernetes resource CRUD operations StatefulSet search view displays created resource instance": {}, - "StatefulSet.Kubernetes resource CRUD operations StatefulSet edits the resource instance": {}, - "StatefulSet.Kubernetes resource CRUD operations StatefulSet displays detail view for newly created resource instance": {}, - "StatefulSet.Kubernetes resource CRUD operations StatefulSet displays a list view for the resource": {}, - "StatefulSet.Kubernetes resource CRUD operations StatefulSet deletes the resource instance": {}, - "StatefulSet.Kubernetes resource CRUD operations StatefulSet creates the resource instance": {}, - "Skipping Snapshot Tests.Skipping Snapshot Tests No CSI based storage classes are available in this platform": {}, - "Skipping Clone Tests.Skipping Clone Tests No CSI based storage classes are available in this platform": {}, - "ServiceAccount.Kubernetes resource CRUD operations ServiceAccount search view displays created resource instance": {}, - "ServiceAccount.Kubernetes resource 
CRUD operations ServiceAccount edits the resource instance": {}, - "ServiceAccount.Kubernetes resource CRUD operations ServiceAccount displays detail view for newly created resource instance": {}, - "ServiceAccount.Kubernetes resource CRUD operations ServiceAccount displays a list view for the resource": {}, - "ServiceAccount.Kubernetes resource CRUD operations ServiceAccount deletes the resource instance": {}, - "ServiceAccount.Kubernetes resource CRUD operations ServiceAccount creates the resource instance": {}, - "Service.Kubernetes resource CRUD operations Service search view displays created resource instance": {}, - "Service.Kubernetes resource CRUD operations Service edits the resource instance": {}, - "Service.Kubernetes resource CRUD operations Service displays detail view for newly created resource instance": {}, - "Service.Kubernetes resource CRUD operations Service displays a list view for the resource": {}, - "Service.Kubernetes resource CRUD operations Service deletes the resource instance": {}, - "Service.Kubernetes resource CRUD operations Service creates the resource instance": {}, - "Secret.Kubernetes resource CRUD operations Secret search view displays created resource instance": {}, - "Secret.Kubernetes resource CRUD operations Secret edits the resource instance": {}, - "Secret.Kubernetes resource CRUD operations Secret displays detail view for newly created resource instance": {}, - "Secret.Kubernetes resource CRUD operations Secret displays a list view for the resource": {}, - "Secret.Kubernetes resource CRUD operations Secret deletes the resource instance": {}, - "Secret.Kubernetes resource CRUD operations Secret creates the resource instance": {}, - "SCC policy related scenarios.pod should only be created with SC UID in the available range with the SCC restricted.": {}, - "SCC policy related scenarios.[platformmanagement_public_586] Check if the capabilities work in pods": {}, - "SCC policy related scenarios.The SCC will take effect only when the user request the SC in the pod": {}, - "SCC policy related scenarios.OpenShift SCC check, empty seccomp": {}, - "SCC policy related scenarios.OpenShift SCC check, all seccomp allowed": {}, - "SCC policy related scenarios.Create pod with request capabilities conflict with the scc": {}, - "SCC policy related scenarios.4.x User can know which serviceaccount and SA groups can create the podspec against the current sccs": {}, - "Route.Kubernetes resource CRUD operations Route search view displays created resource instance": {}, - "Route.Kubernetes resource CRUD operations Route edits the resource instance": {}, - "Route.Kubernetes resource CRUD operations Route displays detail view for newly created resource instance": {}, - "Route.Kubernetes resource CRUD operations Route displays a list view for the resource": {}, - "Route.Kubernetes resource CRUD operations Route deletes the resource instance": {}, - "Route.Kubernetes resource CRUD operations Route creates the resource instance": {}, - "Roles and RoleBindings.Roles and RoleBindings test Roles detail page breadcrumbs to list page restores last selected project": {}, - "Roles and RoleBindings.Roles and RoleBindings test Roles detail page breadcrumbs to list page restores 'All Projects' dropdown": {}, - "Roles and RoleBindings.Roles and RoleBindings test RoleBindings detail page breadcrumbs to list page restores last selected project": {}, - "Roles and RoleBindings.Roles and RoleBindings test RoleBindings detail page breadcrumbs to list page restores 'All Projects' dropdown": 
{}, - "Roles and RoleBindings.Roles and RoleBindings test ClusterRoles detail page breadcrumbs to list page restores last selected project": {}, - "Roles and RoleBindings.Roles and RoleBindings test ClusterRoles detail page breadcrumbs to list page restores 'All Projects' dropdown": {}, - "Roles and RoleBindings.Roles and RoleBindings test ClusterRoleBindings detail page breadcrumbs to list page restores last selected project": {}, - "Roles and RoleBindings.Roles and RoleBindings test ClusterRoleBindings detail page breadcrumbs to list page restores 'All Projects' dropdown": {}, - "Role.Kubernetes resource CRUD operations Role search view displays created resource instance": {}, - "Role.Kubernetes resource CRUD operations Role edits the resource instance": {}, - "Role.Kubernetes resource CRUD operations Role displays detail view for newly created resource instance": {}, - "Role.Kubernetes resource CRUD operations Role displays a list view for the resource": {}, - "Role.Kubernetes resource CRUD operations Role deletes the resource instance": {}, - "Role.Kubernetes resource CRUD operations Role creates the resource instance": {}, - "ResourceQuota.Kubernetes resource CRUD operations ResourceQuota search view displays created resource instance": {}, - "ResourceQuota.Kubernetes resource CRUD operations ResourceQuota edits the resource instance": {}, - "ResourceQuota.Kubernetes resource CRUD operations ResourceQuota displays detail view for newly created resource instance": {}, - "ResourceQuota.Kubernetes resource CRUD operations ResourceQuota displays a list view for the resource": {}, - "ResourceQuota.Kubernetes resource CRUD operations ResourceQuota deletes the resource instance": {}, - "ResourceQuota.Kubernetes resource CRUD operations ResourceQuota creates the resource instance": {}, - "ResourceQuata for storage.Setting quota for a StorageClass": {}, - "ResourceQuata for storage.Requested storage can not exceed the namespace's storage quota": {}, - "ReplicationController.Kubernetes resource CRUD operations ReplicationController search view displays created resource instance": {}, - "ReplicationController.Kubernetes resource CRUD operations ReplicationController edits the resource instance": {}, - "ReplicationController.Kubernetes resource CRUD operations ReplicationController displays detail view for newly created resource instance": {}, - "ReplicationController.Kubernetes resource CRUD operations ReplicationController displays a list view for the resource": {}, - "ReplicationController.Kubernetes resource CRUD operations ReplicationController deletes the resource instance": {}, - "ReplicationController.Kubernetes resource CRUD operations ReplicationController creates the resource instance": {}, - "ReplicaSet.Kubernetes resource CRUD operations ReplicaSet search view displays created resource instance": {}, - "ReplicaSet.Kubernetes resource CRUD operations ReplicaSet edits the resource instance": {}, - "ReplicaSet.Kubernetes resource CRUD operations ReplicaSet displays detail view for newly created resource instance": {}, - "ReplicaSet.Kubernetes resource CRUD operations ReplicaSet displays a list view for the resource": {}, - "ReplicaSet.Kubernetes resource CRUD operations ReplicaSet deletes the resource instance": {}, - "ReplicaSet.Kubernetes resource CRUD operations ReplicaSet creates the resource instance": {}, - "Regression testing cases.RWO volumes are exclusively mounted on different nodes": {}, - "Regression testing cases.Check the binary files and selinux setting used by storage": 
{}, - "Quota related scenarios.check QoS Tier Guaranteed": {}, - "Quota related scenarios.check QoS Tier Burstable": {}, - "Quota related scenarios.check QoS Tier BestEffort": {}, - "Quota related scenarios.Precious resources should be restrained if they are covered in quota and not configured on the master": {}, - "Quota related scenarios.Precious resources should be consumed without constraint in the absence of a covering quota if they are not configured on the master": {}, - "Quota related scenarios.Admin can restrict the ability to use services.nodeports": {}, - "Quorum Loss and Restore.service-upgrade": {}, - "Quorum Loss and Restore.quorum_restore": {}, - "Quorum Loss and Restore.[sig-storage] [sig-api-machinery] secret-upgrade": {}, - "Quorum Loss and Restore.[sig-apps] statefulset-upgrade": {}, - "Quorum Loss and Restore.[sig-apps] replicaset-upgrade": {}, - "Quorum Loss and Restore.[sig-apps] deployment-upgrade": {}, - "Quorum Loss and Restore.[sig-apps] daemonset-upgrade": {}, - "Project Dashboard.Utilization Card.has duration dropdown": {}, - "Project Dashboard.Utilization Card.has all items": {}, - "Project Dashboard.Status Card.has health indicator": {}, - "Project Dashboard.Resource Quotas Card.shows Resource Quotas": {}, - "Project Dashboard.Launcher Card.is displayed when CR exists": {}, - "Project Dashboard.Inventory Card.has all items": {}, - "Project Dashboard.Details Card.has all fields populated": {}, - "Project Dashboard.Details Card.has View all link": {}, - "Project Dashboard.Dashboard is default details page": {}, - "Project Dashboard.Activity Card.has View events link": {}, - "Project Dashboard.Activity Card.has Pause events button": {}, - "Pod.Kubernetes resource CRUD operations Pod search view displays created resource instance": {}, - "Pod.Kubernetes resource CRUD operations Pod edits the resource instance": {}, - "Pod.Kubernetes resource CRUD operations Pod displays detail view for newly created resource instance": {}, - "Pod.Kubernetes resource CRUD operations Pod displays a list view for the resource": {}, - "Pod.Kubernetes resource CRUD operations Pod deletes the resource instance": {}, - "Pod.Kubernetes resource CRUD operations Pod creates the resource instance": {}, - "PersistentVolumeClaim.Kubernetes resource CRUD operations PersistentVolumeClaim search view displays created resource instance": {}, - "PersistentVolumeClaim.Kubernetes resource CRUD operations PersistentVolumeClaim edits the resource instance": {}, - "PersistentVolumeClaim.Kubernetes resource CRUD operations PersistentVolumeClaim displays detail view for newly created resource instance": {}, - "PersistentVolumeClaim.Kubernetes resource CRUD operations PersistentVolumeClaim displays a list view for the resource": {}, - "PersistentVolumeClaim.Kubernetes resource CRUD operations PersistentVolumeClaim deletes the resource instance": {}, - "PersistentVolumeClaim.Kubernetes resource CRUD operations PersistentVolumeClaim creates the resource instance": {}, - "PersistentVolume.Kubernetes resource CRUD operations PersistentVolume search view displays created resource instance": {}, - "PersistentVolume.Kubernetes resource CRUD operations PersistentVolume edits the resource instance": {}, - "PersistentVolume.Kubernetes resource CRUD operations PersistentVolume displays detail view for newly created resource instance": {}, - "PersistentVolume.Kubernetes resource CRUD operations PersistentVolume displays a list view for the resource": {}, - "PersistentVolume.Kubernetes resource CRUD operations 
PersistentVolume deletes the resource instance": {}, - "PersistentVolume.Kubernetes resource CRUD operations PersistentVolume creates the resource instance": {}, - "Operator upgrade storage": {}, - "Operator upgrade service-ca": {}, - "Operator upgrade operator-lifecycle-manager-packageserver": {}, - "Operator upgrade operator-lifecycle-manager-catalog": {}, - "Operator upgrade operator-lifecycle-manager": {}, - "Operator upgrade openshift-samples": {}, - "Operator upgrade openshift-controller-manager": {}, - "Operator upgrade node-tuning": {}, - "Operator upgrade marketplace": {}, - "Operator upgrade machine-approver": {}, - "Operator upgrade kube-storage-version-migrator": {}, - "Operator upgrade kube-scheduler": {}, - "Operator upgrade kube-apiserver": {}, - "Operator upgrade insights": {}, - "Operator upgrade dns": {}, - "Operator upgrade csi-snapshot-controller": {}, - "Operator upgrade console": {}, - "Operator upgrade config-operator": {}, - "Operator upgrade cluster-autoscaler": {}, - "Operator upgrade cloud-credential": {}, - "Operator upgrade cloud-controller-manager": {}, - "Operator upgrade baremetal": {}, - "Operator related networking scenarios.The clusteroperator should be able to reflect the network operator version corresponding to the OCP version": {}, - "Operator related networking scenarios.Should have a clusteroperator object created under config.openshift.io api group for network-operator": {}, - "Node management.check hooks_dir in crio conf file": {}, - "Node management.[BZ1817568]Liveness probe exec check should succeed": {}, - "Node management.NodeStatus and PodStatus show correct imageID while pulling by digests - 4.x": {}, - "NetworkPolicy.Kubernetes resource CRUD operations NetworkPolicy search view displays created resource instance": {}, - "NetworkPolicy.Kubernetes resource CRUD operations NetworkPolicy edits the resource instance": {}, - "NetworkPolicy.Kubernetes resource CRUD operations NetworkPolicy displays detail view for newly created resource instance": {}, - "NetworkPolicy.Kubernetes resource CRUD operations NetworkPolicy displays a list view for the resource": {}, - "NetworkPolicy.Kubernetes resource CRUD operations NetworkPolicy deletes the resource instance": {}, - "NetworkPolicy.Kubernetes resource CRUD operations NetworkPolicy creates the resource instance": {}, - "Namespace.Namespace lists, creates, and deletes": {}, - "Namespace.Namespace Nav and breadcrumbs restores last selected project when navigating from details to list view": {}, - "Namespace.Namespace Nav and breadcrumbs restores last selected \"All Projects\" when navigating from details to list view": {}, - "Multus-CNI related scenarios.Create pod with Multus bridge CNI plugin without vlan": {}, - "Monitoring: Alerts.Monitoring: Alerts displays and filters the Alerts list page, links to detail pages": {}, - "Monitoring: Alerts.Monitoring: Alerts creates and expires a Silence": {}, - "Machine features testing.Metrics is exposed on https (outline example : | https://machine-approver.openshift-cluster-machine-approver.svc:9192/metrics |)": {}, - "Machine features testing.Metrics is exposed on https (outline example : | https://machine-api-operator.openshift-machine-api.svc:8443/metrics |)": {}, - "Machine features testing.Metrics is exposed on https (outline example : | https://cluster-autoscaler-operator.openshift-machine-api.svc:9192/metrics |)": {}, - "Machine features testing.Machines should be linked to nodes": {}, - "Machine features testing.Baremetal clusteroperator should be 
disabled in any deployment that is not baremetal": {}, - "Localization.Localization pseudolocalizes utilization card": {}, - "Localization.Localization pseudolocalizes navigation": {}, - "Localization.Localization pseudolocalizes monitoring pages": {}, - "Localization.Localization pseudolocalizes masthead": {}, - "Localization.Localization pseudolocalizes activity card": {}, - "LimitRange.Kubernetes resource CRUD operations LimitRange search view displays created resource instance": {}, - "LimitRange.Kubernetes resource CRUD operations LimitRange edits the resource instance": {}, - "LimitRange.Kubernetes resource CRUD operations LimitRange displays detail view for newly created resource instance": {}, - "LimitRange.Kubernetes resource CRUD operations LimitRange displays a list view for the resource": {}, - "LimitRange.Kubernetes resource CRUD operations LimitRange deletes the resource instance": {}, - "LimitRange.Kubernetes resource CRUD operations LimitRange creates the resource instance": {}, - "Job.Kubernetes resource CRUD operations Job search view displays created resource instance": {}, - "Job.Kubernetes resource CRUD operations Job edits the resource instance": {}, - "Job.Kubernetes resource CRUD operations Job displays detail view for newly created resource instance": {}, - "Job.Kubernetes resource CRUD operations Job displays a list view for the resource": {}, - "Job.Kubernetes resource CRUD operations Job deletes the resource instance": {}, - "Job.Kubernetes resource CRUD operations Job creates the resource instance": {}, - "Interacting with the environment variable editor.When a variable is deleted.does not show any variables": {}, - "Interacting with the environment variable editor.When a variable is deleted from a secret.shows the correct variables": {}, - "Interacting with the environment variable editor.When a variable is deleted from a config map.shows the correct variables": {}, - "Interacting with the environment variable editor.When a variable is added.shows the correct variables": {}, - "Interacting with the environment variable editor.When a variable is added from a secret.shows the correct variables": {}, - "Interacting with the environment variable editor.When a variable is added from a config map.shows the correct variables": {}, - "Interacting with the create secret forms.Webhook secret.edits webhook secret": {}, - "Interacting with the create secret forms.Webhook secret.deletes the webhook secret": {}, - "Interacting with the create secret forms.Webhook secret.creates webhook secret": {}, - "Interacting with the create secret forms.Webhook secret.check for edited webhook secret value": {}, - "Interacting with the create secret forms.Webhook secret.check for created webhook secret value": {}, - "Interacting with the create secret forms.Upload configuration file image secret.deletes the image secret created from uploaded configuration file": {}, - "Interacting with the create secret forms.Upload configuration file image secret.creates image secret by uploading configuration file": {}, - "Interacting with the create secret forms.Upload configuration file image secret.check for created image secret values from uploaded configuration file": {}, - "Interacting with the create secret forms.SSH source secrets.edits SSH source secret": {}, - "Interacting with the create secret forms.SSH source secrets.deletes the SSH source secret": {}, - "Interacting with the create secret forms.SSH source secrets.creates SSH source secret": {}, - "Interacting with the create secret 
forms.SSH source secrets.check for edited SSH source secret values": {}, - "Interacting with the create secret forms.SSH source secrets.check for created SSH source secret values": {}, - "Interacting with the create secret forms.Registry credentials image secrets.edits registry credentials image secret": {}, - "Interacting with the create secret forms.Registry credentials image secrets.deletes the registry credentials image secret": {}, - "Interacting with the create secret forms.Registry credentials image secrets.creates registry credentials image secret": {}, - "Interacting with the create secret forms.Registry credentials image secrets.check for edited registry credentials image secret value": {}, - "Interacting with the create secret forms.Registry credentials image secrets.check for created registry credentials image secret values": {}, - "Interacting with the create secret forms.Key/Value secrets.edits Key/Value secret": {}, - "Interacting with the create secret forms.Key/Value secrets.deletes the Key/Value secret": {}, - "Interacting with the create secret forms.Key/Value secrets.creates Key/Value secret": {}, - "Interacting with the create secret forms.Key/Value secrets.check for edited Key/Value secret values": {}, - "Interacting with the create secret forms.Key/Value secrets.check for created Key/Value secret values": {}, - "Interacting with the create secret forms.Basic source secrets.edits basic source secret": {}, - "Interacting with the create secret forms.Basic source secrets.deletes the basic source secret": {}, - "Interacting with the create secret forms.Basic source secrets.creates basic source secret": {}, - "Interacting with the create secret forms.Basic source secrets.check for edited basic source secret values": {}, - "Interacting with the create secret forms.Basic source secrets.check for created basic source secret values": {}, - "Interacting with OperatorHub.Interacting with OperatorHub filters Operators by category": {}, - "Interacting with OperatorHub.Interacting with OperatorHub displays OperatorHub tiles filtered by \"Source\"": {}, - "Interacting with OperatorHub.Interacting with OperatorHub displays OperatorHub tile view with expected available Operators": {}, - "Interacting with OperatorHub.Interacting with OperatorHub displays \"Clear All Filters\" link when text filter removes all Operators from display": {}, - "Interacting with OperatorHub.Interacting with OperatorHub clears text filter when \"Clear All Filters\" link is clicked": {}, - "Interacting with CatalogSource page.Interacting with CatalogSource page renders details about the redhat-operators catalog source": {}, - "Interacting with CatalogSource page.Interacting with CatalogSource page lists all the package manifests for redhat-operators under Operators tab": {}, - "Interacting with CatalogSource page.Interacting with CatalogSource page allows modifying registry poll interval": {}, - "Installing \"Red Hat CodeReady Workspaces\" operator in test-xdenz.Installing \"Red Hat CodeReady Workspaces\" operator in test-xdenz Installs Red Hat CodeReady Workspaces operator in test-xdenz and creates CodeReady Workspaces Cluster operand instance": {}, - "Installing \"Red Hat CodeReady Workspaces\" operator in test-xbzcg.Installing \"Red Hat CodeReady Workspaces\" operator in test-xbzcg Installs Red Hat CodeReady Workspaces operator in test-xbzcg and creates CodeReady Workspaces Cluster operand instance": {}, - "Installing \"Red Hat CodeReady Workspaces\" operator in test-wnday.Installing \"Red Hat CodeReady 
Workspaces\" operator in test-wnday Installs Red Hat CodeReady Workspaces operator in test-wnday and creates CodeReady Workspaces Cluster operand instance": {}, - "Installing \"Red Hat CodeReady Workspaces\" operator in test-ugzfo.Installing \"Red Hat CodeReady Workspaces\" operator in test-ugzfo Installs Red Hat CodeReady Workspaces operator in test-ugzfo and creates CodeReady Workspaces Cluster operand instance": {}, - "Installing \"Red Hat CodeReady Workspaces\" operator in test-rjvdg.Installing \"Red Hat CodeReady Workspaces\" operator in test-rjvdg Installs Red Hat CodeReady Workspaces operator in test-rjvdg and creates CodeReady Workspaces Cluster operand instance": {}, - "Installing \"Red Hat CodeReady Workspaces\" operator in test-nrdtm.Installing \"Red Hat CodeReady Workspaces\" operator in test-nrdtm Installs Red Hat CodeReady Workspaces operator in test-nrdtm and creates CodeReady Workspaces Cluster operand instance": {}, - "Installing \"Red Hat CodeReady Workspaces\" operator in test-kvhdb.Installing \"Red Hat CodeReady Workspaces\" operator in test-kvhdb Installs Red Hat CodeReady Workspaces operator in test-kvhdb and creates CodeReady Workspaces Cluster operand instance": {}, - "Installing \"Red Hat CodeReady Workspaces\" operator in test-jpmko.Installing \"Red Hat CodeReady Workspaces\" operator in test-jpmko Installs Red Hat CodeReady Workspaces operator in test-jpmko and creates CodeReady Workspaces Cluster operand instance": {}, - "Installing \"Red Hat CodeReady Workspaces\" operator in test-iodab.Installing \"Red Hat CodeReady Workspaces\" operator in test-iodab Installs Red Hat CodeReady Workspaces operator in test-iodab and creates CodeReady Workspaces Cluster operand instance": {}, - "Installing \"Red Hat CodeReady Workspaces\" operator in test-fkpev.Installing \"Red Hat CodeReady Workspaces\" operator in test-fkpev Installs Red Hat CodeReady Workspaces operator in test-fkpev and creates CodeReady Workspaces Cluster operand instance": {}, - "Installing \"Red Hat CodeReady Workspaces\" operator in test-dydzb.Installing \"Red Hat CodeReady Workspaces\" operator in test-dydzb Installs Red Hat CodeReady Workspaces operator in test-dydzb and creates CodeReady Workspaces Cluster operand instance": {}, - "Installing \"Red Hat CodeReady Workspaces\" operator in test-bktmu.Installing \"Red Hat CodeReady Workspaces\" operator in test-bktmu Installs Red Hat CodeReady Workspaces operator in test-bktmu and creates CodeReady Workspaces Cluster operand instance": {}, - "Installing \"Red Hat CodeReady Workspaces\" operator in test-ajmhi.Installing \"Red Hat CodeReady Workspaces\" operator in test-ajmhi Installs Red Hat CodeReady Workspaces operator in test-ajmhi and creates CodeReady Workspaces Cluster operand instance": {}, - "Ingress.Kubernetes resource CRUD operations Ingress search view displays created resource instance": {}, - "Ingress.Kubernetes resource CRUD operations Ingress edits the resource instance": {}, - "Ingress.Kubernetes resource CRUD operations Ingress displays detail view for newly created resource instance": {}, - "Ingress.Kubernetes resource CRUD operations Ingress displays a list view for the resource": {}, - "Ingress.Kubernetes resource CRUD operations Ingress deletes the resource instance": {}, - "Ingress.Kubernetes resource CRUD operations Ingress creates the resource instance": {}, - "ImageStream.Kubernetes resource CRUD operations ImageStream search view displays created resource instance": {}, - "ImageStream.Kubernetes resource CRUD operations 
ImageStream edits the resource instance": {}, - "ImageStream.Kubernetes resource CRUD operations ImageStream displays detail view for newly created resource instance": {}, - "ImageStream.Kubernetes resource CRUD operations ImageStream displays a list view for the resource": {}, - "ImageStream.Kubernetes resource CRUD operations ImageStream deletes the resource instance": {}, - "ImageStream.Kubernetes resource CRUD operations ImageStream creates the resource instance": {}, - "HorizontalPodAutoscaler.Kubernetes resource CRUD operations HorizontalPodAutoscaler search view displays created resource instance": {}, - "HorizontalPodAutoscaler.Kubernetes resource CRUD operations HorizontalPodAutoscaler edits the resource instance": {}, - "HorizontalPodAutoscaler.Kubernetes resource CRUD operations HorizontalPodAutoscaler displays detail view for newly created resource instance": {}, - "HorizontalPodAutoscaler.Kubernetes resource CRUD operations HorizontalPodAutoscaler displays a list view for the resource": {}, - "HorizontalPodAutoscaler.Kubernetes resource CRUD operations HorizontalPodAutoscaler deletes the resource instance": {}, - "HorizontalPodAutoscaler.Kubernetes resource CRUD operations HorizontalPodAutoscaler creates the resource instance": {}, - "Helm Release.Helm Release Uninstall Helm Release through Context Menu: HR-01-TC03": {}, - "Helm Release.Helm Release Perform the helm chart upgrade for already upgraded helm chart : HR-08-TC02": {}, - "Helm Release.Helm Release Perform Upgrade action on Helm Release through Context Menu: HR-01-TC04": {}, - "Helm Release.Helm Release Perform Rollback action on Helm Release through Context Menu: HR-08-TC03": {}, - "Helm Release.Helm Release Open the Helm tab on the navigation bar when helm charts are present: HR-05-TC05": {}, - "Helm Release.Helm Release Open the Helm tab on the navigation bar when helm charts are absent: HR-05-TC01": {}, - "Helm Release.Helm Release Install Helm Chart page details: HR-05-TC02": {}, - "Helm Release.Helm Release Install Helm Chart from +Add Page using Form View: HR-06-TC04": {}, - "Helm Release.Helm Release Helm release details page: HR-05-TC13": {}, - "Helm Release.Helm Release Filter out deployed Helm Charts: HR-05-TC06": {}, - "Helm Release.Helm Release Create or Select the project namespace": {}, - "Helm Release.Helm Release Context menu options of helm release: HR-01-TC01": {}, - "Helm Release.Helm Release Actions menu on Helm page after helm chart upgrade: HR-08-TC01": {}, - "Globally installing \"Service Binding Operator\" operator in openshift-operators.Globally installing \"Service Binding Operator\" operator in openshift-operators Globally installs Service Binding Operator operator in openshift-operators and creates ServiceBinding operand": {}, - "Filtering and Searching.Filtering and Searching searches for object by kind, label, and name": {}, - "Filtering and Searching.Filtering and Searching searches for object by kind and label": {}, - "Filtering and Searching.Filtering and Searching filters invalid Pod from object detail": {}, - "Filtering and Searching.Filtering and Searching filters from Pods list": {}, - "Filtering and Searching.Filtering and Searching filters Pod from object detail": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": {}, - "External Storage [Driver: 
ebs.csi.aws.com] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which share the same volume": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which share the same volume": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (xfs)][Slow] volumes should allow exec of files on the volume": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (xfs)][Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly][Feature:VolumeSnapshotDataSource][Feature:VolumeSourceXFS]": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (xfs)][Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (xfs)][Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (xfs)][Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (xfs)][Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (xfs)][Slow] multiVolume [Slow] should access to two volumes 
with different volume mode and retain data across pod recreation on the same node": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (xfs)][Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (ext4)] volumes should store data": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly][Feature:VolumeSnapshotDataSource][Feature:VolumeSourceXFS]": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data 
across pod recreation on different node": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] volumes should store data": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": {}, - "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default 
fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (block volmode)] volumes should store data": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": {},
- "External Storage [Driver: ebs.csi.aws.com] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": {},
- "Events.Events.event view displays created pod": {},
- "Editing labels.Editing labels Adds a resource instance label, updates the resource instance label, and makes sure the link works": {},
- "Dynamic provision via storage class with options.Dynamic provision using storage class with option volumeBindingMode set to WaitForFirstConsumer": {},
- "Dynamic provision via storage class with options.Dynamic provision using storage class with option volumeBindingMode set to Immediate": {},
- "Deployments.Visiting Overview page Deployments shows deployment details sidebar when item is clicked": {},
- "Deployments.Visiting Overview page Deployments displays a deployment in the overview list page": {},
- "DeploymentConfigs.Visiting Overview page DeploymentConfigs shows deploymentconfig details sidebar when item is clicked": {},
- "DeploymentConfigs.Visiting Overview page DeploymentConfigs displays a deploymentconfig in the overview list page": {},
- "DeploymentConfig.Kubernetes resource CRUD operations DeploymentConfig search view displays created resource instance": {},
- "DeploymentConfig.Kubernetes resource CRUD operations DeploymentConfig edits the resource instance": {},
- "DeploymentConfig.Kubernetes resource CRUD operations DeploymentConfig displays detail view for newly created resource instance": {},
- "DeploymentConfig.Kubernetes resource CRUD operations DeploymentConfig displays a list view for the resource": {},
- "DeploymentConfig.Kubernetes resource CRUD operations DeploymentConfig deletes the resource instance": {},
- "DeploymentConfig.Kubernetes resource CRUD operations DeploymentConfig creates the resource instance": {},
- "Deployment.Kubernetes resource CRUD operations Deployment search view displays created resource instance": {},
- "Deployment.Kubernetes resource CRUD operations Deployment edits the resource instance": {},
- "Deployment.Kubernetes resource CRUD operations Deployment displays detail view for newly created resource instance": {},
- "Deployment.Kubernetes resource CRUD operations Deployment displays a list view for the resource": {},
- "Deployment.Kubernetes resource CRUD operations Deployment deletes the resource instance": {},
- "Deployment.Kubernetes resource CRUD operations Deployment creates the resource instance": {},
- "Deploy Image.Deploy Image page.should render project/namespace dropdown disabled when in a project context": {},
- "Deploy Image.Deploy Image page.should render applications dropdown disabled": {},
- "Deploy Image.Deploy Image page.should auto fill in the application": {},
- "Deploy Image.Deploy Image page.can be used to search for an image": {},
- "DaemonSets.Visiting Overview page DaemonSets shows daemonset details sidebar when item is clicked": {},
- "DaemonSets.Visiting Overview page DaemonSets displays a daemonset in the overview list page": {},
- "DaemonSet.Kubernetes resource CRUD operations DaemonSet search view displays created resource instance": {},
- "DaemonSet.Kubernetes resource CRUD operations DaemonSet edits the resource instance": {},
- "DaemonSet.Kubernetes resource CRUD operations DaemonSet displays detail view for newly created resource instance": {},
- "DaemonSet.Kubernetes resource CRUD operations DaemonSet displays a list view for the resource": {},
- "DaemonSet.Kubernetes resource CRUD operations DaemonSet deletes the resource instance": {},
- "DaemonSet.Kubernetes resource CRUD operations DaemonSet creates the resource instance": {},
- "CustomResourceDefinitions.CustomResourceDefinitions creates, displays, and deletes `CustomResourceDefinitions` and creates a new custom resource instance": {},
- "CronJob.Kubernetes resource CRUD operations CronJob search view displays created resource instance": {},
- "CronJob.Kubernetes resource CRUD operations CronJob edits the resource instance": {},
- "CronJob.Kubernetes resource CRUD operations CronJob displays detail view for newly created resource instance": {},
- "CronJob.Kubernetes resource CRUD operations CronJob displays a list view for the resource": {},
- "CronJob.Kubernetes resource CRUD operations CronJob deletes the resource instance": {},
- "CronJob.Kubernetes resource CRUD operations CronJob creates the resource instance": {},
- "Create the different workloads from Add page.Create the different workloads from Add page Upload Jar file page details: A-10-TC01": {},
- "Create the different workloads from Add page.Create the different workloads from Add page Getting started resources on Developer perspective": {},
- "Create the different workloads from Add page.Create the different workloads from Add page Deploy secure image with Runtime icon from external registry: A-02-TC02 (example #1)": {},
- "Create the different workloads from Add page.Create the different workloads from Add page Deploy image with Runtime icon from internal registry: A-02-TC03 (example #1)": {},
- "Create the different workloads from Add page.Create the different workloads from Add page Deploy git workload with devfile from topology page: A-04-TC01": {},
- "Create the different workloads from Add page.Create the different workloads from Add page Deploy Application using Catalog Template \"Other\": A-01-TC02 (example #5)": {},
- "Create the different workloads from Add page.Create the different workloads from Add page Deploy Application using Catalog Template \"Middleware\": A-01-TC02 (example #4)": {},
- "Create the different workloads from Add page.Create the different workloads from Add page Deploy Application using Catalog Template \"Languages\": A-01-TC02 (example #3)": {},
- "Create the different workloads from Add page.Create the different workloads from Add page Deploy Application using Catalog Template \"Databases\": A-01-TC02 (example #2)": {},
- "Create the different workloads from Add page.Create the different workloads from Add page Deploy Application using Catalog Template \"CI/CD\": A-01-TC02 (example #1)": {},
- "Create the different workloads from Add page.Create the different workloads from Add page Create the Database from Add page: A-03-TC01": {},
- "Create the different workloads from Add page.Create the different workloads from Add page Create a workload from YAML file: A-07-TC01": {},
- "Create the different workloads from Add page.Create the different workloads from Add page Create a workload from Docker file with \"Deployment\" as resource type: A-05-TC02 (example #1)": {},
- "Create namespace from install operators.Create namespace from install operators disables default catalog sources from operatorHub details page": {},
- "Create key/value secrets.Create key/value secrets Validate a key/value secret whose value is an ascii file ": {},
- "Create key/value secrets.Create key/value secrets Validate a key/value secret whose value is a unicode file ": {},
- "Create key/value secrets.Create key/value secrets Validate a key/value secret whose value is a binary file ": {},
- "Create a test namespace.creates test namespace if necessary": {},
- "ConfigMap.Kubernetes resource CRUD operations ConfigMap search view displays created resource instance": {},
- "ConfigMap.Kubernetes resource CRUD operations ConfigMap edits the resource instance": {},
- "ConfigMap.Kubernetes resource CRUD operations ConfigMap displays detail view for newly created resource instance": {},
- "ConfigMap.Kubernetes resource CRUD operations ConfigMap displays a list view for the resource": {},
- "ConfigMap.Kubernetes resource CRUD operations ConfigMap deletes the resource instance": {},
- "ConfigMap.Kubernetes resource CRUD operations ConfigMap creates the resource instance": {},
- "Cluster Dashboard.Utilization Card.has duration dropdown": {},
- "Cluster Dashboard.Utilization Card.has all items": {},
- "Cluster Dashboard.Status Card.has health indicators": {},
- "Cluster Dashboard.Status Card.has View alerts link": {},
- "Cluster Dashboard.Inventory Card.has all items": {},
- "Cluster Dashboard.Details Card.has all fields populated": {},
- "Cluster Dashboard.Details Card.has View settings link": {},
- "Cluster Dashboard.Activity Card.has View events link": {},
- "Cluster Dashboard.Activity Card.has Pause events button": {},
- "CSI testing related feature.Configure 'Retain' reclaim policy (outline example : | gp2-csi |)": {},
- "CSI testing related feature.Check CSI Driver Operator installation (outline example : | ebs.csi.aws.com | gp2-csi | aws-ebs-csi-driver-operator | aws-ebs-csi-driver-controller | aws-ebs-csi-driver-node |)": {},
- "CSI testing related feature.CSI dynamic provisioning with different type (outline example : | gp2-csi | st1 | 125Gi |)": {},
- "CSI testing related feature.CSI dynamic provisioning with different type (outline example : | gp2-csi | sc1 | 125Gi |)": {},
- "CRD extensions.ConsoleNotification CRD.displays the ConsoleNotification instance in its new location": {},
- "CRD extensions.ConsoleNotification CRD.displays the ConsoleNotification instance": {},
- "CRD extensions.ConsoleNotification CRD.displays detail view for ConsoleNotification instance": {},
- "CRD extensions.ConsoleNotification CRD.displays YAML editor for modifying the location of ConsoleNotification instance": {},
- "CRD extensions.ConsoleNotification CRD.displays YAML editor for creating a new ConsoleNotification instance": {},
- "CRD extensions.ConsoleNotification CRD.deletes the ConsoleNotification instance": {},
- "CRD extensions.ConsoleNotification CRD.creates a new ConsoleNotification instance": {},
- "CRD extensions.ConsoleLink CRD.displays the ConsoleLink instance in the user menu": {},
- "CRD extensions.ConsoleLink CRD.displays the ConsoleLink instance in the help menu": {},
- "CRD extensions.ConsoleLink CRD.displays detail view for ConsoleLink user menu instance": {},
- "CRD extensions.ConsoleLink CRD.displays detail view for ConsoleLink help menu instance": {},
- "CRD extensions.ConsoleLink CRD.displays YAML editor for creating a new ConsoleLink user menu instance": {},
- "CRD extensions.ConsoleLink CRD.displays YAML editor for creating a new ConsoleLink help menu instance": {},
- "CRD extensions.ConsoleLink CRD.deletes the ConsoleLink user menu instance": {},
- "CRD extensions.ConsoleLink CRD.deletes the ConsoleLink help menu instance": {},
- "CRD extensions.ConsoleLink CRD.creates a new ConsoleLink user menu instance": {},
- "CRD extensions.ConsoleLink CRD.creates a new ConsoleLink help menu instance": {},
- "CRD extensions.ConsoleExternalLogLink CRD.does not display the ConsoleExternalLogLink instance on the test pod": {},
- "CRD extensions.ConsoleExternalLogLink CRD.displays the ConsoleExternalLogLink instance on the test pod": {},
- "CRD extensions.ConsoleExternalLogLink CRD.displays detail view for ConsoleExternalLogLink instance": {},
- "CRD extensions.ConsoleExternalLogLink CRD.displays YAML editor for creating a new ConsoleExternalLogLink instance": {},
- "CRD extensions.ConsoleExternalLogLink CRD.displays YAML editor for adding namespaceFilter to the ConsoleExternalLogLink instance": {},
- "CRD extensions.ConsoleExternalLogLink CRD.deletes the test pod": {},
- "CRD extensions.ConsoleExternalLogLink CRD.deletes the ConsoleExternalLogLink instance": {},
- "CRD extensions.ConsoleExternalLogLink CRD.creates a new test pod to display the ConsoleExternalLogLink instance": {},
- "CRD extensions.ConsoleExternalLogLink CRD.creates a new ConsoleExternalLogLink instance": {},
- "CRD extensions.ConsoleClIDownload CRD.displays the ConsoleCLIDownload instance on the Command Line Tools page": {},
- "CRD extensions.ConsoleClIDownload CRD.displays detail view for ConsoleCLIDownload instance": {},
- "CRD extensions.ConsoleClIDownload CRD.displays YAML editor for creating a new ConsoleCLIDownload instance": {},
- "CRD extensions.ConsoleClIDownload CRD.deletes the ConsoleCLIDownload instance": {},
- "CRD extensions.ConsoleClIDownload CRD.creates a new ConsoleCLIDownload instance": {},
- "Bulk import operation.Bulk import operation successfully import three yaml secret definitions": {},
- "Bulk import operation.Bulk import operation fail to import missing namespaced resources (server validation)": {},
- "BuildConfig.Kubernetes resource CRUD operations BuildConfig search view displays created resource instance": {},
- "BuildConfig.Kubernetes resource CRUD operations BuildConfig edits the resource instance": {},
- "BuildConfig.Kubernetes resource CRUD operations BuildConfig displays detail view for newly created resource instance": {},
- "BuildConfig.Kubernetes resource CRUD operations BuildConfig displays a list view for the resource": {},
- "BuildConfig.Kubernetes resource CRUD operations BuildConfig deletes the resource instance": {},
- "BuildConfig.Kubernetes resource CRUD operations BuildConfig creates the resource instance": {},
- "Backup from one node and recover on another.service-upgrade": {},
- "Backup from one node and recover on another.restore_different_node": {},
- "Backup from one node and recover on another.[sig-storage] [sig-api-machinery] secret-upgrade": {},
- "Backup from one node and recover on another.[sig-apps] statefulset-upgrade": {},
- "Backup from one node and recover on another.[sig-apps] replicaset-upgrade": {},
- "Backup from one node and recover on another.[sig-apps] deployment-upgrade": {},
- "Backup from one node and recover on another.[sig-apps] daemonset-upgrade": {},
- "Auth test.Login test.logs in as kubeadmin user": {},
- "Auth test.Auth test logs in as 'test' user via htpasswd identity provider": {},
- "Auth test.Auth test log in as 'kubeadmin' user": {},
- "Annotations.Annotations Creates, Edits, Updates, and Deletes Annotations": {},
- "All in one volume.Project secrets, configmap not explicitly defining keys for pathing within a volume": {},
- "All in one volume.Project secrets, configmap and downward API into the same volume with normal keys and path": {},
- "Alertmanager: YAML.saves Alertmanager YAML": {},
- "Alertmanager: YAML.displays the Alertmanager YAML page": {},
- "Alertmanager: Configuration.prevents deletion of default receiver": {},
- "Alertmanager: Configuration.prevents deletion and form edit of a receiver with sub-route": {},
- "Alertmanager: Configuration.launches Alert Routing modal, edits and saves correctly": {},
- "Alertmanager: Configuration.edits a receiver correctly": {},
- "Alertmanager: Configuration.displays the Alertmanager Configuration Details page": {},
- "Alertmanager: Configuration.deletes a receiver correctly": {},
- "Alertmanager: Configuration.creates a receiver correctly": {},
- "Add Secret to Workloads.Add Secret to Workloads as Volume.Add Secret to Deployment as Vol": {},
- "Add Secret to Workloads.Add Secret to Workloads as Enviroment Variables.Add Secret to Deployment as Env": {},
- "operator install cloud-credential": {},
- "operator install cloud-controller-manager": {},
- "operator conditions cloud-credential": {},
- "operator conditions cloud-controller-manager": {},
- "[sig-network] Proxy version v1 should proxy logs on node using proxy subresource [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-api-machinery] Watchers should receive events on concurrent watches in same order [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields at the schema root [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] removes definition from spec when one version gets changed to not be served [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-network] Networking Granular Checks: Pods should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-network] Services should serve a basic endpoint from pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] Secrets optional updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] Projected configMap optional updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] ConfigMap optional updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Variable Expansion should allow composing env vars into new env vars [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Secrets should be consumable via the environment [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Probing container should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-node] Docker Containers should be able to override the image's default command and arguments [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-cli] Kubectl client Kubectl patch should add annotations for pods in rc [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-apps] ReplicationController should adopt matching pods on creation [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-apps] Deployment deployment should support proportional scaling [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should deny crd creation [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] PersistentVolumes-local [Volume type: dir] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] PersistentVolumes-local [Volume type: block] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PVC and a pre-bound PV: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] PersistentVolumes NFS when invoking the Recycle reclaim policy should test that a PV becomes Available and is clean after the PVC is deleted. [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : projected [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is non-root [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] Downward API volume should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSIStorageCapacity CSIStorageCapacity used, insufficient capacity [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSIStorageCapacity CSIStorageCapacity unused [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI workload information using mock driver should not be passed when podInfoOnMount=nil [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI workload information using mock driver should not be passed when CSIDriver does not exist [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI workload information using mock driver contain ephemeral=true when using inline volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI online volume expansion should expand volume without restarting pod if attach=on, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI Volume expansion should not expand volume if resizingOnDriver=off, resizingOnSC=on [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI Volume expansion should expand volume without restarting pod if nodeExpansion=off [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI Volume expansion should expand volume by restarting pod if attach=on, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI Volume expansion should expand volume by restarting pod if attach=off, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support two pods which share the same volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] [Feature:Example] Secret should create a pod that reads a secret [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] [Feature:Example] Liveness liveness pods should be automatically restarted [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Security Context should support seccomp unconfined on the pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Probing container should override timeoutGracePeriodSeconds when StartupProbe field is set [Feature:ProbeTerminationGracePeriod] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Probing container should override timeoutGracePeriodSeconds when LivenessProbe field is set [Feature:ProbeTerminationGracePeriod] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Probing container should be ready immediately after startupProbe succeeds [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for cronjob [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends NO DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-auth] ServiceAccounts should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-apps] [Feature:TTLAfterFinished] job should be deleted once it finishes after TTL seconds [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-apps] DisruptionController evictions: enough pods, replicaSet, percentage => should allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] HostPort validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance] [Serial:Self] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-apps] ReplicaSet should list and delete a collection of ReplicaSets [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-apps] DisruptionController should block an eviction until the PDB is updated to allow it [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-apps] DisruptionController should observe that the PodDisruptionBudget status is not updated for unmanaged pods [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should support a 'default-deny-ingress' policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce policy to allow traffic from pods within server namespace based on PodSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce policy based on Ports [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce policy based on PodSelector or NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions[Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should not allow access by TCP when a policy specifies only SCTP [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should deny ingress access to updated pod [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] PersistentVolumes-local Pod with node different from PV's NodeAffinity should fail scheduling due to different NodeSelector [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] Networking Granular Checks: Services should support basic nodePort: udp functionality [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] Networking Granular Checks: Services should function for node-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] Networking Granular Checks: Services should function for multiple endpoint-Services with same selector [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] Networking Granular Checks: Services should function for endpoint-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] Networking Granular Checks: Services should be able to handle large requests: http [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] pods should successfully create sandboxes by getting pod": {},
- "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should adopt matching orphans and release non-matching pods [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-apps] Deployment should not disrupt a cloud load-balancer's connectivity during rollout [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "Operator upgrade kube-controller-manager": {},
- "Operator upgrade ingress": {},
- "Operator upgrade etcd": {},
- "operator conditions kube-storage-version-migrator": {},
- "operator conditions kube-controller-manager": {},
- "operator conditions ingress": {},
- "[sig-storage] [sig-api-machinery] secret-upgrade": {},
- "[sig-apps] replicaset-upgrade": {},
- "[sig-apps] deployment-upgrade": {},
- "[sig-apps] daemonset-upgrade": {},
- "[sig-cluster-lifecycle] ClusterOperators are available and not degraded after upgrade": {},
- "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a secret. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD with validation schema [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields in an embedded object [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-cli] Kubectl client Update Demo should scale a replication controller [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-apps] ReplicaSet Replace and Patch tests [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-storage] PersistentVolumes-local [Volume type: dir] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI mock volume CSI Volume Snapshots [Feature:VolumeSnapshotDataSource] volumesnapshotcontent and pvc in Bound state with deletion timestamp set should not get deleted while snapshot finalizer exists [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Security Context should support seccomp runtime/default [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-cli] Kubectl client Simple pod should return command exit codes running a failing command [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce updated policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should allow ingress access from updated pod [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] Networking Granular Checks: Services should function for endpoint-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] Networking Granular Checks: Services should be able to handle large requests: udp [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] PVC Protection Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a ClusterIP service [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should work with Ingress,Egress specified together [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should support a 'default-deny-all' policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should stop enforcing policies after they are deleted [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should allow egress access to server in CIDR block [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "Operator upgrade network": {},
- "Operator upgrade machine-api": {},
- "operator conditions network": {},
- "[sig-apps] job-upgrade": {},
- "[sig-mco] Machine config pools complete upgrade": {},
- "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group and version but different kinds [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] updates the published spec when one version gets renamed [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-network] Services should be able to update service type to NodePort listening on same port number but different protocols [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Mount propagation should propagate mounts within defined scopes [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] PVC Protection Verify that PVC in active use by a pod is not removed immediately [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] PVC Protection Verify \"immediate\" deletion of a PVC that is not in active use by a pod [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "operator conditions machine-api": {},
- "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of different groups [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD without validation schema [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-operator] OLM should be installed with operatorgroups at version v1 [Suite:openshift/conformance/parallel]": {},
- "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestMultipleImageChangeBuildTriggers [Suite:openshift/conformance/parallel]": {},
- "[sig-cluster-lifecycle][Feature:Machines] Managed cluster should have machine resources [Suite:openshift/conformance/parallel]": {},
- "[sig-arch] Managed cluster should ensure control plane operators do not make themselves unevictable [Suite:openshift/conformance/parallel]": {},
- "[sig-network] Networking IPerf2 [Feature:Networking-Performance] should run iperf2 [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-node] Pods Extended Pod Container lifecycle should not create extra sandbox if all containers are done [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] PersistentVolumes GCEPD should test that deleting the Namespace of a PVC and Pod causes the successful detach of Persistent Disk [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] PersistentVolumes GCEPD should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-operator] OLM should be installed with subscriptions at version v1alpha1 [Suite:openshift/conformance/parallel]": {},
- "[sig-operator] OLM should be installed with installplans at version v1alpha1 [Suite:openshift/conformance/parallel]": {},
- "[sig-arch] Managed cluster should set requests but not limits [Suite:openshift/conformance/parallel]": {},
- "[sig-arch] Managed cluster should ensure control plane pods do not run in best-effort QoS [Suite:openshift/conformance/parallel]": {},
- "[sig-arch] ClusterOperators should define at least one related object that is not a namespace [Suite:openshift/conformance/parallel]": {},
- "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for security.openshift.io/v1, Resource=rangeallocations [Suite:openshift/conformance/parallel]": {},
- "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for image.openshift.io/v1, Resource=images [Suite:openshift/conformance/parallel]": {},
- "Operator upgrade monitoring": {},
- "Operator upgrade image-registry": {},
- "operator conditions monitoring": {},
- "operator conditions image-registry": {},
- "operator conditions etcd": {},
- "[bz-Cluster Version Operator] Verify object deletions after upgrade success": {},
- "[sig-arch] Only known images used by tests": {},
- "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group but different versions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-operator] OLM should be installed with clusterserviceversions at version v1alpha1 [Suite:openshift/conformance/parallel]": {},
- "[sig-operator] OLM should be installed with catalogsources at version v1alpha1 [Suite:openshift/conformance/parallel]": {},
- "[sig-operator] OLM should Implement packages API server and list packagemanifest info with namespace not NULL [Suite:openshift/conformance/parallel]": {},
- "[sig-devex][Feature:Templates] templateinstance object kinds test should create and delete objects from varying API groups [Suite:openshift/conformance/parallel]": {},
- "[sig-coreos] [Conformance] CoreOS bootimages TestBootimagesPresent [Suite:openshift/conformance/parallel/minimal]": {},
- "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Create a rolebinding when there are no restrictions should succeed [Suite:openshift/conformance/parallel]": {},
- "[sig-auth][Feature:OpenShiftAuthorization] RBAC proxy for openshift authz RunLegacyClusterRoleEndpoint should succeed [Suite:openshift/conformance/parallel]": {},
- "[sig-arch] ClusterOperators should define at least one namespace in their lists of related objects [Suite:openshift/conformance/parallel]": {},
- "[sig-apps][Feature:OpenShiftControllerManager] TestDeploymentConfigDefaults [Suite:openshift/conformance/parallel]": {},
- "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for template.openshift.io/v1, Resource=brokertemplateinstances [Suite:openshift/conformance/parallel]": {},
- "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for oauth.openshift.io/v1, Resource=oauthclients [Suite:openshift/conformance/parallel]": {},
- "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for apps.openshift.io/v1, Resource=deploymentconfigs [Suite:openshift/conformance/parallel]": {},
- "[sig-api-machinery][Feature:ResourceQuota] Object count should properly count the number of imagestreams resources [Suite:openshift/conformance/parallel]": {},
- "[sig-arch] Managed cluster should only include cluster daemonsets that have maxUnavailable or maxSurge update of 10 percent or maxUnavailable of 33 percent [Suite:openshift/conformance/parallel]": {},
- "[sig-node] Events should be sent by kubelets and the scheduler about pods scheduling and running [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]": {},
- "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should not deadlock when a pod's predecessor fails [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-imageregistry] Image registry remain available": {},
- "[sig-storage] Managed cluster should have no crashlooping recycler pods over four minutes [Suite:openshift/conformance/parallel]": {},
- "[sig-operator] an end user can use OLM Report Upgradeable in OLM ClusterOperators status [Suite:openshift/conformance/parallel]": {},
- "[sig-operator] OLM should be installed with packagemanifests at version v1 [Suite:openshift/conformance/parallel]": {},
- "[sig-node] should override timeoutGracePeriodSeconds when annotation is set [Suite:openshift/conformance/parallel]": {},
- "[sig-imageregistry][Feature:ImageLookup] Image policy should perform lookup when the object has the resolve-names annotation [Suite:openshift/conformance/parallel]": {},
- "[sig-devex][Feature:OpenShiftControllerManager] TestDockercfgTokenDeletedController [Suite:openshift/conformance/parallel]": {},
- "[sig-cli] oc explain should contain spec+status for builtinTypes [Suite:openshift/conformance/parallel]": {},
- "[sig-cli] oc adm images [Suite:openshift/conformance/parallel]": {},
- "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Create a rolebinding when subject is permitted by RBR should succeed [Suite:openshift/conformance/parallel]": {},
- "[sig-auth][Feature:OpenShiftAuthorization] self-SAR compatibility TestBootstrapPolicySelfSubjectAccessReviews should succeed [Suite:openshift/conformance/parallel]": {},
- "[sig-auth][Feature:OpenShiftAuthorization] scopes TestScopedImpersonation should succeed [Suite:openshift/conformance/parallel]": {},
- "[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the token request URL [Suite:openshift/conformance/parallel]": {},
- "[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the root URL [Suite:openshift/conformance/parallel]": {},
- "[sig-arch] Managed cluster should have operators on the cluster version [Suite:openshift/conformance/parallel]": {},
- "[sig-arch] Managed cluster should ensure platform components have system-* priority class associated [Suite:openshift/conformance/parallel]": {},
- "[sig-apps][Feature:DeploymentConfig] deploymentconfigs keep the deployer pod invariant valid should deal with cancellation of running deployment [Suite:openshift/conformance/parallel]": {},
- "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for user.openshift.io/v1, Resource=groups [Suite:openshift/conformance/parallel]": {},
- "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for image.openshift.io/v1, Resource=imagestreams [Suite:openshift/conformance/parallel]": {},
- "[sig-api-machinery][Feature:APIServer] authenticated browser should get a 200 from / [Suite:openshift/conformance/parallel]": {},
- "[sig-api-machinery] APIServer CR fields validation additionalCORSAllowedOrigins [Suite:openshift/conformance/parallel]": {},
- "[sig-auth][Feature:OAuthServer] OAuth server should use http1.1 only to prevent http2 connection reuse [Suite:openshift/conformance/parallel]": {},
- "[sig-network][endpoints] admission blocks manual creation of Endpoints pointing to the cluster or service network [Suite:openshift/conformance/parallel]": {},
- "[sig-cli] oc secret creates and retrieves expected [Suite:openshift/conformance/parallel]": {},
- "[sig-cli] oc can get list of nodes [Suite:openshift/conformance/parallel]": {},
- "[sig-cli] oc annotate pod [Suite:openshift/conformance/parallel]": {},
- "[sig-network] network isolation when using OpenshiftSDN in a mode that does not isolate namespaces by default should allow communication between pods in different namespaces on different nodes [Suite:openshift/conformance/parallel]": {},
- "[sig-instrumentation] Prometheus when installed on the cluster should have non-Pod host cAdvisor metrics [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {},
- "[sig-imageregistry][Feature:Image] oc tag should work when only imagestreams api is available [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {},
- "[sig-cli] oc builds patch buildconfig [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {},
- "[sig-cli] oc builds get buildconfig [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {},
- "[sig-network] Services should respect internalTrafficPolicy=Local Pod to Pod [Feature:ServiceInternalTrafficPolicy] [Skipped:Network/OVNKubernetes] [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-sippy] install should not timeout": {},
- "[sig-scheduling] SchedulerPredicates [Serial] validates resource limits of pods that are allowed to run [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]": {},
- "[sig-operator] OLM should have imagePullPolicy:IfNotPresent on thier deployments [Suite:openshift/conformance/parallel]": {},
- "[sig-imageregistry][Feature:Image] signature TestImageAddSignature [Suite:openshift/conformance/parallel]": {},
- "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestSimpleImageChangeBuildTriggerFromImageStreamTagSTI [Suite:openshift/conformance/parallel]": {},
- "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestSimpleImageChangeBuildTriggerFromImageStreamTagDockerWithConfigChange [Suite:openshift/conformance/parallel]": {},
- "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestSimpleImageChangeBuildTriggerFromImageStreamTagDocker [Suite:openshift/conformance/parallel]": {},
- "[sig-imageregistry][Feature:ImageLookup] Image policy should perform lookup when the Deployment gets the resolve-names annotation later [Suite:openshift/conformance/parallel]": {},
- "[sig-devex][Feature:Templates] template-api TestTemplateTransformationFromConfig [Suite:openshift/conformance/parallel]": {},
- "[sig-cli] oc explain list uncovered GroupVersionResources [Suite:openshift/conformance/parallel]": {},
- "[sig-cli] oc debug ensure debug does not depend on a container actually existing for the selected resource [Suite:openshift/conformance/parallel]": {},
- "[sig-cli] oc adm role-reapers [Suite:openshift/conformance/parallel]": {},
- "[sig-cli] oc adm must-gather runs successfully with options [Suite:openshift/conformance/parallel]": {},
- "[sig-cli] oc adm groups [Suite:openshift/conformance/parallel]": {},
- "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Create a rolebinding when subject is not already bound and is not permitted by any RBR should fail [Suite:openshift/conformance/parallel]": {},
- "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Create a rolebinding when subject is already bound should succeed [Suite:openshift/conformance/parallel]": {},
- "[sig-auth][Feature:OpenShiftAuthorization] self-SAR compatibility TestSelfSubjectAccessReviewsNonExistingNamespace should succeed [Suite:openshift/conformance/parallel]": {},
- "[sig-auth][Feature:OpenShiftAuthorization] RBAC proxy for openshift authz RunLegacyClusterRoleBindingEndpoint should succeed [Suite:openshift/conformance/parallel]": {},
- "[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the login URL for when there is only one IDP [Suite:openshift/conformance/parallel]": {},
- "[sig-arch] ocp payload should be based on existing source OLM version should contain the source commit id [Suite:openshift/conformance/parallel]": {},
- "[sig-arch] Managed cluster should ensure pods use downstream images from our release image with proper ImagePullPolicy [Suite:openshift/conformance/parallel]": {},
- "[sig-apps][Feature:OpenShiftControllerManager] TestTriggers_imageChange [Suite:openshift/conformance/parallel]": {},
- "[sig-apps][Feature:OpenShiftControllerManager] TestTriggers_MultipleICTs [Suite:openshift/conformance/parallel]": {},
- "[sig-apps][Feature:DeploymentConfig] deploymentconfigs ignores deployer and lets the config with a NewReplicationControllerCreated reason should let the deployment config with a NewReplicationControllerCreated reason [Suite:openshift/conformance/parallel]": {},
- "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for user.openshift.io/v1, Resource=users [Suite:openshift/conformance/parallel]": {},
- "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for template.openshift.io/v1, Resource=templates [Suite:openshift/conformance/parallel]": {},
- "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for template.openshift.io/v1, Resource=templateinstances [Suite:openshift/conformance/parallel]": {},
- "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for oauth.openshift.io/v1, Resource=oauthclientauthorizations [Suite:openshift/conformance/parallel]": {},
- "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for oauth.openshift.io/v1, Resource=oauthauthorizetokens [Suite:openshift/conformance/parallel]": {},
- "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for build.openshift.io/v1, Resource=buildconfigs [Suite:openshift/conformance/parallel]": {},
- "[sig-cli] oc service creates and deletes services [Suite:openshift/conformance/parallel]": {},
- "[sig-cli] oc project --show-labels works for projects [Suite:openshift/conformance/parallel]": {},
- "[sig-cli] oc basics can create and interact with a list of resources [Suite:openshift/conformance/parallel]": {},
- "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]": {},
- "[sig-imageregistry][Feature:ImageTriggers][Serial] ImageStream admission TestImageStreamAdmitStatusUpdate [Suite:openshift/conformance/serial]": {},
- "[sig-imageregistry][Feature:ImageTriggers][Serial] ImageStream admission TestImageStreamAdmitSpecUpdate [Suite:openshift/conformance/serial]": {},
- "[sig-imageregistry][Feature:ImageTriggers][Serial] ImageStream API TestImageStreamTagLifecycleHook [Suite:openshift/conformance/serial]": {},
- "[sig-auth][Feature:LDAP][Serial] ldap group sync can sync groups from ldap [Suite:openshift/conformance/serial]": {},
- "[sig-instrumentation] Prometheus when installed on the cluster should have a AlertmanagerReceiversNotConfigured alert in firing state [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {},
- "[sig-cli] oc debug ensure it works with image streams [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {},
- "[sig-network][Feature:Router] The HAProxy router should serve routes that were created from an ingress [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {},
- "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications with PVCs [Suite:openshift/conformance/parallel] [Suite:k8s]": {},
- "[sig-network] services basic functionality should allow connections to another pod on the same node via a service IP [Suite:openshift/conformance/parallel]": {},
-
"[sig-imageregistry][Feature:Image] oc tag should preserve image reference for external images [Suite:openshift/conformance/parallel]": {}, - "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestSimpleImageChangeBuildTriggerFromImageStreamTagSTIWithConfigChange [Suite:openshift/conformance/parallel]": {}, - "[sig-imageregistry][Feature:ImageLayers] Image layer subresource should identify a deleted image as missing [Suite:openshift/conformance/parallel]": {}, - "[sig-devex][Feature:Templates] templateinstance cross-namespace test should create and delete objects across namespaces [Suite:openshift/conformance/parallel]": {}, - "[sig-devex][Feature:Templates] template-api TestTemplate [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc observe works as expected [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc adm serviceaccounts [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc adm node-logs [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc adm build-chain [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds][webhook] TestWebhookGitHubPushWithImageStream [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds][webhook] TestWebhookGitHubPing [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] remove all builds when build configuration is removed oc delete buildconfig should start builds and delete the buildconfig [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] buildconfig secret injector should inject secrets to the appropriate buildconfigs [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Rolebinding restrictions tests single project should succeed [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Create a rolebinding that also contains system:non-existing users should succeed [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OpenShiftAuthorization] scopes TestTokensWithIllegalScopes should succeed [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OpenShiftAuthorization] scopes TestScopedTokens should succeed [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OpenShiftAuthorization] scopes TestScopeEscalations should succeed [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OpenShiftAuthorization] authorization TestBrowserSafeAuthorizer should succeed [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OpenShiftAuthorization] RBAC proxy for openshift authz RunLegacyLocalRoleEndpoint should succeed [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OpenShiftAuthorization] RBAC proxy for openshift authz RunLegacyEndpointConfirmNoEscalation should succeed [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that do not expire works as expected when using a token authorization flow [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the grant URL [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OAuthServer] OAuth server has the correct token and certificate fallback semantics [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OAuthServer] OAuth 
Authenticator accepts sha256 access tokens [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OAuthServer] ClientSecretWithPlus should create oauthclient [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:Authentication] TestFrontProxy should succeed [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:OpenShiftControllerManager] TestTriggers_imageChange_nonAutomatic [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:OpenShiftControllerManager] TestTriggers_configChange [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs when run iteratively should immediately start a new deployment [Suite:openshift/conformance/parallel]": {}, - "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for build.openshift.io/v1, Resource=builds [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc status returns expected help messages [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc basics can describe an OAuth access token [Suite:openshift/conformance/parallel]": {}, - "[Conformance][sig-api-machinery][Feature:APIServer] local kubeconfig \"lb-int.kubeconfig\" should be present on all masters and work [Suite:openshift/conformance/parallel/minimal]": {}, - "[sig-network][Feature:Router] The HAProxy router should serve the correct routes when scoped to a single namespace and label set [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-network][Feature:Router] The HAProxy router should override the route host with a custom value [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-instrumentation] Prometheus when installed on the cluster should provide named network metrics [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc rsh specific flags should work well when access to a remote shell [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs paused should disable actions on deployments [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "Symptom Detection.Undiagnosed panic detected in pod": {}, - "[sig-imageregistry][Feature:ImageInfo] Image info should display information about images [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-network] Internal connectivity for TCP and UDP on ports 9000-9999 is allowed [Serial:Self] [Suite:openshift/conformance/parallel]": {}, - "[sig-network] NetworkPolicyLegacy [LinuxOnly] NetworkPolicy between server and client should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-network] pods should successfully create sandboxes by not timing out": {}, - "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should provide basic identity [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes 
[Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-node][Late] should not have pod creation failures due to systemd timeouts [Suite:openshift/conformance/parallel]": {}, - "[sig-arch][Late] clients should not use APIs that are removed in upcoming releases [Suite:openshift/conformance/parallel]": {}, - "[sig-api-machinery][Feature:APIServer][Late] kubelet terminates kube-apiserver gracefully [Suite:openshift/conformance/parallel]": {}, - "[sig-api-machinery][Feature:APIServer][Late] kube-apiserver terminates within graceful termination period [Suite:openshift/conformance/parallel]": {}, - "[sig-api-machinery][Feature:APIServer][Late] API LBs follow /readyz of kube-apiserver and stop sending requests [Suite:openshift/conformance/parallel]": {}, - "[sig-api-machinery][Feature:APIServer][Late] API LBs follow /readyz of kube-apiserver and don't send request early [Suite:openshift/conformance/parallel]": {}, - "[sig-storage][Late] Metrics should report short mount times [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-storage][Late] Metrics should report short attach times [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-instrumentation][Late] Alerts shouldn't exceed the 500 series limit of total series sent via telemetry from each cluster [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-network] network isolation when using OpenshiftSDN in a mode that does not isolate namespaces by default should allow communication between pods in different namespaces on the same node [Suite:openshift/conformance/parallel]": {}, - "[sig-imageregistry][Feature:Image] signature TestImageRemoveSignature [Suite:openshift/conformance/parallel]": {}, - "[sig-imageregistry][Feature:ImageLookup] Image policy should update standard Kube object image fields when local names are on [Suite:openshift/conformance/parallel]": {}, - "[sig-devex][Feature:Templates] templateinstance creation with invalid object reports error should report a failure on creation [Suite:openshift/conformance/parallel]": {}, - "[sig-devex][Feature:OpenShiftControllerManager] TestAutomaticCreationOfPullSecrets [Suite:openshift/conformance/parallel]": {}, - "[sig-cluster-lifecycle] CSRs from machines that are not recognized by the cloud provider are not approved [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc explain should contain proper fields description for special types [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc debug does not require a real resource on the server [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc debug dissect deployment config debug [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc adm must-gather when looking at the audit logs [sig-node] kubelet runs apiserver processes strictly sequentially in order to not risk audit log corruption [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc adm must-gather runs successfully for audit logs [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc adm cluster-role-reapers [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc --request-timeout works as expected [Suite:openshift/conformance/parallel]": {}, - 
"[sig-builds][Feature:Builds][webhook] TestWebhook [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:UserAPI] users can manipulate groups [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:SecurityContextConstraints] TestPodUpdateSCCEnforcement [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:SecurityContextConstraints] TestAllowedSCCViaRBAC [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Create a RBAC rolebinding when subject is not already bound and is not permitted by any RBR should fail [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:ProjectAPI] TestUnprivilegedNewProject [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:ProjectAPI] TestProjectIsNamespace should succeed [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OpenShiftAuthorization] scopes TestUnknownScopes should succeed [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OpenShiftAuthorization] authorization TestAuthorizationSubjectAccessReviewAPIGroup should succeed [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OpenShiftAuthorization] RBAC proxy for openshift authz RunLegacyLocalRoleBindingEndpoint should succeed [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OAuthServer] well-known endpoint should be reachable [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the login URL for the bootstrap IDP [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the login URL for the allow all IDP [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:OpenShiftControllerManager] TestTriggers_manual [Suite:openshift/conformance/parallel]": {}, - "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for user.openshift.io/v1, Resource=identities [Suite:openshift/conformance/parallel]": {}, - "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for route.openshift.io/v1, Resource=routes [Suite:openshift/conformance/parallel]": {}, - "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for oauth.openshift.io/v1, Resource=oauthaccesstokens [Suite:openshift/conformance/parallel]": {}, - "[sig-network][endpoints] admission blocks manual creation of EndpointSlices pointing to the cluster or service network [Suite:openshift/conformance/parallel]": {}, - "[sig-network][Feature:Multus] should use multus to create net1 device from network-attachment-definition [Suite:openshift/conformance/parallel]": {}, - "[sig-network-edge][Feature:Idling] Idling with a single service and ReplicationController should idle the service and ReplicationController properly [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc label pod [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc basics can create deploymentconfig and clusterquota [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:ProjectAPI] TestScopedProjectAccess should succeed [Suite:openshift/conformance/parallel]": {}, - "[Conformance][sig-api-machinery][Feature:APIServer] local kubeconfig \"localhost.kubeconfig\" should be present on all masters and work [Suite:openshift/conformance/parallel/minimal]": {}, - "[Conformance][sig-api-machinery][Feature:APIServer] local kubeconfig \"localhost-recovery.kubeconfig\" 
should be present on all masters and work [Suite:openshift/conformance/parallel/minimal]": {}, - "[sig-network] services when using OpenshiftSDN in a mode that does not isolate namespaces by default should allow connections to pods in different namespaces on different nodes via service IPs [Suite:openshift/conformance/parallel]": {}, - "[sig-network][Feature:Router] The HAProxy router should expose the profiling endpoints [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-network][Feature:Router] The HAProxy router should expose a health check on the metrics port [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-network][Feature:Router] The HAProxy router should enable openshift-monitoring to pull metrics [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs won't deploy RC with unresolved images when patched with empty image [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs with enhanced status should include various info in status [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs should respect image stream tag reference policy resolve the image pull spec [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs generation should deploy based on a status version bump [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] build without output image building from templates should create an image from a docker template without an output image reference defined [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] build with empty source started build should build even with an empty source in build config [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-network][Feature:Router] The HAProxy router should support reencrypt to services backed by a serving certificate automatically [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-network][Feature:Router] The HAProxy router should respond with 503 to unrecognized hosts [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-cluster-lifecycle][Feature:Machines][Early] Managed cluster should have same number of Machines and Nodes [Suite:openshift/conformance/parallel]": {}, - "[sig-network] services when using OpenshiftSDN in a mode that does not isolate namespaces by default should allow connections to pods in different namespaces on the same node via service IPs [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc adm who-can [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc adm user-creation [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds][webhook] TestWebhookGitHubPushWithImage [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:UserAPI] groups should work [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that expire shortly works as expected when using a token authorization flow [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the token URL [Suite:openshift/conformance/parallel]": {}, - 
"[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the logout URL [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:LDAP] LDAP should start an OpenLDAP test server [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:HTPasswdAuth] HTPasswd IDP should successfully configure htpasswd and be responsive [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs keep the deployer pod invariant valid should deal with config change in case the deployment is still running [Suite:openshift/conformance/parallel]": {}, - "[sig-api-machinery][Feature:ClusterResourceQuota] Cluster resource quota should control resource limits across namespaces [Suite:openshift/conformance/parallel]": {}, - "[sig-api-machinery][Feature:APIServer] anonymous browsers should get a 403 from / [Suite:openshift/conformance/parallel]": {}, - "[Conformance][sig-api-machinery][Feature:APIServer] local kubeconfig \"lb-ext.kubeconfig\" should be present on all masters and work [Suite:openshift/conformance/parallel/minimal]": {}, - "[sig-cli] oc statefulset creates and deletes statefulsets [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc basics can patch resources [Suite:openshift/conformance/parallel]": {}, - "[sig-network][Feature:Router] The HAProxy router should override the route host for overridden domains with a custom value [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-instrumentation] Prometheus when installed on the cluster shouldn't have failing rules evaluation [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-instrumentation] Prometheus when installed on the cluster should provide ingress metrics [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-instrumentation] Prometheus when installed on the cluster should have important platform topology metrics [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs when tagging images should successfully tag the deployed image [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs when changing image change trigger should successfully trigger from an updated image [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs keep the deployer pod invariant valid should deal with cancellation after deployer pod succeeded [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-cluster-lifecycle] Pods cannot access the /config/master API endpoint [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-network][Feature:Router] The HAProxy router should set Forwarded headers appropriately [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-instrumentation][Late] OpenShift alerting rules should have description and summary annotations [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-instrumentation][Late] OpenShift alerting rules should have a valid severity label [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-instrumentation][Late] OpenShift alerting rules should have a runbook_url annotation if the alert is critical [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-arch] Managed cluster should expose cluster services outside the cluster 
[Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-network-edge][Conformance][Area:Networking][Feature:Router] The HAProxy router should be able to connect to a service that is idled because a GET on the route will unidle it [Skipped:Disconnected] [Suite:openshift/conformance/parallel/minimal]": {}, - "Operator upgrade machine-config": {}, - "promote-release-openshift-machine-os-content-e2e-aws-4.10-ppc64le.Overall": {}, - "[sig-network][Feature:Router] The HAProxy router converges when multiple routers are writing status [Suite:openshift/conformance/parallel]": {}, - "[sig-network][Feature:Router] The HAProxy router converges when multiple routers are writing conflicting status [Suite:openshift/conformance/parallel]": {}, - "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestSimpleImageChangeBuildTriggerFromImageStreamTagCustom [Suite:openshift/conformance/parallel]": {}, - "[sig-devex][Feature:Templates] templateinstance security tests should pass security tests [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc explain should contain proper spec+status for CRDs [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc adm ui-project-commands [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc adm role-selectors [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc adm new-project [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:ProjectAPI] TestInvalidRoleRefs should succeed [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OpenShiftAuthorization] The default cluster RBAC policy should have correct RBAC rules [Suite:openshift/conformance/parallel]": {}, - "[sig-arch] [Conformance] FIPS TestFIPS [Suite:openshift/conformance/parallel/minimal]": {}, - "[sig-apps][Feature:OpenShiftControllerManager] TestDeployScale [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs initially should not deploy if pods never transition to ready [Suite:openshift/conformance/parallel]": {}, - "[sig-network][Feature:Router] The HAProxy router should serve a route that points to two services and respect weights [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-network][Feature:Router] The HAProxy router should run even if it has no access to update status [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-imageregistry][Feature:ImageTriggers] Annotation trigger reconciles after the image is overwritten [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:SecurityContextConstraints] TestPodDefaultCapabilities [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs with env in params referencing the configmap should expand the config map key to a value [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs viewing rollout history should print the rollout history [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs rolled back should rollback to an older deployment [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs should adhere to Three Laws of Controllers [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-imageregistry][Feature:Image] oc tag should change 
image reference for internal images [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc builds new-build [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] verify /run filesystem contents are writeable using a simple Docker Strategy Build [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] build have source revision metadata started build should contain source revision information [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-network] multicast when using one of the OpenshiftSDN modes 'redhat/openshift-ovs-multitenant, redhat/openshift-ovs-networkpolicy' should allow multicast traffic in namespaces where it is enabled [Suite:openshift/conformance/parallel]": {}, - "[sig-instrumentation] Prometheus when installed on the cluster should report telemetry [Serial] [Late] [Skipped:Disconnected] [Suite:openshift/conformance/serial]": {}, - "[sig-cluster-lifecycle] cluster upgrade should complete in 90.00 minutes": {}, - "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestSimpleImageChangeBuildTriggerFromImageStreamTagCustomWithConfigChange [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that do not expire works as expected when using a code authorization flow [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OAuthServer] [Headers] expected headers returned from the authorize URL [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:LDAP] LDAP IDP should authenticate against an ldap server [Suite:openshift/conformance/parallel]": {}, - "Operator upgrade openshift-apiserver": {}, - "[sig-builds][Feature:Builds][volumes] build volumes should mount given secrets and configmaps into the build pod for source strategy builds [Suite:openshift/conformance/parallel]": {}, - "[sig-network] services basic functionality should allow connections to another pod on a different node via a service IP [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs with test deployments should run a deployment to completion and then scale to zero [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs when run iteratively should only deploy the last deployment [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-imageregistry][Feature:ImageLayers] Image layer subresource should return layers from tagged images [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] result image should have proper labels set S2I build from a template should create a image from \"test-s2i-build.json\" template with proper Docker labels [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] result image should have proper labels set Docker build from a template should create a image from \"test-docker-build.json\" template with proper Docker labels [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] build without output image building from templates should create an image from a S2i template without an output image reference defined [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] Optimized 
image builds should succeed [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-network-edge][Conformance][Area:Networking][Feature:Router] The HAProxy router should pass the gRPC interoperability tests [Suite:openshift/conformance/parallel/minimal]": {}, - "[sig-devex][Feature:Templates] templateinstance impersonation tests should pass impersonation update tests [Suite:openshift/conformance/parallel]": {}, - "[sig-devex][Feature:Templates] templateinstance impersonation tests should pass impersonation deletion tests [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:ProjectAPI] TestProjectWatchWithSelectionPredicate should succeed [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:ProjectAPI] TestProjectWatch should succeed [Suite:openshift/conformance/parallel]": {}, - "[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age to generate tokens that expire shortly works as expected when using a code authorization flow [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc can run inside of a busybox container [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds][volumes] build volumes should mount given secrets and configmaps into the build pod for docker strategy builds [Suite:openshift/conformance/parallel]": {}, - "[sig-network] multicast when using one of the OpenshiftSDN modes 'redhat/openshift-ovs-multitenant, redhat/openshift-ovs-networkpolicy' should block multicast traffic in namespaces where it is disabled [Suite:openshift/conformance/parallel]": {}, - "[sig-network][Feature:Router] The HAProxy router should expose prometheus metrics for a route [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] prune builds based on settings in the buildconfig buildconfigs should have a default history limit set when created via the group api [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs with failing hook should get all logs from retried hooks [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs with custom deployments should run the custom deployment steps [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-instrumentation][sig-builds][Feature:Builds] Prometheus when installed on the cluster should start and expose a secured proxy and verify build metrics [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc debug deployment configs from a build [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds][timing] capture build stages and durations should record build stages and durations for docker [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] s2i build with a quota Building from a template should create an s2i build with a quota and run it [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "Operator upgrade authentication": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - 
"[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-auth][Feature:OpenShiftAuthorization] authorization TestAuthorizationSubjectAccessReview should succeed [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds][valueFrom] process valueFrom in build strategy environment variables should fail resolving unresolvable valueFrom in sti build environment variable references [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] oc new-app should fail with a --name longer than 58 characters [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs with revision history limits should never persist more old deployments than acceptable after being observed by the controller [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs with minimum ready seconds set should not transition the deployment to Complete before satisfied [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds][timing] capture build stages and durations should record build stages and durations for s2i [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] verify /run filesystem contents do not have unexpected content using a simple Docker Strategy Build [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-network] pods should successfully create sandboxes by writing network status": {}, - "operator conditions machine-config": {}, - "[sig-devex][Feature:Templates] templateinstance impersonation tests should pass impersonation creation tests [Suite:openshift/conformance/parallel]": {}, - "[sig-network-edge][Feature:Idling] Unidling should work with UDP [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] prune builds based on settings in the buildconfig should prune errored builds based on the failedBuildsHistoryLimit setting [Skipped:Disconnected] 
[Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] prune builds based on settings in the buildconfig should prune canceled builds based on the failedBuildsHistoryLimit setting [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] prune builds based on settings in the buildconfig should prune builds after a buildConfig change [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds][pullsecret] docker build using a pull secret Building from a template should create a docker build that pulls using a secret run it [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] s2i build with a root user image should create a root build and pass with a privileged SCC [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-imageregistry][Feature:ImageExtract] Image extract should extract content from an image [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-devex] check registry.redhat.io is available and samples operator can import sample imagestreams run sample related validations [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds][valueFrom] process valueFrom in build strategy environment variables should fail resolving unresolvable valueFrom in docker build environment variable references [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] prune builds based on settings in the buildconfig should prune failed builds based on the failedBuildsHistoryLimit setting [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-storage] [Serial] Volume metrics should create volume metrics in Volume Manager [Suite:openshift/conformance/serial] [Suite:k8s]": {}, - "[sig-builds][Feature:Builds] clone repository using git:// protocol should clone using git:// if no proxy is configured [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-instrumentation] Prometheus metrics should be available after an upgrade": {}, - "[sig-devex][Feature:Templates] templateinstance readiness test should report failed soon after an annotated objects has failed [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] oc new-app should succeed with an imagestream [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs adoption will orphan all RCs and adopt them back when recreated [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] Multi-stage image builds should succeed [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-node] Managed cluster should report ready nodes the entire duration of the test run [Late] [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc adm must-gather runs successfully [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds][valueFrom] process valueFrom in build strategy environment variables should successfully resolve valueFrom in s2i build environment variables [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - 
"[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-instrumentation] Prometheus when installed on the cluster should start and expose a secured proxy and unsecured metrics [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] imagechangetriggers imagechangetriggers should trigger builds of all types [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-scheduling] Multi-AZ Clusters should spread the pods of a service across zones [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]": {}, - "Symptom Detection.Node process segfaulted": {}, - "[sig-network-edge][Feature:Idling] Unidling should work with TCP (when fully idled) [Skipped:Network/OVNKubernetes] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs with multiple image change triggers should run a successful deployment with a trigger used by different containers [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds][valueFrom] process valueFrom in build strategy environment variables should successfully resolve valueFrom in docker build environment variables [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-devex][Feature:Templates] templateinstance readiness test should report ready soon after all annotated objects are ready [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-network-edge] OAuth remains available via cluster frontend ingress using reused connections": {}, - "[sig-cli] oc builds complex build start-build [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-network] IngressClass [Feature:Ingress] should set default value on new IngressClass [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]": {}, - "[sig-imageregistry][Serial] Image signature workflow can push a signed image to openshift registry and verify it [Suite:openshift/conformance/serial]": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - 
"[sig-network-edge][Conformance][Area:Networking][Feature:Router] The HAProxy router should pass the http2 tests [Suite:openshift/conformance/parallel/minimal]": {}, - "[sig-cli] oc builds complex build webhooks CRUD [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] prune builds based on settings in the buildconfig should prune completed builds based on the successfulBuildsHistoryLimit setting [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-apps][Feature:DeploymentConfig] deploymentconfigs with multiple image change triggers should run a successful deployment with multiple triggers [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-imageregistry][Feature:ImageAppend] Image append should create images by appending them [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-network-edge][Conformance][Area:Networking][Feature:Router] The HAProxy router should pass the h2spec conformance tests [Suite:openshift/conformance/parallel/minimal]": {}, - "[sig-instrumentation] Prometheus when installed on the cluster shouldn't report any alerts in firing state apart from Watchdog and AlertmanagerReceiversNotConfigured [Early] [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-builds][Feature:Builds] custom build with buildah being created from new-build should complete build with custom builder image [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc adm storage-admin [Suite:openshift/conformance/parallel]": {}, - "[sig-operator] an end user can use OLM can subscribe to the operator [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]": {}, - "[sig-builds][Feature:Builds] oc new-app should succeed with a --name of 58 characters [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "promote-release-openshift-machine-os-content-e2e-aws-4.10-s390x.Overall": {}, - "[sig-builds][Feature:Builds] build can reference a cluster service with a build being created from new-build should be able to run a build that references a cluster service [Skipped:Disconnected] [Suite:openshift/conformance/parallel]": {}, - "[sig-cli] oc adm policy [Suite:openshift/conformance/parallel]": {}, - "[sig-network][Feature:Network Policy Audit logging] when using openshift ovn-kubernetes should ensure acl logs are created and correct [Suite:openshift/conformance/parallel]": {}, - } -) diff --git a/pkg/testsuites/standard_suites.go b/pkg/testsuites/standard_suites.go index 8e74b441b51d..b1509f3032bf 100644 --- a/pkg/testsuites/standard_suites.go +++ b/pkg/testsuites/standard_suites.go @@ -1,12 +1,17 @@ package testsuites import ( - "strings" + "context" + "fmt" + "os" "time" - "github.com/openshift/origin/pkg/test/ginkgo" + "github.com/sirupsen/logrus" "k8s.io/kubectl/pkg/util/templates" + "github.com/openshift/origin/pkg/test/extensions" + 
"github.com/openshift/origin/pkg/test/ginkgo" + // these register framework.NewFrameworkExtensions responsible for // executing post-test actions, here debug and metrics gathering // see https://github.com/kubernetes/kubernetes/blob/v1.26.0/test/e2e/framework/framework.go#L175-L181 @@ -18,7 +23,7 @@ import ( _ "github.com/openshift/origin/test/extended/util/annotate/generated" ) -func StandardTestSuites() []*ginkgo.TestSuite { +func InternalTestSuites() []*ginkgo.TestSuite { copied := make([]*ginkgo.TestSuite, 0, len(staticSuites)) for i := range staticSuites { curr := staticSuites[i] @@ -27,6 +32,79 @@ func StandardTestSuites() []*ginkgo.TestSuite { return copied } +// AllTestSuites returns all test suites including internal suites and extension suites. +// It validates that no suite names are duplicated across internal and extension suites. +func AllTestSuites(ctx context.Context) ([]*ginkgo.TestSuite, error) { + suites := InternalTestSuites() + + if len(os.Getenv("OPENSHIFT_SKIP_EXTERNAL_TESTS")) > 0 { + logrus.Warning("Using built-in suites only due to OPENSHIFT_SKIP_EXTERNAL_TESTS being set") + return suites, nil + } + + // Create a map to track suite names and their sources for better error reporting + suiteNameToSources := make(map[string][]string) + for _, suite := range suites { + suiteNameToSources[suite.Name] = []string{"internal"} + } + + // Extract all test binaries from the release payload + cleanup, binaries, err := extensions.ExtractAllTestBinaries(ctx, 10) + if err != nil { + return nil, fmt.Errorf("failed to extract test binaries: %w", err) + } + defer cleanup() + + // Get info from all binaries + extensionInfos, err := binaries.Info(ctx, 4) + if err != nil { + return nil, fmt.Errorf("failed to get extension info: %w", err) + } + + for _, e := range extensionInfos { + for _, s := range e.Suites { + extensionSource := fmt.Sprintf("extension %s:%s:%s", e.Component.Product, e.Component.Kind, e.Component.Name) + if e.Source.SourceImage != "" { + extensionSource = fmt.Sprintf("extension %s:%s:%s (image: %s)", e.Component.Product, e.Component.Kind, e.Component.Name, e.Source.SourceImage) + } + + // Check if suite name conflicts with any existing suite name (internal or extension) + if existingSources, exists := suiteNameToSources[s.Name]; exists { + allSources := append(existingSources, extensionSource) + return nil, fmt.Errorf("suite %q is declared by multiple sources: %v - there can be only one canonical source of a suite", + s.Name, allSources) + } + + // Add the suite name and its source to our tracking map + suiteNameToSources[s.Name] = []string{extensionSource} + + suites = append(suites, &ginkgo.TestSuite{ + Name: s.Name, + Description: s.Description, + Kind: ginkgo.KindExternal, + Extension: e, + Qualifiers: s.Qualifiers, + }) + } + } + + // Now handle setting qualifiers for parent suites once we've assembled the complete + // list of suites. + for _, e := range extensionInfos { + for _, s := range e.Suites { + for _, p := range s.Parents { + for _, parent := range suites { + if parent.Name == p { + parent.Qualifiers = append(parent.Qualifiers, s.Qualifiers...) + } + } + } + } + } + + return suites, nil +} + // staticSuites are all known test suites this binary should run var staticSuites = []ginkgo.TestSuite{ { @@ -34,11 +112,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` Tests that ensure an OpenShift cluster and components are working properly. 
 		`),
-		Matches: func(name string) bool {
-			if isDisabled(name) {
-				return false
-			}
-			return strings.Contains(name, "[Suite:openshift/conformance/")
+		Qualifiers: []string{
+			withExcludedTestsFilter("name.contains('[Suite:openshift/conformance/')"),
 		},
 		Parallelism: 30,
 	},
@@ -47,11 +122,8 @@ var staticSuites = []ginkgo.TestSuite{
 		Description: templates.LongDesc(`
 			Only the portion of the openshift/conformance test suite that runs in parallel.
 		`),
-		Matches: func(name string) bool {
-			if isDisabled(name) {
-				return false
-			}
-			return strings.Contains(name, "[Suite:openshift/conformance/parallel")
+		Qualifiers: []string{
+			withExcludedTestsFilter("name.contains('[Suite:openshift/conformance/parallel')"),
 		},
 		Parallelism:          30,
 		MaximumAllowedFlakes: 15,
@@ -61,11 +133,9 @@ var staticSuites = []ginkgo.TestSuite{
 		Description: templates.LongDesc(`
 			Only the portion of the openshift/conformance test suite that runs serially.
 		`),
-		Matches: func(name string) bool {
-			if isDisabled(name) {
-				return false
-			}
-			return strings.Contains(name, "[Suite:openshift/conformance/serial") || isStandardEarlyOrLateTest(name)
+		Qualifiers: []string{
+			// Standard early and late tests are included in the serial suite
+			withExcludedTestsFilter(withStandardEarlyOrLateTests("name.contains('[Suite:openshift/conformance/serial')")),
 		},
 		TestTimeout: 40 * time.Minute,
 	},
@@ -75,44 +145,20 @@ var staticSuites = []ginkgo.TestSuite{
 			The disruptive test suite. Disruptive tests interrupt the cluster function, such as by stopping/restarting
 			the control plane or changing the global cluster configuration in a way that can affect other tests.
 		`),
-		Matches: func(name string) bool {
-			if isDisabled(name) {
-				return false
-			}
-			// excluded due to stopped instance handling until https://bugzilla.redhat.com/show_bug.cgi?id=1905709 is fixed
-			if strings.Contains(name, "Cluster should survive master and worker failure and recover with machine health checks") {
-				return false
-			}
-			return strings.Contains(name, "[Feature:EtcdRecovery]") || strings.Contains(name, "[Feature:NodeRecovery]") || isStandardEarlyTest(name)
-
+		Qualifiers: []string{
+			withStandardEarlyTests(`name.contains("[Feature:EtcdRecovery]") || name.contains("[Feature:NodeRecovery]")`),
 		},
 		// Duration of the quorum restore test exceeds 60 minutes.
 		TestTimeout:                90 * time.Minute,
 		ClusterStabilityDuringTest: ginkgo.Disruptive,
 	},
-	{
-		Name: "kubernetes/conformance",
-		Description: templates.LongDesc(`
-			The default Kubernetes conformance suite.
-		`),
-		Matches: func(name string) bool {
-			if isDisabled(name) {
-				return false
-			}
-			return strings.Contains(name, "[Suite:k8s]") && strings.Contains(name, "[Conformance]")
-		},
-		Parallelism: 30,
-	},
 	{
 		Name: "openshift/build",
 		Description: templates.LongDesc(`
 			Tests that exercise the OpenShift build functionality.
 		`),
-		Matches: func(name string) bool {
-			if isDisabled(name) {
-				return false
-			}
-			return strings.Contains(name, "[Feature:Builds]") || isStandardEarlyOrLateTest(name)
+		Qualifiers: []string{
+			withStandardEarlyOrLateTests("name.contains('[Feature:Builds]')"),
 		},
 		Parallelism: 7,
 		// TODO: Builds are really flaky right now, remove when we land perf updates and fix io on workers
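Each entry in Qualifiers is a CEL expression evaluated against a test's metadata; a test joins the suite when any qualifier evaluates to true. The following sketch shows the matching semantics in isolation, assuming the cel-go library and an environment that exposes the test name as a string variable `name`; the actual evaluation happens inside the vendored openshift-tests-extension code, not in this snippet:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
    )

    func main() {
        // Assumed environment: a single string variable `name`.
        env, err := cel.NewEnv(cel.Variable("name", cel.StringType))
        if err != nil {
            panic(err)
        }
        // One of the qualifiers from the hunk above.
        ast, iss := env.Compile(`name.contains('[Suite:openshift/conformance/parallel')`)
        if iss.Err() != nil {
            panic(iss.Err())
        }
        prg, err := env.Program(ast)
        if err != nil {
            panic(err)
        }
        out, _, err := prg.Eval(map[string]any{
            "name": "[sig-cli] oc adm images [Suite:openshift/conformance/parallel]",
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(out) // true
    }
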
`), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return strings.Contains(name, "[Feature:Templates]") || isStandardEarlyOrLateTest(name) + Qualifiers: []string{ + withStandardEarlyOrLateTests("name.contains('[Feature:Templates]')"), }, Parallelism: 1, }, @@ -138,11 +181,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` Tests that exercise the OpenShift image-registry functionality. `), - Matches: func(name string) bool { - if isDisabled(name) || strings.Contains(name, "[Local]") { - return false - } - return strings.Contains(name, "[sig-imageregistry]") || isStandardEarlyOrLateTest(name) + Qualifiers: []string{ + withStandardEarlyOrLateTests("name.contains('[sig-imageregistry]') && !name.contains('[Local]')"), }, }, { @@ -150,11 +190,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` Tests that exercise language and tooling images shipped as part of OpenShift. `), - Matches: func(name string) bool { - if isDisabled(name) || strings.Contains(name, "[Local]") { - return false - } - return strings.Contains(name, "[Feature:ImageEcosystem]") || isStandardEarlyOrLateTest(name) + Qualifiers: []string{ + withStandardEarlyOrLateTests("name.contains('[Feature:ImageEcosystem]') && !name.contains('[Local]')"), }, Parallelism: 7, TestTimeout: 20 * time.Minute, @@ -164,11 +201,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` Tests that exercise the OpenShift / Jenkins integrations provided by the OpenShift Jenkins image/plugins and the Pipeline Build Strategy. `), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return strings.Contains(name, "[Feature:Jenkins]") || isStandardEarlyOrLateTest(name) + Qualifiers: []string{ + withStandardEarlyOrLateTests("name.contains('[Feature:Jenkins]')"), }, Parallelism: 4, TestTimeout: 20 * time.Minute, @@ -178,11 +212,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` Tests that exercise the OpenShift / Jenkins integrations provided by the OpenShift Jenkins image/plugins and the Pipeline Build Strategy. `), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return strings.Contains(name, "[Feature:JenkinsRHELImagesOnly]") || isStandardEarlyOrLateTest(name) + Qualifiers: []string{ + withStandardEarlyOrLateTests("name.contains('[Feature:JenkinsRHELImagesOnly]')"), }, Parallelism: 4, TestTimeout: 20 * time.Minute, @@ -192,11 +223,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` Tests that verify the scalability characteristics of the cluster. Currently this is focused on core performance behaviors and preventing regressions. `), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return strings.Contains(name, "[Suite:openshift/scalability]") + Qualifiers: []string{ + "name.contains('[Suite:openshift/scalability')", }, Parallelism: 1, TestTimeout: 20 * time.Minute, @@ -206,11 +234,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` Run only tests that are excluded from conformance. Makes identifying omitted tests easier. `), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return !strings.Contains(name, "[Suite:openshift/conformance/") + Qualifiers: []string{ + "!name.contains('[Suite:openshift/conformance/')", }, }, { @@ -218,11 +243,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` Run only tests for test-cmd. 
`), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return strings.Contains(name, "[Feature:LegacyCommandTests]") || isStandardEarlyOrLateTest(name) + Qualifiers: []string{ + withStandardEarlyOrLateTests("name.contains('[Feature:LegacyCommandTests]')"), }, }, { @@ -241,11 +263,8 @@ var staticSuites = []ginkgo.TestSuite{ of the file. Replace "master" with the OpenShift version you are testing against, e.g. "blob/release-4.17/test/..." `), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return strings.Contains(name, "External Storage [Driver:") && !strings.Contains(name, "[Disruptive]") + Qualifiers: []string{ + `name.contains("External Storage [Driver:") && !name.contains("[Disruptive]")`, }, }, { @@ -253,11 +272,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` This test suite performs IPsec e2e tests covering control plane and data plane for east west and north south traffic scenarios. `), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return strings.Contains(name, "[Suite:openshift/network/ipsec") + Qualifiers: []string{ + `name.contains("[Suite:openshift/network/ipsec")`, }, Parallelism: 1, TestTimeout: 20 * time.Minute, @@ -267,19 +283,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` This test suite repeatedly verifies the networking function of the cluster in parallel to find flakes. `), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - // Skip NetworkPolicy tests for https://bugzilla.redhat.com/show_bug.cgi?id=1980141 - if strings.Contains(name, "[Feature:NetworkPolicy]") { - return false - } - // Serial:Self are tests that can't be run in parallel with a copy of itself - if strings.Contains(name, "[Serial:Self]") { - return false - } - return (strings.Contains(name, "[Suite:openshift/conformance/") && strings.Contains(name, "[sig-network]")) || isStandardEarlyOrLateTest(name) + Qualifiers: []string{ + withStandardEarlyOrLateTests(`!name.contains("[Feature:NetworkPolicy]") && !name.contains("[Serial:Self]") && name.contains("[Suite:openshift/conformance/") && name.contains("[sig-network]")`), }, Parallelism: 60, Count: 12, @@ -290,11 +295,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` This test suite performs CNI live migration either from SDN to OVN-Kubernetes or OVN-Kubernetes to SDN. `), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return strings.Contains(name, "[Suite:openshift/network/live-migration") + Qualifiers: []string{ + `name.contains("[Suite:openshift/network/live-migration")`, }, Count: 1, TestTimeout: 4 * time.Hour, @@ -305,11 +307,11 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` The conformance testing suite for certified third-party CNI plugins. `), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return inCNISuite(name) + Qualifiers: []string{ + `name.contains("[Suite:k8s]") && name.contains("[sig-network]") && + (name.contains("[Conformance]") || + (name.contains("NetworkPolicy") && !name.contains("named port")) || + name.contains("[Feature:IPv6DualStack]"))`, }, }, { @@ -317,37 +319,18 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` The conformance testing suite for virtualization related features. 
`), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return strings.Contains(name, "[Suite:openshift/network/virtualization") + Qualifiers: []string{ + `name.contains("[Suite:openshift/network/virtualization")`, }, Parallelism: 3, }, - { - Name: "experimental/reliability/minimal", - Description: templates.LongDesc(` - Set of highly reliable tests. - `), - Matches: func(name string) bool { - - _, exists := minimal[name] - if !exists { - return false - } - return !isDisabled(name) && strings.Contains(name, "[Suite:openshift/conformance/parallel") - }, - Parallelism: 20, - MaximumAllowedFlakes: 15, - }, { Name: "all", Description: templates.LongDesc(` Run all tests. `), - Matches: func(name string) bool { - return true + Qualifiers: []string{ + `true`, }, }, { @@ -355,11 +338,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` This test suite runs vertical scaling tests to exercise the safe scale-up and scale-down of etcd members. `), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return strings.Contains(name, "[Suite:openshift/etcd/scaling") || strings.Contains(name, "[Feature:EtcdVerticalScaling]") || isStandardEarlyOrLateTest(name) + Qualifiers: []string{ + withStandardEarlyOrLateTests(`name.contains("[Suite:openshift/etcd/scaling") || name.contains("[Feature:EtcdVerticalScaling]")`), }, // etcd's vertical scaling test can take a while for apiserver rollouts to stabilize on the same revision TestTimeout: 60 * time.Minute, @@ -369,11 +349,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` This test suite runs etcd recovery tests to exercise the safe restore process of etcd members. `), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return strings.Contains(name, "[Suite:openshift/etcd/recovery") || strings.Contains(name, "[Feature:EtcdRecovery]") || isStandardEarlyOrLateTest(name) + Qualifiers: []string{ + withStandardEarlyOrLateTests(`name.contains("[Suite:openshift/etcd/recovery") || name.contains("[Feature:EtcdRecovery]")`), }, // etcd's restore test can take a while for apiserver rollouts to stabilize Parallelism: 1, @@ -385,11 +362,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` This test suite runs etcd cert rotation tests to exercise the the automatic and manual certificate rotation. `), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return strings.Contains(name, "[Suite:openshift/etcd/certrotation") || strings.Contains(name, "[Feature:CertRotation]") || isStandardEarlyOrLateTest(name) + Qualifiers: []string{ + withStandardEarlyOrLateTests(`name.contains("[Suite:openshift/etcd/certrotation") || name.contains("[Feature:CertRotation]")`), }, TestTimeout: 60 * time.Minute, Parallelism: 1, @@ -400,11 +374,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` This test suite runs kube-apiserver rollout reliability. `), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return strings.Contains(name, "[Suite:openshift/kube-apiserver/rollout") || isStandardEarlyOrLateTest(name) + Qualifiers: []string{ + withStandardEarlyOrLateTests(`name.contains("[Suite:openshift/kube-apiserver/rollout")`), }, TestTimeout: 90 * time.Minute, Parallelism: 1, @@ -415,11 +386,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` This test suite runs tests to validate realtime functionality on nodes. 
`), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return strings.Contains(name, "[Suite:openshift/nodes/realtime") + Qualifiers: []string{ + `name.contains("[Suite:openshift/nodes/realtime")`, }, TestTimeout: 30 * time.Minute, }, @@ -428,11 +396,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` This test suite runs tests to validate realtime latency on nodes. `), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return strings.Contains(name, "[Suite:openshift/nodes/realtime/latency") + Qualifiers: []string{ + `name.contains("[Suite:openshift/nodes/realtime/latency")`, }, TestTimeout: 30 * time.Minute, }, @@ -441,11 +406,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` This test suite runs tests to validate user namespace functionality. `), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return strings.Contains(name, "[Suite:openshift/usernamespace") + Qualifiers: []string{ + `name.contains("[Suite:openshift/usernamespace")`, }, TestTimeout: 60 * time.Minute, }, @@ -454,11 +416,8 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` This test suite runs tests to validate machine-config-operator functionality. `), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return strings.Contains(name, "[Suite:openshift/machine-config-operator/disruptive") + Qualifiers: []string{ + `name.contains("[Suite:openshift/machine-config-operator/disruptive")`, }, TestTimeout: 120 * time.Minute, }, @@ -467,12 +426,33 @@ var staticSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` This test suite runs tests to validate two-node. `), - Matches: func(name string) bool { - if isDisabled(name) { - return false - } - return strings.Contains(name, "[Suite:openshift/two-node") || strings.Contains(name, "[FeatureGate:DualReplica]") || strings.Contains(name, "[FeatureGate:HighlyAvailableArbiter]") + Qualifiers: []string{ + withExcludedTestsFilter(`name.contains("[Suite:openshift/two-node") || name.contains("[FeatureGate:DualReplica]") || name.contains("[FeatureGate:HighlyAvailableArbiter]")`), }, TestTimeout: 60 * time.Minute, }, } + +func withExcludedTestsFilter(baseExpr string) string { + excluded := []string{ + "[Disabled:", + "[Disruptive]", + "[Skipped]", + "[Slow]", + "[Flaky]", + "[Local]", + } + + filter := "" + for i, s := range excluded { + if i > 0 { + filter += " && " + } + filter += fmt.Sprintf("!name.contains('%s')", s) + } + + if baseExpr != "" { + return fmt.Sprintf("(%s) && (%s)", baseExpr, filter) + } + return filter +} diff --git a/pkg/testsuites/suites_test.go b/pkg/testsuites/suites_test.go new file mode 100644 index 000000000000..f6a3dd7a9f7f --- /dev/null +++ b/pkg/testsuites/suites_test.go @@ -0,0 +1,51 @@ +package testsuites + +import ( + "fmt" + "testing" + + "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests" +) + +// TestSuiteQualifiersValidCEL validates that all CEL expressions in suite qualifiers +// are syntactically valid +func TestSuiteQualifiersValidCEL(t *testing.T) { + dummyTest := &extensiontests.ExtensionTestSpec{ + Name: "[sig-test] Test a thing [Suite:openshift/conformance/parallel] [Early]", + } + dummySpecs := extensiontests.ExtensionTestSpecs{dummyTest} + + t.Run("standard suites", func(t *testing.T) { + for _, suite := range staticSuites { + t.Run(suite.Name, func(t *testing.T) { + for i, qualifier := range suite.Qualifiers { + 
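+				// Each qualifier is a CEL expression over test metadata; the standard suites
+				// compose them with helpers like withExcludedTestsFilter, producing e.g.
+				// (name.contains('[Suite:openshift/conformance/')) && (!name.contains('[Disabled:') && ...).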
t.Run(fmt.Sprintf("qualifier_%d", i), func(t *testing.T) { + // Attempt to filter using the qualifier - this will validate the CEL expression + _, err := dummySpecs.Filter([]string{qualifier}) + if err != nil { + t.Errorf("Invalid CEL expression in suite %q, qualifier %d: %q\nError: %v", + suite.Name, i, qualifier, err) + } + }) + } + }) + } + }) + + t.Run("upgrade suites", func(t *testing.T) { + for _, suite := range upgradeSuites { + t.Run(suite.Name, func(t *testing.T) { + for i, qualifier := range suite.Qualifiers { + t.Run(fmt.Sprintf("qualifier_%d", i), func(t *testing.T) { + // Attempt to filter using the qualifier - this will validate the CEL expression + _, err := dummySpecs.Filter([]string{qualifier}) + if err != nil { + t.Errorf("Invalid CEL expression in upgrade suite %q, qualifier %d: %q\nError: %v", + suite.Name, i, qualifier, err) + } + }) + } + }) + } + }) +} diff --git a/pkg/testsuites/upgrade_suites.go b/pkg/testsuites/upgrade_suites.go index 5d12e9d1325b..412139013555 100644 --- a/pkg/testsuites/upgrade_suites.go +++ b/pkg/testsuites/upgrade_suites.go @@ -1,11 +1,11 @@ package testsuites import ( - "strings" "time" - "github.com/openshift/origin/pkg/test/ginkgo" "k8s.io/kubectl/pkg/util/templates" + + "github.com/openshift/origin/pkg/test/ginkgo" ) func UpgradeTestSuites() []*ginkgo.TestSuite { @@ -24,11 +24,8 @@ var upgradeSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` Run all tests. `), - Matches: func(name string) bool { - if isStandardEarlyTest(name) { - return true - } - return strings.Contains(name, "[Feature:ClusterUpgrade]") && !strings.Contains(name, "[Suite:k8s]") + Qualifiers: []string{ + withStandardEarlyTests(`name.contains("[Feature:ClusterUpgrade]") && !name.contains("[Suite:k8s]")`), }, TestTimeout: 240 * time.Minute, }, @@ -37,11 +34,8 @@ var upgradeSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` Run only the tests that verify the platform remains available. `), - Matches: func(name string) bool { - if isStandardEarlyTest(name) { - return true - } - return strings.Contains(name, "[Feature:ClusterUpgrade]") && !strings.Contains(name, "[Suite:k8s]") + Qualifiers: []string{ + withStandardEarlyTests(`name.contains("[Feature:ClusterUpgrade]") && !name.contains("[Suite:k8s]")`), }, TestTimeout: 240 * time.Minute, }, @@ -50,11 +44,8 @@ var upgradeSuites = []ginkgo.TestSuite{ Description: templates.LongDesc(` Don't run disruption tests. `), - Matches: func(name string) bool { - if isStandardEarlyTest(name) { - return true - } - return strings.Contains(name, "[Feature:ClusterUpgrade]") && !strings.Contains(name, "[Suite:k8s]") + Qualifiers: []string{ + withStandardEarlyTests(`name.contains("[Feature:ClusterUpgrade]") && !name.contains("[Suite:k8s]")`), }, TestTimeout: 240 * time.Minute, }, diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/LICENSE b/vendor/github.com/openshift-eng/openshift-tests-extension/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdinfo/info.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdinfo/info.go new file mode 100644 index 000000000000..1d4237876d53 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdinfo/info.go @@ -0,0 +1,38 @@ +package cmdinfo + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/spf13/cobra" + + "github.com/openshift-eng/openshift-tests-extension/pkg/extension" + "github.com/openshift-eng/openshift-tests-extension/pkg/flags" +) + +func NewInfoCommand(registry *extension.Registry) *cobra.Command { + componentFlags := flags.NewComponentFlags() + + cmd := &cobra.Command{ + Use: "info", + Short: "Display extension metadata", + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) error { + extension := registry.Get(componentFlags.Component) + if extension == nil { + return fmt.Errorf("couldn't find the component %q", componentFlags.Component) + } + + info, err := json.MarshalIndent(extension, "", " ") + if err != nil { + return err + } + + fmt.Fprintf(os.Stdout, "%s\n", string(info)) + return nil + }, + } + componentFlags.BindFlags(cmd.Flags()) + return cmd +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdlist/list.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdlist/list.go new file mode 100644 index 000000000000..31a040b7c9fc --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdlist/list.go @@ -0,0 +1,133 @@ +package cmdlist + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + + "github.com/openshift-eng/openshift-tests-extension/pkg/extension" + "github.com/openshift-eng/openshift-tests-extension/pkg/flags" +) + +func NewListCommand(registry *extension.Registry) *cobra.Command { + opts := struct { + componentFlags *flags.ComponentFlags + suiteFlags *flags.SuiteFlags + outputFlags *flags.OutputFlags + environmentalFlags *flags.EnvironmentalFlags + }{ + suiteFlags: flags.NewSuiteFlags(), + componentFlags: flags.NewComponentFlags(), + outputFlags: flags.NewOutputFlags(), + environmentalFlags: flags.NewEnvironmentalFlags(), + } + + // Tests + listTestsCmd := &cobra.Command{ + Use: "tests", + Short: "List available tests", + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) error { + ext := registry.Get(opts.componentFlags.Component) + if ext == nil { + return fmt.Errorf("component not found: %s", opts.componentFlags.Component) + } + + // Find suite, if specified + var foundSuite *extension.Suite + var err error + if opts.suiteFlags.Suite != "" { + foundSuite, err = ext.GetSuite(opts.suiteFlags.Suite) + if err != nil { + return err + } + } + + // Filter for suite + specs := ext.GetSpecs() + if foundSuite != nil { + specs, err = specs.Filter(foundSuite.Qualifiers) + if err != nil { + return err + } + } + + specs, err = specs.FilterByEnvironment(*opts.environmentalFlags) + if err != nil { + return err + } + + data, err := opts.outputFlags.Marshal(specs) + if err != nil { + return err + } + fmt.Fprintf(os.Stdout, "%s\n", string(data)) + return nil + }, + } + opts.suiteFlags.BindFlags(listTestsCmd.Flags()) + opts.componentFlags.BindFlags(listTestsCmd.Flags()) + opts.environmentalFlags.BindFlags(listTestsCmd.Flags()) + opts.outputFlags.BindFlags(listTestsCmd.Flags()) + + // Suites + listSuitesCommand := &cobra.Command{ + Use: "suites", + Short: "List available suites", + SilenceUsage: true, + RunE: func(cmd 
*cobra.Command, args []string) error { + ext := registry.Get(opts.componentFlags.Component) + if ext == nil { + return fmt.Errorf("component not found: %s", opts.componentFlags.Component) + } + + suites := ext.Suites + + data, err := opts.outputFlags.Marshal(suites) + if err != nil { + return err + } + fmt.Fprintf(os.Stdout, "%s\n", string(data)) + return nil + }, + } + opts.componentFlags.BindFlags(listSuitesCommand.Flags()) + opts.outputFlags.BindFlags(listSuitesCommand.Flags()) + + // Components + listComponentsCmd := &cobra.Command{ + Use: "components", + Short: "List available components", + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) error { + var components []*extension.Component + registry.Walk(func(e *extension.Extension) { + components = append(components, &e.Component) + }) + + data, err := opts.outputFlags.Marshal(components) + if err != nil { + return err + } + fmt.Fprintf(os.Stdout, "%s\n", string(data)) + return nil + }, + } + opts.outputFlags.BindFlags(listComponentsCmd.Flags()) + + var listCmd = &cobra.Command{ + Use: "list [subcommand]", + Short: "List items", + RunE: func(cmd *cobra.Command, args []string) error { + return listTestsCmd.RunE(cmd, args) + }, + } + opts.suiteFlags.BindFlags(listCmd.Flags()) + opts.componentFlags.BindFlags(listCmd.Flags()) + opts.outputFlags.BindFlags(listCmd.Flags()) + opts.environmentalFlags.BindFlags(listCmd.Flags()) + listCmd.AddCommand(listTestsCmd, listComponentsCmd, listSuitesCommand) + + return listCmd +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runsuite.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runsuite.go new file mode 100644 index 000000000000..8dac95b7e2dc --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runsuite.go @@ -0,0 +1,64 @@ +package cmdrun + +import ( + "fmt" + "os" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + + "github.com/openshift-eng/openshift-tests-extension/pkg/extension" + "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests" + "github.com/openshift-eng/openshift-tests-extension/pkg/flags" +) + +func NewRunSuiteCommand(registry *extension.Registry) *cobra.Command { + opts := struct { + componentFlags *flags.ComponentFlags + outputFlags *flags.OutputFlags + concurrencyFlags *flags.ConcurrencyFlags + }{ + componentFlags: flags.NewComponentFlags(), + outputFlags: flags.NewOutputFlags(), + concurrencyFlags: flags.NewConcurrencyFlags(), + } + + cmd := &cobra.Command{ + Use: "run-suite NAME", + Short: "Run a group of tests by suite. This is more limited than origin, and intended for light local " + + "development use. 
Orchestration parameters, scheduling, isolation, etc. are not obeyed, and Ginkgo tests are executed serially.",
+		SilenceUsage: true,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ext := registry.Get(opts.componentFlags.Component)
+			if ext == nil {
+				return fmt.Errorf("component not found: %s", opts.componentFlags.Component)
+			}
+			if len(args) != 1 {
+				return fmt.Errorf("must specify one suite name")
+			}
+
+			w, err := extensiontests.NewResultWriter(os.Stdout, extensiontests.ResultFormat(opts.outputFlags.Output))
+			if err != nil {
+				return err
+			}
+			defer w.Flush()
+
+			suite, err := ext.GetSuite(args[0])
+			if err != nil {
+				return errors.Wrapf(err, "couldn't find suite: %s", args[0])
+			}
+
+			specs, err := ext.GetSpecs().Filter(suite.Qualifiers)
+			if err != nil {
+				return errors.Wrap(err, "couldn't filter specs")
+			}
+
+			return specs.Run(w, opts.concurrencyFlags.MaxConcurency)
+		},
+	}
+	opts.componentFlags.BindFlags(cmd.Flags())
+	opts.outputFlags.BindFlags(cmd.Flags())
+	opts.concurrencyFlags.BindFlags(cmd.Flags())
+
+	return cmd
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runtest.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runtest.go
new file mode 100644
index 000000000000..ea4b62cb6bfb
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun/runtest.go
@@ -0,0 +1,81 @@
+package cmdrun
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+
+	"github.com/spf13/cobra"
+
+	"github.com/openshift-eng/openshift-tests-extension/pkg/extension"
+	"github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+	"github.com/openshift-eng/openshift-tests-extension/pkg/flags"
+)
+
+func NewRunTestCommand(registry *extension.Registry) *cobra.Command {
+	opts := struct {
+		componentFlags   *flags.ComponentFlags
+		concurrencyFlags *flags.ConcurrencyFlags
+		nameFlags        *flags.NamesFlags
+		outputFlags      *flags.OutputFlags
+	}{
+		componentFlags:   flags.NewComponentFlags(),
+		nameFlags:        flags.NewNamesFlags(),
+		outputFlags:      flags.NewOutputFlags(),
+		concurrencyFlags: flags.NewConcurrencyFlags(),
+	}
+
+	cmd := &cobra.Command{
+		Use:          "run-test [-n NAME...] [NAME]",
+		Short:        "Runs tests by name",
+		SilenceUsage: true,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ext := registry.Get(opts.componentFlags.Component)
+			if ext == nil {
+				return fmt.Errorf("component not found: %s", opts.componentFlags.Component)
+			}
+			if len(args) > 1 {
+				return fmt.Errorf("use --names to specify more than one test")
+			}
+			opts.nameFlags.Names = append(opts.nameFlags.Names, args...)
+
+			// allow reading tests from a stdin pipe
+			info, err := os.Stdin.Stat()
+			if err != nil {
+				return err
+			}
+			if info.Mode()&os.ModeCharDevice == 0 { // Check if input is from a pipe
+				scanner := bufio.NewScanner(os.Stdin)
+				for scanner.Scan() {
+					opts.nameFlags.Names = append(opts.nameFlags.Names, scanner.Text())
+				}
+				if err := scanner.Err(); err != nil {
+					return fmt.Errorf("error reading from stdin: %v", err)
+				}
+			}
+
+			if len(opts.nameFlags.Names) == 0 {
+				return fmt.Errorf("must specify at least one test")
+			}
+
+			specs, err := ext.FindSpecsByName(opts.nameFlags.Names...)
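+			// FindSpecsByName returns an error naming any requested tests that don't exist.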
+			if err != nil {
+				return err
+			}
+
+			w, err := extensiontests.NewResultWriter(os.Stdout, extensiontests.ResultFormat(opts.outputFlags.Output))
+			if err != nil {
+				return err
+			}
+			defer w.Flush()
+
+			return specs.Run(w, opts.concurrencyFlags.MaxConcurency)
+		},
+	}
+	opts.componentFlags.BindFlags(cmd.Flags())
+	opts.nameFlags.BindFlags(cmd.Flags())
+	opts.outputFlags.BindFlags(cmd.Flags())
+	opts.concurrencyFlags.BindFlags(cmd.Flags())
+
+	return cmd
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/dbtime/time.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/dbtime/time.go
new file mode 100644
index 000000000000..b7651ba0220d
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/dbtime/time.go
@@ -0,0 +1,26 @@
+package dbtime
+
+import "time"
+
+// DBTime is a type suitable for direct import into databases like BigQuery,
+// formatted like 2006-01-02 15:04:05.000000 UTC.
+type DBTime time.Time
+
+func Ptr(t time.Time) *DBTime {
+	return (*DBTime)(&t)
+}
+
+func (dbt *DBTime) MarshalJSON() ([]byte, error) {
+	formattedTime := time.Time(*dbt).Format(`"2006-01-02 15:04:05.000000 UTC"`)
+	return []byte(formattedTime), nil
+}
+
+func (dbt *DBTime) UnmarshalJSON(b []byte) error {
+	timeStr := string(b[1 : len(b)-1])
+	parsedTime, err := time.Parse("2006-01-02 15:04:05.000000 UTC", timeStr)
+	if err != nil {
+		return err
+	}
+	*dbt = (DBTime)(parsedTime)
+	return nil
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extension.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extension.go
new file mode 100644
index 000000000000..d1c45762d037
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extension.go
@@ -0,0 +1,159 @@
+package extension
+
+import (
+	"fmt"
+	"strings"
+
+	et "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+	"github.com/openshift-eng/openshift-tests-extension/pkg/util/sets"
+	"github.com/openshift-eng/openshift-tests-extension/pkg/version"
+)
+
+func NewExtension(product, kind, name string) *Extension {
+	return &Extension{
+		APIVersion: CurrentExtensionAPIVersion,
+		Source: Source{
+			Commit:       version.CommitFromGit,
+			BuildDate:    version.BuildDate,
+			GitTreeState: version.GitTreeState,
+		},
+		Component: Component{
+			Product: product,
+			Kind:    kind,
+			Name:    name,
+		},
+	}
+}
+
+func (e *Extension) GetSuite(name string) (*Suite, error) {
+	var suite *Suite
+
+	for _, s := range e.Suites {
+		if s.Name == name {
+			suite = &s
+			break
+		}
+	}
+
+	if suite == nil {
+		return nil, fmt.Errorf("no such suite: %s", name)
+	}
+
+	return suite, nil
+}
+
+func (e *Extension) GetSpecs() et.ExtensionTestSpecs {
+	return e.specs
+}
+
+func (e *Extension) AddSpecs(specs et.ExtensionTestSpecs) {
+	specs.Walk(func(spec *et.ExtensionTestSpec) {
+		spec.Source = e.Component.Identifier()
+	})
+
+	e.specs = append(e.specs, specs...)
+}
+
+// IgnoreObsoleteTests marks tests as intentionally removed, so that
+// FindRemovedTestsWithoutRename does not report them.
+func (e *Extension) IgnoreObsoleteTests(testNames ...string) {
+	if e.obsoleteTests == nil {
+		e.obsoleteTests = sets.New[string](testNames...)
+	} else {
+		e.obsoleteTests.Insert(testNames...)
+	}
+}
+
+// FindRemovedTestsWithoutRename compares the current set of test specs against oldSpecs,
+// taking original names into account, and returns the names of any tests that disappeared
+// without a rename, along with an error when any are found. It can be used to detect
+// unintentional test removals.
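+//
+// Illustrative usage (hypothetical oldSpecs loaded from a previous release's metadata):
+//
+//	removed, err := ext.FindRemovedTestsWithoutRename(oldSpecs)
+//	if err != nil {
+//		// fail CI: tests vanished without being renamed or marked obsolete
+//	}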
+func (e *Extension) FindRemovedTestsWithoutRename(oldSpecs et.ExtensionTestSpecs) ([]string, error) {
+	currentSpecs := e.GetSpecs()
+	currentMap := make(map[string]bool)
+
+	// Populate current specs into a map for quick lookup by both Name and OriginalName.
+	for _, spec := range currentSpecs {
+		currentMap[spec.Name] = true
+		if spec.OriginalName != "" {
+			currentMap[spec.OriginalName] = true
+		}
+	}
+
+	var removedTests []string
+
+	// Check oldSpecs against current specs.
+	for _, oldSpec := range oldSpecs {
+		// Skip if the test is marked as obsolete.
+		if e.obsoleteTests.Has(oldSpec.Name) {
+			continue
+		}
+
+		// Check if oldSpec is missing in currentSpecs by both Name and OriginalName.
+		if !currentMap[oldSpec.Name] && (oldSpec.OriginalName == "" || !currentMap[oldSpec.OriginalName]) {
+			removedTests = append(removedTests, oldSpec.Name)
+		}
+	}
+
+	// Return an error if any removed tests were found.
+	if len(removedTests) > 0 {
+		return removedTests, fmt.Errorf("tests removed without rename: %v", removedTests)
+	}
+
+	return nil, nil
+}
+
+// AddGlobalSuite adds a suite whose qualifiers will apply to all tests,
+// not just those from this extension, allowing a developer to create a
+// composed suite of tests from many sources.
+func (e *Extension) AddGlobalSuite(suite Suite) *Extension {
+	if e.Suites == nil {
+		e.Suites = []Suite{suite}
+	} else {
+		e.Suites = append(e.Suites, suite)
+	}
+
+	return e
+}
+
+// AddSuite adds a suite whose qualifiers will only apply to tests present
+// in its own extension.
+func (e *Extension) AddSuite(suite Suite) *Extension {
+	expr := fmt.Sprintf("source == %q", e.Component.Identifier())
+	for i := range suite.Qualifiers {
+		suite.Qualifiers[i] = fmt.Sprintf("(%s) && (%s)", expr, suite.Qualifiers[i])
+	}
+	e.AddGlobalSuite(suite)
+	return e
+}
+
+func (e *Extension) RegisterImage(image Image) *Extension {
+	e.Images = append(e.Images, image)
+	return e
+}
+
+func (e *Extension) FindSpecsByName(names ...string) (et.ExtensionTestSpecs, error) {
+	var specs et.ExtensionTestSpecs
+	var notFound []string
+
+	for _, name := range names {
+		found := false
+		for i := range e.specs {
+			if e.specs[i].Name == name {
+				specs = append(specs, e.specs[i])
+				found = true
+				break
+			}
+		}
+		if !found {
+			notFound = append(notFound, name)
+		}
+	}
+
+	if len(notFound) > 0 {
+		return nil, fmt.Errorf("no such tests: %s", strings.Join(notFound, ", "))
+	}
+
+	return specs, nil
+}
+
+func (e *Component) Identifier() string {
+	return fmt.Sprintf("%s:%s:%s", e.Product, e.Kind, e.Name)
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/environment.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/environment.go
new file mode 100644
index 000000000000..b5116a5359a9
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/environment.go
@@ -0,0 +1,92 @@
+package extensiontests
+
+import (
+	"fmt"
+	"strings"
+)
+
+func PlatformEquals(platform string) string {
+	return fmt.Sprintf(`platform=="%s"`, platform)
+}
+
+func NetworkEquals(network string) string {
+	return fmt.Sprintf(`network=="%s"`, network)
+}
+
+func NetworkStackEquals(networkStack string) string {
+	return fmt.Sprintf(`networkStack=="%s"`, networkStack)
+}
+
+func UpgradeEquals(upgrade string) string {
+	return fmt.Sprintf(`upgrade=="%s"`, upgrade)
+}
+
+func TopologyEquals(topology string) string {
+	return fmt.Sprintf(`topology=="%s"`, topology)
+}
+
+func ArchitectureEquals(arch string) string {
+	return fmt.Sprintf(`architecture=="%s"`, arch)
+}
+
+func APIGroupEnabled(apiGroup string) string {
+	return fmt.Sprintf(`apiGroups.exists(api, api=="%s")`, apiGroup)
+}
+
+func APIGroupDisabled(apiGroup string) string {
+	return fmt.Sprintf(`!apiGroups.exists(api, api=="%s")`, apiGroup)
+}
+
+func FeatureGateEnabled(featureGate string) string {
+	return fmt.Sprintf(`featureGates.exists(fg, fg=="%s")`, featureGate)
+}
+
+func FeatureGateDisabled(featureGate string) string {
+	return fmt.Sprintf(`!featureGates.exists(fg, fg=="%s")`, featureGate)
+}
+
+func ExternalConnectivityEquals(externalConnectivity string) string {
+	return fmt.Sprintf(`externalConnectivity=="%s"`, externalConnectivity)
+}
+
+func OptionalCapabilitiesIncludeAny(optionalCapability ...string) string {
+	for i := range optionalCapability {
+		optionalCapability[i] = OptionalCapabilityExists(optionalCapability[i])
+	}
+	return fmt.Sprintf("(%s)", fmt.Sprint(strings.Join(optionalCapability, " || ")))
+}
+
+func OptionalCapabilitiesIncludeAll(optionalCapability ...string) string {
+	for i := range optionalCapability {
+		optionalCapability[i] = OptionalCapabilityExists(optionalCapability[i])
+	}
+	return fmt.Sprintf("(%s)", fmt.Sprint(strings.Join(optionalCapability, " && ")))
+}
+
+func OptionalCapabilityExists(optionalCapability string) string {
+	return fmt.Sprintf(`optionalCapabilities.exists(oc, oc=="%s")`, optionalCapability)
+}
+
+func NoOptionalCapabilitiesExist() string {
+	return "size(optionalCapabilities) == 0"
+}
+
+func InstallerEquals(installer string) string {
+	return fmt.Sprintf(`installer=="%s"`, installer)
+}
+
+func VersionEquals(version string) string {
+	return fmt.Sprintf(`version=="%s"`, version)
+}
+
+func FactEquals(key, value string) string {
+	return fmt.Sprintf(`(fact_keys.exists(k, k=="%s") && facts["%s"].matches("%s"))`, key, key, value)
+}
+
+func Or(cel ...string) string {
+	return fmt.Sprintf("(%s)", strings.Join(cel, " || "))
+}
+
+func And(cel ...string) string {
+	return fmt.Sprintf("(%s)", strings.Join(cel, " && "))
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result.go
new file mode 100644
index 000000000000..f33fb5c2745b
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result.go
@@ -0,0 +1,12 @@
+package extensiontests
+
+func (results ExtensionTestResults) Walk(walkFn func(*ExtensionTestResult)) {
+	for i := range results {
+		walkFn(results[i])
+	}
+}
+
+// AddDetails adds additional information to an ExtensionTestResult. Value must marshal to JSON.
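+//
+// For example (illustrative):
+//
+//	result.AddDetails("cluster-state", map[string]string{"phase": "upgrading"})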
+func (result *ExtensionTestResult) AddDetails(name string, value interface{}) {
+	result.Details = append(result.Details, Details{Name: name, Value: value})
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result_writer.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result_writer.go
new file mode 100644
index 000000000000..821599791352
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/result_writer.go
@@ -0,0 +1,71 @@
+package extensiontests
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+)
+
+type ResultWriter interface {
+	Write(result *ExtensionTestResult)
+	Flush()
+}
+
+type NullResultWriter struct{}
+
+func (NullResultWriter) Write(*ExtensionTestResult) {}
+func (NullResultWriter) Flush()                     {}
+
+type ResultFormat string
+
+var (
+	JSON  ResultFormat = "json"
+	JSONL ResultFormat = "jsonl"
+)
+
+type JSONResultWriter struct {
+	out     io.Writer
+	format  ResultFormat
+	results ExtensionTestResults
+}
+
+func NewResultWriter(out io.Writer, format ResultFormat) (*JSONResultWriter, error) {
+	switch format {
+	case JSON, JSONL:
+		// do nothing
+	default:
+		return nil, fmt.Errorf("unsupported result format: %s", format)
+	}
+
+	return &JSONResultWriter{
+		out:    out,
+		format: format,
+	}, nil
+}
+
+func (w *JSONResultWriter) Write(result *ExtensionTestResult) {
+	switch w.format {
+	case JSONL:
+		// JSONL gets written to out as we get the items
+		data, err := json.Marshal(result)
+		if err != nil {
+			panic(err)
+		}
+		fmt.Fprintf(w.out, "%s\n", string(data))
+	case JSON:
+		w.results = append(w.results, result)
+	}
+}
+
+func (w *JSONResultWriter) Flush() {
+	switch w.format {
+	case JSONL:
+		// we already wrote it out
+	case JSON:
+		data, err := json.MarshalIndent(w.results, "", " ")
+		if err != nil {
+			panic(err)
+		}
+		fmt.Fprintf(w.out, "%s\n", string(data))
+	}
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/spec.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/spec.go
new file mode 100644
index 000000000000..9d889c205ae7
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/spec.go
@@ -0,0 +1,567 @@
+package extensiontests
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/checker/decls"
+	"github.com/google/cel-go/common/types"
+
+	"github.com/openshift-eng/openshift-tests-extension/pkg/dbtime"
+	"github.com/openshift-eng/openshift-tests-extension/pkg/flags"
+)
+
+// Walk iterates over all test specs and executes the function provided. The test spec can be mutated.
+func (specs ExtensionTestSpecs) Walk(walkFn func(*ExtensionTestSpec)) ExtensionTestSpecs {
+	for i := range specs {
+		walkFn(specs[i])
+	}
+
+	return specs
+}
+
+type SelectFunction func(spec *ExtensionTestSpec) bool
+
+// Select filters the ExtensionTestSpecs to only those that match the provided SelectFunction
+func (specs ExtensionTestSpecs) Select(selectFn SelectFunction) ExtensionTestSpecs {
+	filtered := ExtensionTestSpecs{}
+	for _, spec := range specs {
+		if selectFn(spec) {
+			filtered = append(filtered, spec)
+		}
+	}
+
+	return filtered
+}
+
+// MustSelect filters the ExtensionTestSpecs to only those that match the provided SelectFunction.
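+// For example (illustrative): specs.MustSelect(NameContains("[sig-etcd]")).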
+// If no specs are selected, it returns an error.
+func (specs ExtensionTestSpecs) MustSelect(selectFn SelectFunction) (ExtensionTestSpecs, error) {
+	filtered := specs.Select(selectFn)
+	if len(filtered) == 0 {
+		return filtered, fmt.Errorf("no specs selected with specified SelectFunctions")
+	}
+
+	return filtered, nil
+}
+
+// SelectAny filters the ExtensionTestSpecs to only those that match any of the provided SelectFunctions
+func (specs ExtensionTestSpecs) SelectAny(selectFns []SelectFunction) ExtensionTestSpecs {
+	filtered := ExtensionTestSpecs{}
+	for _, spec := range specs {
+		for _, selectFn := range selectFns {
+			if selectFn(spec) {
+				filtered = append(filtered, spec)
+				break
+			}
+		}
+	}
+
+	return filtered
+}
+
+// MustSelectAny filters the ExtensionTestSpecs to only those that match any of the provided SelectFunctions.
+// If no specs are selected, it returns an error.
+func (specs ExtensionTestSpecs) MustSelectAny(selectFns []SelectFunction) (ExtensionTestSpecs, error) {
+	filtered := specs.SelectAny(selectFns)
+	if len(filtered) == 0 {
+		return filtered, fmt.Errorf("no specs selected with specified SelectFunctions")
+	}
+
+	return filtered, nil
+}
+
+// SelectAll filters the ExtensionTestSpecs to only those that match all the provided SelectFunctions
+func (specs ExtensionTestSpecs) SelectAll(selectFns []SelectFunction) ExtensionTestSpecs {
+	filtered := ExtensionTestSpecs{}
+	for _, spec := range specs {
+		anyFalse := false
+		for _, selectFn := range selectFns {
+			if !selectFn(spec) {
+				anyFalse = true
+				break
+			}
+		}
+		if !anyFalse {
+			filtered = append(filtered, spec)
+		}
+	}
+
+	return filtered
+}
+
+// MustSelectAll filters the ExtensionTestSpecs to only those that match all the provided SelectFunctions.
+// If no specs are selected, it returns an error.
+func (specs ExtensionTestSpecs) MustSelectAll(selectFns []SelectFunction) (ExtensionTestSpecs, error) {
+	filtered := specs.SelectAll(selectFns)
+	if len(filtered) == 0 {
+		return filtered, fmt.Errorf("no specs selected with specified SelectFunctions")
+	}
+
+	return filtered, nil
+}
+
+// ModuleTestsOnly ensures that ginkgo tests from vendored sources aren't selected.
+func ModuleTestsOnly() SelectFunction {
+	return func(spec *ExtensionTestSpec) bool {
+		for _, cl := range spec.CodeLocations {
+			if strings.Contains(cl, "/vendor/") {
+				return false
+			}
+		}
+
+		return true
+	}
+}
+
+// AllTestsIncludingVendored is an alternative to ModuleTestsOnly that explicitly opts in
+// to including vendored tests.
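+// Selecting with it is deliberately a no-op, e.g. specs.Select(AllTestsIncludingVendored())
+// returns every spec, vendored or not.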
+func AllTestsIncludingVendored() SelectFunction {
+	return func(spec *ExtensionTestSpec) bool {
+		return true
+	}
+}
+
+// NameContains returns a function that selects specs whose name contains the provided string
+func NameContains(name string) SelectFunction {
+	return func(spec *ExtensionTestSpec) bool {
+		return strings.Contains(spec.Name, name)
+	}
+}
+
+// NameContainsAll returns a function that selects specs whose name contains each of the provided contents strings
+func NameContainsAll(contents ...string) SelectFunction {
+	return func(spec *ExtensionTestSpec) bool {
+		for _, content := range contents {
+			if !strings.Contains(spec.Name, content) {
+				return false
+			}
+		}
+		return true
+	}
+}
+
+// HasLabel returns a function that selects specs with the provided label
+func HasLabel(label string) SelectFunction {
+	return func(spec *ExtensionTestSpec) bool {
+		return spec.Labels.Has(label)
+	}
+}
+
+// HasTagWithValue returns a function that selects specs containing a tag with the provided key and value
+func HasTagWithValue(key, value string) SelectFunction {
+	return func(spec *ExtensionTestSpec) bool {
+		return spec.Tags[key] == value
+	}
+}
+
+// WithLifecycle returns a function that selects specs with the provided Lifecycle
+func WithLifecycle(lifecycle Lifecycle) SelectFunction {
+	return func(spec *ExtensionTestSpec) bool {
+		return spec.Lifecycle == lifecycle
+	}
+}
+
+func (specs ExtensionTestSpecs) Names() []string {
+	var names []string
+	for _, spec := range specs {
+		names = append(names, spec.Name)
+	}
+	return names
+}
+
+// Run executes all the specs in parallel, up to maxConcurrent at the same time. Results
+// are written to the given ResultWriter after each spec has completed execution. BeforeEach,
+// BeforeAll, AfterEach, AfterAll hooks are executed when specified; "Each" hooks must be thread
+// safe. Returns an error if any test spec failed, indicating the quantity of failures.
+func (specs ExtensionTestSpecs) Run(w ResultWriter, maxConcurrent int) error {
+	queue := make(chan *ExtensionTestSpec)
+	failures := atomic.Int64{}
+
+	// Execute beforeAll
+	for _, spec := range specs {
+		for _, beforeAllTask := range spec.beforeAll {
+			beforeAllTask.Run()
+		}
+	}
+
+	// Feed the queue
+	go func() {
+		specs.Walk(func(spec *ExtensionTestSpec) {
+			queue <- spec
+		})
+		close(queue)
+	}()
+
+	// Start consumers
+	var wg sync.WaitGroup
+	for i := 0; i < maxConcurrent; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for spec := range queue {
+				for _, beforeEachTask := range spec.beforeEach {
+					beforeEachTask.Run(*spec)
+				}
+
+				res := runSpec(spec)
+				if res.Result == ResultFailed {
+					failures.Add(1)
+				}
+
+				for _, afterEachTask := range spec.afterEach {
+					afterEachTask.Run(res)
+				}
+
+				// We can't assume the runner will set the name of a test; it may not know it. Even if
+				// it does, we may want to modify it (e.g. k8s-tests for annotations currently).
+				res.Name = spec.Name
+				w.Write(res)
+			}
+		}()
+	}
+
+	// Wait for all consumers to finish
+	wg.Wait()
+
+	// Execute afterAll
+	for _, spec := range specs {
+		for _, afterAllTask := range spec.afterAll {
+			afterAllTask.Run()
+		}
+	}
+
+	failCount := failures.Load()
+	if failCount > 0 {
+		return fmt.Errorf("%d tests failed", failCount)
+	}
+	return nil
+}
+
+// AddBeforeAll adds a function to be run once before all tests start executing.
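+//
+// For example (illustrative):
+//
+//	specs.AddBeforeAll(func() { /* provision shared fixtures once */ })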
+func (specs ExtensionTestSpecs) AddBeforeAll(fn func()) {
+	task := &OneTimeTask{fn: fn}
+	specs.Walk(func(spec *ExtensionTestSpec) {
+		spec.beforeAll = append(spec.beforeAll, task)
+	})
+}
+
+// AddAfterAll adds a function to be run once after all tests have finished.
+func (specs ExtensionTestSpecs) AddAfterAll(fn func()) {
+	task := &OneTimeTask{fn: fn}
+	specs.Walk(func(spec *ExtensionTestSpec) {
+		spec.afterAll = append(spec.afterAll, task)
+	})
+}
+
+// AddBeforeEach adds a function that runs before each test starts executing. The ExtensionTestSpec is
+// passed in for contextual information, but must not be modified. The provided function must be thread
+// safe.
+func (specs ExtensionTestSpecs) AddBeforeEach(fn func(spec ExtensionTestSpec)) {
+	task := &SpecTask{fn: fn}
+	specs.Walk(func(spec *ExtensionTestSpec) {
+		spec.beforeEach = append(spec.beforeEach, task)
+	})
+}
+
+// AddAfterEach adds a function that runs after each test has finished executing. The ExtensionTestResult
+// can be modified if needed. The provided function must be thread safe.
+func (specs ExtensionTestSpecs) AddAfterEach(fn func(task *ExtensionTestResult)) {
+	task := &TestResultTask{fn: fn}
+	specs.Walk(func(spec *ExtensionTestSpec) {
+		spec.afterEach = append(spec.afterEach, task)
+	})
+}
+
+// MustFilter filters specs using the given celExprs. The expressions are OR'd together; if any
+// matches, the spec is included in the filtered set. If a CEL expression is invalid or filtering
+// otherwise fails, this function panics.
+func (specs ExtensionTestSpecs) MustFilter(celExprs []string) ExtensionTestSpecs {
+	specs, err := specs.Filter(celExprs)
+	if err != nil {
+		panic(fmt.Sprintf("filter did not succeed: %s", err.Error()))
+	}
+
+	return specs
+}
+
+// Filter filters specs using the given celExprs. The expressions are OR'd together; if any
+// matches, the spec is included in the filtered set.
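+//
+// For example (illustrative):
+//
+//	parallel, err := specs.Filter([]string{`name.contains("[Suite:openshift/conformance/parallel")`})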
+func (specs ExtensionTestSpecs) Filter(celExprs []string) (ExtensionTestSpecs, error) {
+	var filteredSpecs ExtensionTestSpecs
+
+	// An empty filter list returns all specs
+	if len(celExprs) == 0 {
+		return specs, nil
+	}
+
+	env, err := cel.NewEnv(
+		cel.Declarations(
+			decls.NewVar("source", decls.String),
+			decls.NewVar("name", decls.String),
+			decls.NewVar("originalName", decls.String),
+			decls.NewVar("labels", decls.NewListType(decls.String)),
+			decls.NewVar("codeLocations", decls.NewListType(decls.String)),
+			decls.NewVar("tags", decls.NewMapType(decls.String, decls.String)),
+		),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create CEL environment: %w", err)
+	}
+
+	// Compile each CEL expression once, rather than once per spec
+	programs := make([]cel.Program, 0, len(celExprs))
+	for _, celExpr := range celExprs {
+		prg, err := programForCEL(env, celExpr)
+		if err != nil {
+			return nil, err
+		}
+		programs = append(programs, prg)
+	}
+
+	// OR all expressions together: if any evaluates to true, include the TestSpec
+	for _, spec := range specs {
+		include := false
+		for _, prg := range programs {
+			out, _, err := prg.Eval(map[string]interface{}{
+				"name":          spec.Name,
+				"source":        spec.Source,
+				"originalName":  spec.OriginalName,
+				"labels":        spec.Labels.UnsortedList(),
+				"codeLocations": spec.CodeLocations,
+				"tags":          spec.Tags,
+			})
+			if err != nil {
+				return nil, fmt.Errorf("error evaluating CEL expression: %v", err)
+			}
+			if out == types.True {
+				include = true
+				break
+			}
+		}
+		if include {
+			filteredSpecs = append(filteredSpecs, spec)
+		}
+	}
+
+	return filteredSpecs, nil
+}
+
+func programForCEL(env *cel.Env, celExpr string) (cel.Program, error) {
+	// Parse CEL expression
+	ast, iss := env.Parse(celExpr)
+	if iss.Err() != nil {
+		return nil, fmt.Errorf("error parsing CEL expression '%s': %v", celExpr, iss.Err())
+	}
+
+	// Check the AST
+	checked, iss := env.Check(ast)
+	if iss.Err() != nil {
+		return nil, fmt.Errorf("error checking CEL expression '%s': %v", celExpr, iss.Err())
+	}
+
+	// Create a CEL program from the checked AST
+	prg, err := env.Program(checked)
+	if err != nil {
+		return nil, fmt.Errorf("error creating CEL program: %v", err)
+	}
+	return prg, nil
+}
+
+// FilterByEnvironment checks both the Include and Exclude fields of the ExtensionTestSpec to return those specs which match.
+// Tests will be included by default unless they are explicitly excluded. If Include is specified, only those tests matching
+// the CEL expression will be included.
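+//
+// For example (illustrative values): a spec whose EnvironmentSelector.Exclude is
+// `platform == "aws"` is dropped when the harness is invoked with --platform=aws,
+// while one whose Include is `topology == "microshift"` is kept only on MicroShift.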
+//
+// See helper functions in extensiontests/environment.go to craft CEL expressions
+func (specs ExtensionTestSpecs) FilterByEnvironment(envFlags flags.EnvironmentalFlags) (ExtensionTestSpecs, error) {
+	var filteredSpecs ExtensionTestSpecs
+	if envFlags.IsEmpty() {
+		return specs, nil
+	}
+
+	env, err := cel.NewEnv(
+		cel.Declarations(
+			decls.NewVar("apiGroups", decls.NewListType(decls.String)),
+			decls.NewVar("architecture", decls.String),
+			decls.NewVar("externalConnectivity", decls.String),
+			decls.NewVar("fact_keys", decls.NewListType(decls.String)),
+			decls.NewVar("facts", decls.NewMapType(decls.String, decls.String)),
+			decls.NewVar("featureGates", decls.NewListType(decls.String)),
+			decls.NewVar("network", decls.String),
+			decls.NewVar("networkStack", decls.String),
+			decls.NewVar("optionalCapabilities", decls.NewListType(decls.String)),
+			decls.NewVar("platform", decls.String),
+			decls.NewVar("topology", decls.String),
+			decls.NewVar("upgrade", decls.String),
+			decls.NewVar("version", decls.String),
+		),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create CEL environment: %w", err)
+	}
+	// Size capacity only: a non-zero length here would leave empty strings at the front of fact_keys
+	factKeys := make([]string, 0, len(envFlags.Facts))
+	for k := range envFlags.Facts {
+		factKeys = append(factKeys, k)
+	}
+	vars := map[string]interface{}{
+		"apiGroups":            envFlags.APIGroups,
+		"architecture":         envFlags.Architecture,
+		"externalConnectivity": envFlags.ExternalConnectivity,
+		"fact_keys":            factKeys,
+		"facts":                envFlags.Facts,
+		"featureGates":         envFlags.FeatureGates,
+		"network":              envFlags.Network,
+		"networkStack":         envFlags.NetworkStack,
+		"optionalCapabilities": envFlags.OptionalCapabilities,
+		"platform":             envFlags.Platform,
+		"topology":             envFlags.Topology,
+		"upgrade":              envFlags.Upgrade,
+		"version":              envFlags.Version,
+	}
+
+	for _, spec := range specs {
+		envSel := spec.EnvironmentSelector
+		// If there is no include or exclude CEL, include it implicitly
+		if envSel.IsEmpty() {
+			filteredSpecs = append(filteredSpecs, spec)
+			continue
+		}
+
+		if envSel.Exclude != "" {
+			prg, err := programForCEL(env, envSel.Exclude)
+			if err != nil {
+				return nil, err
+			}
+			out, _, err := prg.Eval(vars)
+			if err != nil {
+				return nil, fmt.Errorf("error evaluating CEL expression: %v", err)
+			}
+			// If it is explicitly excluded, don't check include
+			if out == types.True {
+				continue
+			}
+		}
+
+		if envSel.Include != "" {
+			prg, err := programForCEL(env, envSel.Include)
+			if err != nil {
+				return nil, err
+			}
+			out, _, err := prg.Eval(vars)
+			if err != nil {
+				return nil, fmt.Errorf("error evaluating CEL expression: %v", err)
+			}
+
+			if out == types.True {
+				filteredSpecs = append(filteredSpecs, spec)
+			}
+		} else { // If it hasn't been excluded and there is no "include", it is implicitly included
+			filteredSpecs = append(filteredSpecs, spec)
+		}
+	}
+
+	return filteredSpecs, nil
+}
+
+// AddLabel adds the labels to each spec.
+func (specs ExtensionTestSpecs) AddLabel(labels ...string) ExtensionTestSpecs {
+	for i := range specs {
+		specs[i].Labels.Insert(labels...)
+	}
+
+	return specs
+}
+
+// RemoveLabel removes the labels from each spec.
+func (specs ExtensionTestSpecs) RemoveLabel(labels ...string) ExtensionTestSpecs {
+	for i := range specs {
+		specs[i].Labels.Delete(labels...)
+	}
+
+	return specs
+}
+
+// SetTag specifies a key/value pair for each spec.
+func (specs ExtensionTestSpecs) SetTag(key, value string) ExtensionTestSpecs {
+	for i := range specs {
+		specs[i].Tags[key] = value
+	}
+
+	return specs
+}
+
+// UnsetTag removes the specified key from each spec.
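+//
+// A small, assumed sketch pairing SetTag with UnsetTag (the key is illustrative):
+//
+//	specs.SetTag("owner", "sig-testing")
+//	specs.UnsetTag("owner")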
+func (specs ExtensionTestSpecs) UnsetTag(key string) ExtensionTestSpecs { + for i := range specs { + delete(specs[i].Tags, key) + } + + return specs +} + +// Include adds the specified CEL expression to explicitly include tests by environment to each spec +func (specs ExtensionTestSpecs) Include(includeCEL string) ExtensionTestSpecs { + for _, spec := range specs { + spec.Include(includeCEL) + } + return specs +} + +// Exclude adds the specified CEL expression to explicitly exclude tests by environment to each spec +func (specs ExtensionTestSpecs) Exclude(excludeCEL string) ExtensionTestSpecs { + for _, spec := range specs { + spec.Exclude(excludeCEL) + } + return specs +} + +// Include adds the specified CEL expression to explicitly include tests by environment. +// If there is already an "include" defined, it will OR the expressions together +func (spec *ExtensionTestSpec) Include(includeCEL string) *ExtensionTestSpec { + existingInclude := spec.EnvironmentSelector.Include + if existingInclude != "" { + includeCEL = fmt.Sprintf("(%s) || (%s)", existingInclude, includeCEL) + } + + spec.EnvironmentSelector.Include = includeCEL + return spec +} + +// Exclude adds the specified CEL expression to explicitly exclude tests by environment. +// If there is already an "exclude" defined, it will OR the expressions together +func (spec *ExtensionTestSpec) Exclude(excludeCEL string) *ExtensionTestSpec { + existingExclude := spec.EnvironmentSelector.Exclude + if existingExclude != "" { + excludeCEL = fmt.Sprintf("(%s) || (%s)", existingExclude, excludeCEL) + } + + spec.EnvironmentSelector.Exclude = excludeCEL + return spec +} + +func runSpec(spec *ExtensionTestSpec) *ExtensionTestResult { + startTime := time.Now().UTC() + res := spec.Run() + duration := time.Since(startTime) + endTime := startTime.Add(duration).UTC() + if res == nil { + // this shouldn't happen + panic(fmt.Sprintf("test produced no result: %s", spec.Name)) + } + + res.Lifecycle = spec.Lifecycle + + // If the runner doesn't populate this info, we should set it + if res.StartTime == nil { + res.StartTime = dbtime.Ptr(startTime) + } + if res.EndTime == nil { + res.EndTime = dbtime.Ptr(endTime) + } + if res.Duration == 0 { + res.Duration = duration.Milliseconds() + } + + return res +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/task.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/task.go new file mode 100644 index 000000000000..e808bea87bb8 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/task.go @@ -0,0 +1,31 @@ +package extensiontests + +import "sync/atomic" + +type SpecTask struct { + fn func(spec ExtensionTestSpec) +} + +func (t *SpecTask) Run(spec ExtensionTestSpec) { + t.fn(spec) +} + +type TestResultTask struct { + fn func(result *ExtensionTestResult) +} + +func (t *TestResultTask) Run(result *ExtensionTestResult) { + t.fn(result) +} + +type OneTimeTask struct { + fn func() + executed int32 // Atomic boolean to indicate whether the function has been run +} + +func (t *OneTimeTask) Run() { + // Ensure one-time tasks are only run once + if atomic.CompareAndSwapInt32(&t.executed, 0, 1) { + t.fn() + } +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/types.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/types.go new file mode 100644 index 000000000000..2ec0444b68a4 --- /dev/null +++ 
b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests/types.go
@@ -0,0 +1,104 @@
+package extensiontests
+
+import (
+	"github.com/openshift-eng/openshift-tests-extension/pkg/dbtime"
+	"github.com/openshift-eng/openshift-tests-extension/pkg/util/sets"
+)
+
+type Lifecycle string
+
+var LifecycleInforming Lifecycle = "informing"
+var LifecycleBlocking Lifecycle = "blocking"
+
+type ExtensionTestSpecs []*ExtensionTestSpec
+
+type ExtensionTestSpec struct {
+	Name string `json:"name"`
+
+	// OriginalName contains the very first name this test was ever known as, used to preserve
+	// history across all names.
+	OriginalName string `json:"originalName,omitempty"`
+
+	// Labels are single string values to apply to the test spec
+	Labels sets.Set[string] `json:"labels"`
+
+	// Tags are key:value pairs
+	Tags map[string]string `json:"tags,omitempty"`
+
+	// Resources gives optional information about what's required to run this test.
+	Resources Resources `json:"resources"`
+
+	// Source is the origin of the test.
+	Source string `json:"source"`
+
+	// CodeLocations are the files where the spec originates from.
+	CodeLocations []string `json:"codeLocations,omitempty"`
+
+	// Lifecycle informs the executor whether the test is informing only (it should not cause the
+	// overall job run to fail) or blocking (a failure of the test is fatal).
+	// Informing lifecycle tests can be used temporarily to gather information about a test's stability.
+	// Tests must not remain informing forever.
+	Lifecycle Lifecycle `json:"lifecycle"`
+
+	// EnvironmentSelector allows for CEL expressions to be used to control test inclusion
+	EnvironmentSelector EnvironmentSelector `json:"environmentSelector,omitempty"`
+
+	// Run invokes a test
+	Run func() *ExtensionTestResult `json:"-"`
+
+	// Hook functions
+	afterAll   []*OneTimeTask
+	beforeAll  []*OneTimeTask
+	afterEach  []*TestResultTask
+	beforeEach []*SpecTask
+}
+
+type Resources struct {
+	Isolation Isolation `json:"isolation"`
+	Memory    string    `json:"memory,omitempty"`
+	Duration  string    `json:"duration,omitempty"`
+	Timeout   string    `json:"timeout,omitempty"`
+}
+
+type Isolation struct {
+	Mode     string   `json:"mode,omitempty"`
+	Conflict []string `json:"conflict,omitempty"`
+}
+
+type EnvironmentSelector struct {
+	Include string `json:"include,omitempty"`
+	Exclude string `json:"exclude,omitempty"`
+}
+
+func (e EnvironmentSelector) IsEmpty() bool {
+	return e.Include == "" && e.Exclude == ""
+}
+
+type ExtensionTestResults []*ExtensionTestResult
+
+type Result string
+
+var ResultPassed Result = "passed"
+var ResultSkipped Result = "skipped"
+var ResultFailed Result = "failed"
+
+type ExtensionTestResult struct {
+	Name      string         `json:"name"`
+	Lifecycle Lifecycle      `json:"lifecycle"`
+	Duration  int64          `json:"duration"`
+	StartTime *dbtime.DBTime `json:"startTime"`
+	EndTime   *dbtime.DBTime `json:"endTime"`
+	Result    Result         `json:"result"`
+	Output    string         `json:"output"`
+	Error     string         `json:"error,omitempty"`
+	Details   []Details      `json:"details,omitempty"`
+}
+
+// Details are human-readable messages to further explain skips, timeouts, etc.
+// They can also be used to provide contemporaneous information about failures
+// that may not be easily returned by must-gather. For larger artifacts (greater than
+// 10KB), write them to $EXTENSION_ARTIFACTS_DIR.
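+//
+// A hypothetical example attaching a small detail to a result:
+//
+//	res.Details = append(res.Details, Details{Name: "reason", Value: "api unreachable"})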
+type Details struct { + Name string `json:"name"` + Value interface{} `json:"value"` +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/registry.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/registry.go new file mode 100644 index 000000000000..bbae421df774 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/registry.go @@ -0,0 +1,39 @@ +package extension + +const DefaultExtension = "default" + +type Registry struct { + extensions map[string]*Extension +} + +func NewRegistry() *Registry { + var r Registry + return &r +} + +func (r *Registry) Walk(walkFn func(*Extension)) { + for k := range r.extensions { + if k == DefaultExtension { + continue + } + walkFn(r.extensions[k]) + } +} + +func (r *Registry) Get(name string) *Extension { + return r.extensions[name] +} + +func (r *Registry) Register(extension *Extension) { + if r.extensions == nil { + r.extensions = make(map[string]*Extension) + // first extension is default + r.extensions[DefaultExtension] = extension + } + + r.extensions[extension.Component.Identifier()] = extension +} + +func (r *Registry) Deregister(name string) { + delete(r.extensions, name) +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/types.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/types.go new file mode 100644 index 000000000000..3b51674f4aa8 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/extension/types.go @@ -0,0 +1,91 @@ +package extension + +import ( + "time" + + "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests" + "github.com/openshift-eng/openshift-tests-extension/pkg/util/sets" +) + +const CurrentExtensionAPIVersion = "v1.1" + +// Extension represents an extension to openshift-tests. +type Extension struct { + APIVersion string `json:"apiVersion"` + Source Source `json:"source"` + Component Component `json:"component"` + + // Suites that the extension wants to advertise/participate in. + Suites []Suite `json:"suites"` + + Images []Image `json:"images"` + + // Private data + specs extensiontests.ExtensionTestSpecs + obsoleteTests sets.Set[string] +} + +// Source contains the details of the commit and source URL. +type Source struct { + // Commit from which this binary was compiled. + Commit string `json:"commit"` + // BuildDate ISO8601 string of when the binary was built + BuildDate string `json:"build_date"` + // GitTreeState lets you know the status of the git tree (clean/dirty) + GitTreeState string `json:"git_tree_state"` + // SourceURL contains the url of the git repository (if known) that this extension was built from. + SourceURL string `json:"source_url,omitempty"` +} + +// Component represents the component the binary acts on. +type Component struct { + // The product this component is part of. + Product string `json:"product"` + // The type of the component. + Kind string `json:"type"` + // The name of the component. + Name string `json:"name"` +} + +type ClusterStability string + +var ( + // ClusterStabilityStable means that at no point during testing do we expect a component to take downtime and upgrades are not happening. + ClusterStabilityStable ClusterStability = "Stable" + + // ClusterStabilityDisruptive means that the suite is expected to induce outages to the cluster. 
+	ClusterStabilityDisruptive ClusterStability = "Disruptive"
+
+	// ClusterStabilityUpgrade was previously defined, but was removed by @deads2k. Please contact him if you find a
+	// use case for it and it needs to be reintroduced.
+	// ClusterStabilityUpgrade ClusterStability = "Upgrade"
+)
+
+// Suite represents additional suites the extension wants to advertise. Child suites, when executed in the context
+// of a parent, have their count, parallelism, stability, and timeout options superseded by the parent suite's.
+type Suite struct {
+	Name        string `json:"name"`
+	Description string `json:"description"`
+
+	// Parents are the parent suites this suite is part of.
+	Parents []string `json:"parents,omitempty"`
+	// Qualifiers are CEL expressions, OR'd together, that select the tests that are members of the suite.
+	Qualifiers []string `json:"qualifiers,omitempty"`
+
+	// Count is the default number of times to execute each test in this suite.
+	Count int `json:"count,omitempty"`
+	// Parallelism is the maximum parallelism of this suite.
+	Parallelism int `json:"parallelism,omitempty"`
+	// ClusterStability informs openshift-tests whether this entire test suite is expected to be disruptive or not
+	// to normal cluster operations.
+	ClusterStability ClusterStability `json:"clusterStability,omitempty"`
+	// TestTimeout is the default timeout for tests in this suite.
+	TestTimeout *time.Duration `json:"testTimeout,omitempty"`
+}
+
+type Image struct {
+	Index    int    `json:"index"`
+	Registry string `json:"registry"`
+	Name     string `json:"name"`
+	Version  string `json:"version"`
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/component.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/component.go
new file mode 100644
index 000000000000..ca9e425c4402
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/component.go
@@ -0,0 +1,25 @@
+package flags
+
+import (
+	"github.com/spf13/pflag"
+)
+
+const DefaultExtension = "default"
+
+// ComponentFlags contains information for specifying the component.
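+//
+// An assumed wiring sketch with a cobra command's flag set; after parsing,
+// Component names the extension to act on (it defaults to "default"):
+//
+//	cf := NewComponentFlags()
+//	cf.BindFlags(cmd.Flags())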
+type ComponentFlags struct { + Component string +} + +func NewComponentFlags() *ComponentFlags { + return &ComponentFlags{ + Component: DefaultExtension, + } +} + +func (f *ComponentFlags) BindFlags(fs *pflag.FlagSet) { + fs.StringVar(&f.Component, + "component", + f.Component, + "specify the component to enable") +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/concurrency.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/concurrency.go new file mode 100644 index 000000000000..2db07c765475 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/concurrency.go @@ -0,0 +1,23 @@ +package flags + +import "github.com/spf13/pflag" + +// ConcurrencyFlags contains information for configuring concurrency +type ConcurrencyFlags struct { + MaxConcurency int +} + +func NewConcurrencyFlags() *ConcurrencyFlags { + return &ConcurrencyFlags{ + MaxConcurency: 10, + } +} + +func (f *ConcurrencyFlags) BindFlags(fs *pflag.FlagSet) { + fs.IntVarP(&f.MaxConcurency, + "max-concurrency", + "c", + f.MaxConcurency, + "maximum number of tests to run in parallel", + ) +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/environment.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/environment.go new file mode 100644 index 000000000000..af7a0258e2e2 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/environment.go @@ -0,0 +1,114 @@ +package flags + +import ( + "reflect" + + "github.com/spf13/pflag" +) + +type EnvironmentalFlags struct { + APIGroups []string + Architecture string + ExternalConnectivity string + Facts map[string]string + FeatureGates []string + Network string + NetworkStack string + OptionalCapabilities []string + Platform string + Topology string + Upgrade string + Version string +} + +func NewEnvironmentalFlags() *EnvironmentalFlags { + return &EnvironmentalFlags{} +} + +func (f *EnvironmentalFlags) BindFlags(fs *pflag.FlagSet) { + fs.StringArrayVar(&f.APIGroups, + "api-group", + f.APIGroups, + "The API groups supported by this cluster. Since: v1.1") + fs.StringVar(&f.Architecture, + "architecture", + "", + "The CPU architecture of the target cluster (\"amd64\", \"arm64\"). Since: v1.0") + fs.StringVar(&f.ExternalConnectivity, + "external-connectivity", + "", + "The External Connectivity of the target cluster (\"Disconnected\", \"Direct\", \"Proxied\"). Since: v1.0") + fs.StringArrayVar(&f.FeatureGates, + "feature-gate", + f.FeatureGates, + "The feature gates enabled on this cluster. Since: v1.1") + fs.StringToStringVar(&f.Facts, + "fact", + make(map[string]string), + "Facts advertised by cluster components. Since: v1.0") + fs.StringVar(&f.Network, + "network", + "", + "The network of the target cluster (\"ovn\", \"sdn\"). Since: v1.0") + fs.StringVar(&f.NetworkStack, + "network-stack", + "", + "The network stack of the target cluster (\"ipv6\", \"ipv4\", \"dual\"). Since: v1.0") + fs.StringSliceVar(&f.OptionalCapabilities, + "optional-capability", + []string{}, + "An Optional Capability of the target cluster. Can be passed multiple times. Since: v1.0") + fs.StringVar(&f.Platform, + "platform", + "", + "The hardware or cloud platform (\"aws\", \"gcp\", \"metal\", ...). Since: v1.0") + fs.StringVar(&f.Topology, + "topology", + "", + "The target cluster topology (\"ha\", \"microshift\", ...). 
Since: v1.0") + fs.StringVar(&f.Upgrade, + "upgrade", + "", + "The upgrade that was performed prior to the test run (\"micro\", \"minor\"). Since: v1.0") + fs.StringVar(&f.Version, + "version", + "", + "\"major.minor\" version of target cluster. Since: v1.0") +} + +func (f *EnvironmentalFlags) IsEmpty() bool { + v := reflect.ValueOf(*f) + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + + switch field.Kind() { + case reflect.Slice, reflect.Map: + if !field.IsNil() && field.Len() > 0 { + return false + } + default: + if !reflect.DeepEqual(field.Interface(), reflect.Zero(field.Type()).Interface()) { + return false + } + } + } + + return true +} + +// EnvironmentFlagVersions holds the "Since" version metadata for each flag. +var EnvironmentFlagVersions = map[string]string{ + "api-group": "v1.1", + "architecture": "v1.0", + "external-connectivity": "v1.0", + "fact": "v1.0", + "feature-gate": "v1.1", + "network": "v1.0", + "network-stack": "v1.0", + "optional-capability": "v1.0", + "platform": "v1.0", + "topology": "v1.0", + "upgrade": "v1.0", + "version": "v1.0", +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/names.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/names.go new file mode 100644 index 000000000000..9e5864839588 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/names.go @@ -0,0 +1,24 @@ +package flags + +import ( + "github.com/spf13/pflag" +) + +// NamesFlags contains information for specifying multiple test names. +type NamesFlags struct { + Names []string +} + +func NewNamesFlags() *NamesFlags { + return &NamesFlags{ + Names: []string{}, + } +} + +func (f *NamesFlags) BindFlags(fs *pflag.FlagSet) { + fs.StringArrayVarP(&f.Names, + "names", + "n", + f.Names, + "specify test name (can be specified multiple times)") +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/output.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/output.go new file mode 100644 index 000000000000..24f49f6387b1 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/output.go @@ -0,0 +1,95 @@ +package flags + +import ( + "encoding/json" + "reflect" + "strings" + + "github.com/pkg/errors" + "github.com/spf13/pflag" +) + +// OutputFlags contains information for specifying multiple test names. +type OutputFlags struct { + Output string +} + +func NewOutputFlags() *OutputFlags { + return &OutputFlags{ + Output: "json", + } +} + +func (f *OutputFlags) BindFlags(fs *pflag.FlagSet) { + fs.StringVarP(&f.Output, + "output", + "o", + f.Output, + "output mode") +} + +func (o *OutputFlags) Marshal(v interface{}) ([]byte, error) { + switch o.Output { + case "", "json": + j, err := json.MarshalIndent(&v, "", " ") + if err != nil { + return nil, err + } + return j, nil + case "jsonl": + // Check if v is a slice or array + val := reflect.ValueOf(v) + if val.Kind() == reflect.Slice || val.Kind() == reflect.Array { + var result []byte + for i := 0; i < val.Len(); i++ { + item := val.Index(i).Interface() + j, err := json.Marshal(item) + if err != nil { + return nil, err + } + result = append(result, j...) 
+ result = append(result, '\n') // Append newline after each item + } + return result, nil + } + return nil, errors.New("jsonl format requires a slice or array") + case "names": + val := reflect.ValueOf(v) + if val.Kind() == reflect.Slice || val.Kind() == reflect.Array { + var names []string + outerLoop: + for i := 0; i < val.Len(); i++ { + item := val.Index(i) + // Check for Name() or Identifier() methods + itemInterface := item.Interface() + nameFuncs := []string{"Name", "Identifier"} + for _, fn := range nameFuncs { + method := reflect.ValueOf(itemInterface).MethodByName(fn) + if method.IsValid() && method.Kind() == reflect.Func && method.Type().NumIn() == 0 && method.Type().NumOut() == 1 && method.Type().Out(0).Kind() == reflect.String { + name := method.Call(nil)[0].String() + names = append(names, name) + continue outerLoop + } + } + + // Dereference pointer if needed + if item.Kind() == reflect.Ptr { + item = item.Elem() + } + // Check for struct with Name field + if item.Kind() == reflect.Struct { + nameField := item.FieldByName("Name") + if nameField.IsValid() && nameField.Kind() == reflect.String { + names = append(names, nameField.String()) + } + } else { + return nil, errors.New("items must have a Name field or a Name() method") + } + } + return []byte(strings.Join(names, "\n")), nil + } + return nil, errors.New("names format requires an array of structs") + default: + return nil, errors.Errorf("invalid output format: %s", o.Output) + } +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/suite.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/suite.go new file mode 100644 index 000000000000..23de832a850a --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/flags/suite.go @@ -0,0 +1,21 @@ +package flags + +import ( + "github.com/spf13/pflag" +) + +// SuiteFlags contains information for specifying the suite. 
+type SuiteFlags struct {
+	Suite string
+}
+
+func NewSuiteFlags() *SuiteFlags {
+	return &SuiteFlags{}
+}
+
+func (f *SuiteFlags) BindFlags(fs *pflag.FlagSet) {
+	fs.StringVar(&f.Suite,
+		"suite",
+		f.Suite,
+		"specify the suite to use")
+}
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo/util.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo/util.go
new file mode 100644
index 000000000000..2b7f5aa6d117
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo/util.go
@@ -0,0 +1,177 @@
+package ginkgo
+
+import (
+	"fmt"
+	"os"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/ginkgo/v2/types"
+	"github.com/onsi/gomega"
+	"github.com/pkg/errors"
+
+	"github.com/openshift-eng/openshift-tests-extension/pkg/util/sets"
+
+	ext "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests"
+)
+
+func configureGinkgo() (*types.SuiteConfig, *types.ReporterConfig, error) {
+	if !ginkgo.GetSuite().InPhaseBuildTree() {
+		if err := ginkgo.GetSuite().BuildTree(); err != nil {
+			return nil, nil, errors.Wrapf(err, "couldn't build ginkgo tree")
+		}
+	}
+
+	// Ginkgo initialization
+	ginkgo.GetSuite().ClearBeforeAndAfterSuiteNodes()
+	suiteConfig, reporterConfig := ginkgo.GinkgoConfiguration()
+	suiteConfig.RandomizeAllSpecs = true
+	suiteConfig.Timeout = 24 * time.Hour
+	reporterConfig.NoColor = true
+	reporterConfig.Verbose = true
+	ginkgo.SetReporterConfig(reporterConfig)
+
+	// Write output to Stderr
+	ginkgo.GinkgoWriter = ginkgo.NewWriter(os.Stderr)
+
+	gomega.RegisterFailHandler(ginkgo.Fail)
+
+	return &suiteConfig, &reporterConfig, nil
+}
+
+// BuildExtensionTestSpecsFromOpenShiftGinkgoSuite generates OTE specs for Ginkgo tests. While OTE isn't limited to
+// running Ginkgo tests (anything that implements the ExtensionTestSpec interface can be used), it's the most common
+// course of action. The typical use case is to omit selectFns; if provided, they filter the returned list
+// of specs and are applied in the order given.
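+//
+// A typical, hedged invocation (vendored k8s specs are excluded by default):
+//
+//	specs, err := BuildExtensionTestSpecsFromOpenShiftGinkgoSuite()
+//	if err != nil {
+//		panic(err)
+//	}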
+func BuildExtensionTestSpecsFromOpenShiftGinkgoSuite(selectFns ...ext.SelectFunction) (ext.ExtensionTestSpecs, error) {
+	var specs ext.ExtensionTestSpecs
+	var enforceSerialExecutionForGinkgo sync.Mutex // in-process parallelization for ginkgo is impossible so far
+
+	if _, _, err := configureGinkgo(); err != nil {
+		return nil, err
+	}
+
+	cwd, err := os.Getwd()
+	if err != nil {
+		return nil, errors.Wrap(err, "couldn't get current working directory")
+	}
+
+	ginkgo.GetSuite().WalkTests(func(name string, spec types.TestSpec) {
+		var codeLocations []string
+		for _, cl := range spec.CodeLocations() {
+			codeLocations = append(codeLocations, cl.String())
+		}
+
+		testCase := &ext.ExtensionTestSpec{
+			Name:          spec.Text(),
+			Labels:        sets.New[string](spec.Labels()...),
+			CodeLocations: codeLocations,
+			Lifecycle:     GetLifecycle(spec.Labels()),
+			Run: func() *ext.ExtensionTestResult {
+				enforceSerialExecutionForGinkgo.Lock()
+				defer enforceSerialExecutionForGinkgo.Unlock()
+
+				suiteConfig, reporterConfig, _ := configureGinkgo()
+
+				result := &ext.ExtensionTestResult{
+					Name: spec.Text(),
+				}
+
+				var summary types.SpecReport
+				ginkgo.GetSuite().RunSpec(spec, ginkgo.Labels{}, "", cwd, ginkgo.GetFailer(), ginkgo.GetWriter(), *suiteConfig,
+					*reporterConfig)
+				for _, report := range ginkgo.GetSuite().GetReport().SpecReports {
+					if report.NumAttempts > 0 {
+						summary = report
+					}
+				}
+
+				result.Output = summary.CapturedGinkgoWriterOutput
+				result.Error = summary.CapturedStdOutErr
+
+				switch {
+				case summary.State == types.SpecStatePassed:
+					result.Result = ext.ResultPassed
+				case summary.State == types.SpecStateSkipped:
+					result.Result = ext.ResultSkipped
+				case summary.State == types.SpecStateFailed, summary.State == types.SpecStatePanicked, summary.State == types.SpecStateInterrupted:
+					result.Result = ext.ResultFailed
+					var errs []string // named errs to avoid shadowing the errors package
+					if len(summary.Failure.ForwardedPanic) > 0 {
+						if len(summary.Failure.Location.FullStackTrace) > 0 {
+							errs = append(errs, fmt.Sprintf("\n%s\n", summary.Failure.Location.FullStackTrace))
+						}
+						errs = append(errs, fmt.Sprintf("fail [%s:%d]: Test Panicked: %s", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.ForwardedPanic))
+					}
+					errs = append(errs, fmt.Sprintf("fail [%s:%d]: %s", lastFilenameSegment(summary.Failure.Location.FileName), summary.Failure.Location.LineNumber, summary.Failure.Message))
+					result.Error = strings.Join(errs, "\n")
+				default:
+					panic(fmt.Sprintf("test produced unknown outcome: %#v", summary))
+				}
+
+				return result
+			},
+		}
+		specs = append(specs, testCase)
+	})
+
+	// Default select function is to exclude vendored specs. When relying on the Kubernetes test framework for its helpers,
+	// it also unfortunately ends up importing *all* Ginkgo specs. This is unsafe: it would potentially override the
+	// kube specs already present in origin. The best course of action is to enforce this behavior on everyone. If, for
+	// some reason, you must include vendored specs, you can opt in directly by supplying your own SelectFunctions or using
+	// AllTestsIncludingVendored().
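+	// For example (hypothetical), opting back in to vendored specs:
+	//
+	//	specs, _ := BuildExtensionTestSpecsFromOpenShiftGinkgoSuite(ext.AllTestsIncludingVendored())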
+ if len(selectFns) == 0 { + selectFns = []ext.SelectFunction{ext.ModuleTestsOnly()} + } + + for _, selectFn := range selectFns { + specs = specs.Select(selectFn) + } + + return specs, nil +} + +func Informing() ginkgo.Labels { + return ginkgo.Label(fmt.Sprintf("Lifecycle:%s", ext.LifecycleInforming)) +} + +func Slow() ginkgo.Labels { + return ginkgo.Label("SLOW") +} + +func Blocking() ginkgo.Labels { + return ginkgo.Label(fmt.Sprintf("Lifecycle:%s", ext.LifecycleBlocking)) +} + +func GetLifecycle(labels ginkgo.Labels) ext.Lifecycle { + for _, label := range labels { + res := strings.Split(label, ":") + if len(res) != 2 || !strings.EqualFold(res[0], "lifecycle") { + continue + } + return MustLifecycle(res[1]) // this panics if unsupported lifecycle is used + } + + return ext.LifecycleBlocking +} + +func MustLifecycle(l string) ext.Lifecycle { + switch ext.Lifecycle(l) { + case ext.LifecycleInforming, ext.LifecycleBlocking: + return ext.Lifecycle(l) + default: + panic(fmt.Sprintf("unknown test lifecycle: %s", l)) + } +} + +func lastFilenameSegment(filename string) string { + if parts := strings.Split(filename, "/vendor/"); len(parts) > 1 { + return parts[len(parts)-1] + } + if parts := strings.Split(filename, "/src/"); len(parts) > 1 { + return parts[len(parts)-1] + } + return filename +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/LICENSE b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/README.md b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/README.md
new file mode 100644
index 000000000000..1a5def7723b4
--- /dev/null
+++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/README.md
@@ -0,0 +1,3 @@
+This package is copy/pasted from [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery/tree/master/pkg/util/sets)
+to avoid a circular dependency: `openshift/kubernetes` requires OTE, and without this copy
+OTE would in turn require `kubernetes/kubernetes`.
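+
+A short usage sketch of the vendored generic set (mirroring the upstream API):
+
+    s := sets.New[string]("a", "b")
+    s.Insert("c")
+    s.Has("b") // true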
diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/byte.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/byte.go new file mode 100644 index 000000000000..4d7a17c3afad --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/byte.go @@ -0,0 +1,137 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sets + +// Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption. +// +// Deprecated: use generic Set instead. +// new ways: +// s1 := Set[byte]{} +// s2 := New[byte]() +type Byte map[byte]Empty + +// NewByte creates a Byte from a list of values. +func NewByte(items ...byte) Byte { + return Byte(New[byte](items...)) +} + +// ByteKeySet creates a Byte from a keys of a map[byte](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func ByteKeySet[T any](theMap map[byte]T) Byte { + return Byte(KeySet(theMap)) +} + +// Insert adds items to the set. +func (s Byte) Insert(items ...byte) Byte { + return Byte(cast(s).Insert(items...)) +} + +// Delete removes all items from the set. +func (s Byte) Delete(items ...byte) Byte { + return Byte(cast(s).Delete(items...)) +} + +// Has returns true if and only if item is contained in the set. +func (s Byte) Has(item byte) bool { + return cast(s).Has(item) +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Byte) HasAll(items ...byte) bool { + return cast(s).HasAll(items...) +} + +// HasAny returns true if any items are contained in the set. +func (s Byte) HasAny(items ...byte) bool { + return cast(s).HasAny(items...) +} + +// Clone returns a new set which is a copy of the current set. +func (s Byte) Clone() Byte { + return Byte(cast(s).Clone()) +} + +// Difference returns a set of objects that are not in s2. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s1 Byte) Difference(s2 Byte) Byte { + return Byte(cast(s1).Difference(cast(s2))) +} + +// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.SymmetricDifference(s2) = {a3, a4, a5} +// s2.SymmetricDifference(s1) = {a3, a4, a5} +func (s1 Byte) SymmetricDifference(s2 Byte) Byte { + return Byte(cast(s1).SymmetricDifference(cast(s2))) +} + +// Union returns a new set which includes items in either s1 or s2. 
+// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Byte) Union(s2 Byte) Byte { + return Byte(cast(s1).Union(cast(s2))) +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Byte) Intersection(s2 Byte) Byte { + return Byte(cast(s1).Intersection(cast(s2))) +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Byte) IsSuperset(s2 Byte) bool { + return cast(s1).IsSuperset(cast(s2)) +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Byte) Equal(s2 Byte) bool { + return cast(s1).Equal(cast(s2)) +} + +// List returns the contents as a sorted byte slice. +func (s Byte) List() []byte { + return List(cast(s)) +} + +// UnsortedList returns the slice with contents in random order. +func (s Byte) UnsortedList() []byte { + return cast(s).UnsortedList() +} + +// PopAny returns a single element from the set. +func (s Byte) PopAny() (byte, bool) { + return cast(s).PopAny() +} + +// Len returns the size of the set. +func (s Byte) Len() int { + return len(s) +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/doc.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/doc.go new file mode 100644 index 000000000000..997f5e0330e0 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package sets has generic set and specified sets. Generic set will +// replace specified ones over time. And specific ones are deprecated. +package sets // import "github.com/openshift-eng/openshift-tests-extension/pkg/util/sets" diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/empty.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/empty.go new file mode 100644 index 000000000000..fbb1df06d922 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/empty.go @@ -0,0 +1,21 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sets + +// Empty is public since it is used by some internal API objects for conversions between external +// string arrays and internal sets, and conversion logic requires public types today. +type Empty struct{} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int.go new file mode 100644 index 000000000000..5876fc9deb96 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int.go @@ -0,0 +1,137 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sets + +// Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption. +// +// Deprecated: use generic Set instead. +// new ways: +// s1 := Set[int]{} +// s2 := New[int]() +type Int map[int]Empty + +// NewInt creates a Int from a list of values. +func NewInt(items ...int) Int { + return Int(New[int](items...)) +} + +// IntKeySet creates a Int from a keys of a map[int](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func IntKeySet[T any](theMap map[int]T) Int { + return Int(KeySet(theMap)) +} + +// Insert adds items to the set. +func (s Int) Insert(items ...int) Int { + return Int(cast(s).Insert(items...)) +} + +// Delete removes all items from the set. +func (s Int) Delete(items ...int) Int { + return Int(cast(s).Delete(items...)) +} + +// Has returns true if and only if item is contained in the set. +func (s Int) Has(item int) bool { + return cast(s).Has(item) +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int) HasAll(items ...int) bool { + return cast(s).HasAll(items...) +} + +// HasAny returns true if any items are contained in the set. +func (s Int) HasAny(items ...int) bool { + return cast(s).HasAny(items...) +} + +// Clone returns a new set which is a copy of the current set. +func (s Int) Clone() Int { + return Int(cast(s).Clone()) +} + +// Difference returns a set of objects that are not in s2. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s1 Int) Difference(s2 Int) Int { + return Int(cast(s1).Difference(cast(s2))) +} + +// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.SymmetricDifference(s2) = {a3, a4, a5} +// s2.SymmetricDifference(s1) = {a3, a4, a5} +func (s1 Int) SymmetricDifference(s2 Int) Int { + return Int(cast(s1).SymmetricDifference(cast(s2))) +} + +// Union returns a new set which includes items in either s1 or s2. 
+// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int) Union(s2 Int) Int { + return Int(cast(s1).Union(cast(s2))) +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int) Intersection(s2 Int) Int { + return Int(cast(s1).Intersection(cast(s2))) +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int) IsSuperset(s2 Int) bool { + return cast(s1).IsSuperset(cast(s2)) +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int) Equal(s2 Int) bool { + return cast(s1).Equal(cast(s2)) +} + +// List returns the contents as a sorted int slice. +func (s Int) List() []int { + return List(cast(s)) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int) UnsortedList() []int { + return cast(s).UnsortedList() +} + +// PopAny returns a single element from the set. +func (s Int) PopAny() (int, bool) { + return cast(s).PopAny() +} + +// Len returns the size of the set. +func (s Int) Len() int { + return len(s) +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int32.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int32.go new file mode 100644 index 000000000000..2c640c5d0f1d --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int32.go @@ -0,0 +1,137 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sets + +// Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption. +// +// Deprecated: use generic Set instead. +// new ways: +// s1 := Set[int32]{} +// s2 := New[int32]() +type Int32 map[int32]Empty + +// NewInt32 creates a Int32 from a list of values. +func NewInt32(items ...int32) Int32 { + return Int32(New[int32](items...)) +} + +// Int32KeySet creates a Int32 from a keys of a map[int32](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func Int32KeySet[T any](theMap map[int32]T) Int32 { + return Int32(KeySet(theMap)) +} + +// Insert adds items to the set. +func (s Int32) Insert(items ...int32) Int32 { + return Int32(cast(s).Insert(items...)) +} + +// Delete removes all items from the set. +func (s Int32) Delete(items ...int32) Int32 { + return Int32(cast(s).Delete(items...)) +} + +// Has returns true if and only if item is contained in the set. +func (s Int32) Has(item int32) bool { + return cast(s).Has(item) +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int32) HasAll(items ...int32) bool { + return cast(s).HasAll(items...) +} + +// HasAny returns true if any items are contained in the set. 
+func (s Int32) HasAny(items ...int32) bool { + return cast(s).HasAny(items...) +} + +// Clone returns a new set which is a copy of the current set. +func (s Int32) Clone() Int32 { + return Int32(cast(s).Clone()) +} + +// Difference returns a set of objects that are not in s2. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s1 Int32) Difference(s2 Int32) Int32 { + return Int32(cast(s1).Difference(cast(s2))) +} + +// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.SymmetricDifference(s2) = {a3, a4, a5} +// s2.SymmetricDifference(s1) = {a3, a4, a5} +func (s1 Int32) SymmetricDifference(s2 Int32) Int32 { + return Int32(cast(s1).SymmetricDifference(cast(s2))) +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int32) Union(s2 Int32) Int32 { + return Int32(cast(s1).Union(cast(s2))) +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int32) Intersection(s2 Int32) Int32 { + return Int32(cast(s1).Intersection(cast(s2))) +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int32) IsSuperset(s2 Int32) bool { + return cast(s1).IsSuperset(cast(s2)) +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int32) Equal(s2 Int32) bool { + return cast(s1).Equal(cast(s2)) +} + +// List returns the contents as a sorted int32 slice. +func (s Int32) List() []int32 { + return List(cast(s)) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int32) UnsortedList() []int32 { + return cast(s).UnsortedList() +} + +// PopAny returns a single element from the set. +func (s Int32) PopAny() (int32, bool) { + return cast(s).PopAny() +} + +// Len returns the size of the set. +func (s Int32) Len() int { + return len(s) +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int64.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int64.go new file mode 100644 index 000000000000..bf3eb3ffa259 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/int64.go @@ -0,0 +1,137 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sets + +// Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption. +// +// Deprecated: use generic Set instead. 
+// new ways: +// s1 := Set[int64]{} +// s2 := New[int64]() +type Int64 map[int64]Empty + +// NewInt64 creates a Int64 from a list of values. +func NewInt64(items ...int64) Int64 { + return Int64(New[int64](items...)) +} + +// Int64KeySet creates a Int64 from a keys of a map[int64](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func Int64KeySet[T any](theMap map[int64]T) Int64 { + return Int64(KeySet(theMap)) +} + +// Insert adds items to the set. +func (s Int64) Insert(items ...int64) Int64 { + return Int64(cast(s).Insert(items...)) +} + +// Delete removes all items from the set. +func (s Int64) Delete(items ...int64) Int64 { + return Int64(cast(s).Delete(items...)) +} + +// Has returns true if and only if item is contained in the set. +func (s Int64) Has(item int64) bool { + return cast(s).Has(item) +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int64) HasAll(items ...int64) bool { + return cast(s).HasAll(items...) +} + +// HasAny returns true if any items are contained in the set. +func (s Int64) HasAny(items ...int64) bool { + return cast(s).HasAny(items...) +} + +// Clone returns a new set which is a copy of the current set. +func (s Int64) Clone() Int64 { + return Int64(cast(s).Clone()) +} + +// Difference returns a set of objects that are not in s2. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s1 Int64) Difference(s2 Int64) Int64 { + return Int64(cast(s1).Difference(cast(s2))) +} + +// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.SymmetricDifference(s2) = {a3, a4, a5} +// s2.SymmetricDifference(s1) = {a3, a4, a5} +func (s1 Int64) SymmetricDifference(s2 Int64) Int64 { + return Int64(cast(s1).SymmetricDifference(cast(s2))) +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int64) Union(s2 Int64) Int64 { + return Int64(cast(s1).Union(cast(s2))) +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int64) Intersection(s2 Int64) Int64 { + return Int64(cast(s1).Intersection(cast(s2))) +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int64) IsSuperset(s2 Int64) bool { + return cast(s1).IsSuperset(cast(s2)) +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int64) Equal(s2 Int64) bool { + return cast(s1).Equal(cast(s2)) +} + +// List returns the contents as a sorted int64 slice. +func (s Int64) List() []int64 { + return List(cast(s)) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int64) UnsortedList() []int64 { + return cast(s).UnsortedList() +} + +// PopAny returns a single element from the set. +func (s Int64) PopAny() (int64, bool) { + return cast(s).PopAny() +} + +// Len returns the size of the set. 
+func (s Int64) Len() int { + return len(s) +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/set.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/set.go new file mode 100644 index 000000000000..cd961c8c5939 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/set.go @@ -0,0 +1,236 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sets + +import ( + "cmp" + "sort" +) + +// Set is a set of the same type elements, implemented via map[comparable]struct{} for minimal memory consumption. +type Set[T comparable] map[T]Empty + +// cast transforms specified set to generic Set[T]. +func cast[T comparable](s map[T]Empty) Set[T] { return s } + +// New creates a Set from a list of values. +// NOTE: type param must be explicitly instantiated if given items are empty. +func New[T comparable](items ...T) Set[T] { + ss := make(Set[T], len(items)) + ss.Insert(items...) + return ss +} + +// KeySet creates a Set from a keys of a map[comparable](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func KeySet[T comparable, V any](theMap map[T]V) Set[T] { + ret := make(Set[T], len(theMap)) + for keyValue := range theMap { + ret.Insert(keyValue) + } + return ret +} + +// Insert adds items to the set. +func (s Set[T]) Insert(items ...T) Set[T] { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +func Insert[T comparable](set Set[T], items ...T) Set[T] { + return set.Insert(items...) +} + +// Delete removes all items from the set. +func (s Set[T]) Delete(items ...T) Set[T] { + for _, item := range items { + delete(s, item) + } + return s +} + +// Clear empties the set. +// It is preferable to replace the set with a newly constructed set, +// but not all callers can do that (when there are other references to the map). +func (s Set[T]) Clear() Set[T] { + clear(s) + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Set[T]) Has(item T) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Set[T]) HasAll(items ...T) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Set[T]) HasAny(items ...T) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Clone returns a new set which is a copy of the current set. +func (s Set[T]) Clone() Set[T] { + result := make(Set[T], len(s)) + for key := range s { + result.Insert(key) + } + return result +} + +// Difference returns a set of objects that are not in s2. 
+// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s1 Set[T]) Difference(s2 Set[T]) Set[T] { + result := New[T]() + for key := range s1 { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.SymmetricDifference(s2) = {a3, a4, a5} +// s2.SymmetricDifference(s1) = {a3, a4, a5} +func (s1 Set[T]) SymmetricDifference(s2 Set[T]) Set[T] { + return s1.Difference(s2).Union(s2.Difference(s1)) +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Set[T]) Union(s2 Set[T]) Set[T] { + result := s1.Clone() + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Set[T]) Intersection(s2 Set[T]) Set[T] { + var walk, other Set[T] + result := New[T]() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Set[T]) IsSuperset(s2 Set[T]) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Set[T]) Equal(s2 Set[T]) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfGeneric[T cmp.Ordered] []T + +func (g sortableSliceOfGeneric[T]) Len() int { return len(g) } +func (g sortableSliceOfGeneric[T]) Less(i, j int) bool { return less[T](g[i], g[j]) } +func (g sortableSliceOfGeneric[T]) Swap(i, j int) { g[i], g[j] = g[j], g[i] } + +// List returns the contents as a sorted T slice. +// +// This is a separate function and not a method because not all types supported +// by Generic are ordered and only those can be sorted. +func List[T cmp.Ordered](s Set[T]) []T { + res := make(sortableSliceOfGeneric[T], 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return res +} + +// UnsortedList returns the slice with contents in random order. +func (s Set[T]) UnsortedList() []T { + res := make([]T, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// PopAny returns a single element from the set. +func (s Set[T]) PopAny() (T, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue T + return zeroValue, false +} + +// Len returns the size of the set. +func (s Set[T]) Len() int { + return len(s) +} + +func less[T cmp.Ordered](lhs, rhs T) bool { + return lhs < rhs +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/string.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/string.go new file mode 100644 index 000000000000..1dab6d13cc79 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/util/sets/string.go @@ -0,0 +1,137 @@ +/* +Copyright 2022 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sets + +// String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. +// +// Deprecated: use generic Set instead. +// new ways: +// s1 := Set[string]{} +// s2 := New[string]() +type String map[string]Empty + +// NewString creates a String from a list of values. +func NewString(items ...string) String { + return String(New[string](items...)) +} + +// StringKeySet creates a String from a keys of a map[string](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func StringKeySet[T any](theMap map[string]T) String { + return String(KeySet(theMap)) +} + +// Insert adds items to the set. +func (s String) Insert(items ...string) String { + return String(cast(s).Insert(items...)) +} + +// Delete removes all items from the set. +func (s String) Delete(items ...string) String { + return String(cast(s).Delete(items...)) +} + +// Has returns true if and only if item is contained in the set. +func (s String) Has(item string) bool { + return cast(s).Has(item) +} + +// HasAll returns true if and only if all items are contained in the set. +func (s String) HasAll(items ...string) bool { + return cast(s).HasAll(items...) +} + +// HasAny returns true if any items are contained in the set. +func (s String) HasAny(items ...string) bool { + return cast(s).HasAny(items...) +} + +// Clone returns a new set which is a copy of the current set. +func (s String) Clone() String { + return String(cast(s).Clone()) +} + +// Difference returns a set of objects that are not in s2. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s1 String) Difference(s2 String) String { + return String(cast(s1).Difference(cast(s2))) +} + +// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection. +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.SymmetricDifference(s2) = {a3, a4, a5} +// s2.SymmetricDifference(s1) = {a3, a4, a5} +func (s1 String) SymmetricDifference(s2 String) String { + return String(cast(s1).SymmetricDifference(cast(s2))) +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 String) Union(s2 String) String { + return String(cast(s1).Union(cast(s2))) +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 String) Intersection(s2 String) String { + return String(cast(s1).Intersection(cast(s2))) +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 String) IsSuperset(s2 String) bool { + return cast(s1).IsSuperset(cast(s2)) +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. 
+// (In practice, this means same elements, order doesn't matter) +func (s1 String) Equal(s2 String) bool { + return cast(s1).Equal(cast(s2)) +} + +// List returns the contents as a sorted string slice. +func (s String) List() []string { + return List(cast(s)) +} + +// UnsortedList returns the slice with contents in random order. +func (s String) UnsortedList() []string { + return cast(s).UnsortedList() +} + +// PopAny returns a single element from the set. +func (s String) PopAny() (string, bool) { + return cast(s).PopAny() +} + +// Len returns the size of the set. +func (s String) Len() int { + return len(s) +} diff --git a/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/version/version.go b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/version/version.go new file mode 100644 index 000000000000..7d6a3309b312 --- /dev/null +++ b/vendor/github.com/openshift-eng/openshift-tests-extension/pkg/version/version.go @@ -0,0 +1,11 @@ +package version + +var ( + // CommitFromGit is a constant representing the source version that + // generated this build. It should be set during build via -ldflags. + CommitFromGit string + // BuildDate in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') + BuildDate string + // GitTreeState has the state of git tree, either "clean" or "dirty" + GitTreeState string +) diff --git a/vendor/modules.txt b/vendor/modules.txt index da7416574cd7..043f028ca395 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -836,6 +836,18 @@ github.com/opencontainers/runtime-spec/specs-go github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/go-selinux/label github.com/opencontainers/selinux/pkg/pwalkdir +# github.com/openshift-eng/openshift-tests-extension v0.0.0-20250522124649-4ffcd156ec7c +## explicit; go 1.23.0 +github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdinfo +github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdlist +github.com/openshift-eng/openshift-tests-extension/pkg/cmd/cmdrun +github.com/openshift-eng/openshift-tests-extension/pkg/dbtime +github.com/openshift-eng/openshift-tests-extension/pkg/extension +github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests +github.com/openshift-eng/openshift-tests-extension/pkg/flags +github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo +github.com/openshift-eng/openshift-tests-extension/pkg/util/sets +github.com/openshift-eng/openshift-tests-extension/pkg/version # github.com/openshift-kni/commatrix v0.0.4-0.20250604173218-064b4004e9fb ## explicit; go 1.22.0 github.com/openshift-kni/commatrix/pkg/client
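
For reviewers unfamiliar with the vendored sets package: the generic Set[T] in set.go is the primary API, and the typed Int/Int32/Int64/String sets are deprecated wrappers that cast through it. A minimal usage sketch (illustrative values only, not part of the vendored code), using just the functions defined in the set.go hunk above:

package main

import (
	"fmt"

	"github.com/openshift-eng/openshift-tests-extension/pkg/util/sets"
)

func main() {
	// New infers the element type from its arguments; with no arguments
	// the type parameter must be given explicitly, e.g. sets.New[string]().
	suites := sets.New[string]("conformance", "parallel", "serial")

	// Insert and Delete mutate the receiver and return it, so calls chain.
	suites.Insert("etcd").Delete("serial")

	fmt.Println(suites.Has("etcd"))                // true
	fmt.Println(suites.HasAll("etcd", "parallel")) // true

	// The set operations (Union, Difference, Intersection, ...) return new
	// sets and leave their operands unchanged.
	kept := suites.Intersection(sets.New[string]("conformance", "etcd"))

	// List is a free function rather than a method because sorting requires
	// cmp.Ordered, which Set's comparable constraint does not guarantee.
	fmt.Println(sets.List(kept)) // [conformance etcd]

	// KeySet builds a set from a map's keys.
	fmt.Println(sets.KeySet(map[string]int{"b": 1, "a": 2}).Len()) // 2
}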