Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,12 @@ test-e2e:
KUBERNETES_CONFIG="$(KUBECONFIG)" $(GO) test -v -timeout 40m ./test/e2e/$$d -ginkgo.v -ginkgo.noColor -ginkgo.failFast || exit; \
done

.PHONY: test-e2e-local
test-e2e-local:
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we make the make target more specific as in

performance-profile-creator-tests: build-performance-profile-creator
?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I was wondering if we can have more tests that are e2e in nature but do not require a running cluster — the name is not important, obviously.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

if we are planning to add a render lane in openshift/release then it should correspond to the make target

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@MarSik we currently have https://github.com/openshift/cluster-node-tuning-operator/blob/master/hack/run-render-command-functests.sh under this repo. If this target comes to replace the invocation of this script like we did in PAO then we can remove the script (although the script contains some additional decorations)

for d in performanceprofile/functests-render-command/1_render_command; do \
$(GO) test -v -timeout 40m ./test/e2e/$$d -ginkgo.v -ginkgo.noColor -ginkgo.failFast || exit; \
done

verify: verify-gofmt

verify-gofmt:
Expand Down
198 changes: 115 additions & 83 deletions cmd/cluster-node-tuning-operator/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@ import (
mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
olmv1 "github.com/operator-framework/api/pkg/operators/v1"
olmv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiruntime "k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
Expand All @@ -26,6 +28,7 @@ import (
"github.com/openshift/cluster-node-tuning-operator/pkg/config"
"github.com/openshift/cluster-node-tuning-operator/pkg/metrics"
"github.com/openshift/cluster-node-tuning-operator/pkg/operator"
"github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/cmd/render"
"github.com/openshift/cluster-node-tuning-operator/pkg/signals"
"github.com/openshift/cluster-node-tuning-operator/pkg/tuned"
"github.com/openshift/cluster-node-tuning-operator/pkg/util"
Expand Down Expand Up @@ -63,97 +66,126 @@ func printVersion() {
klog.Infof("%s Version: %s", tunedv1.TunedClusterOperatorResourceName, version.Version)
}

func main() {
var rootCmd = &cobra.Command{
Use: operatorFilename,
Short: "NTO manages the containerized TuneD instances",
Run: func(cmd *cobra.Command, args []string) {
operatorRun()
},
}

var enableLeaderElection bool
var showVersionAndExit bool

// prepareCommands wires up the CLI: it registers the operator's
// persistent flags, exposes the klog flags on the root command, and
// attaches the "render" subcommand.
func prepareCommands() {
	rootCmd.PersistentFlags().BoolVar(&enableLeaderElection, "enable-leader-election", true,
		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
	rootCmd.PersistentFlags().BoolVar(&showVersionAndExit, "version", false,
		"Show program version and exit.")

	// Include the klog command line arguments. Cobra parses the root
	// command's own flag set, not the global pflag.CommandLine, so the
	// Go flags registered by klog.InitFlags must be attached to
	// rootCmd's persistent flags for -v, --logtostderr etc. to be
	// accepted by rootCmd.Execute().
	klog.InitFlags(nil)
	rootCmd.PersistentFlags().AddGoFlagSet(flag.CommandLine)

	rootCmd.AddCommand(render.NewRenderCommand())
}

// operatorRun is the entry point of the operator executable. It builds
// a controller-runtime manager watching the NTO namespace plus
// cluster-scoped resources, registers the NTO controller, the metrics
// server, the PerformanceProfile controller and its admission
// webhooks, and then blocks running the manager until it exits.
func operatorRun() {
	printVersion()

	if showVersionAndExit {
		return
	}

	// We have two namespaces that we need to watch:
	// 1. NTO namespace - for NTO resources
	// 2. None namespace - for cluster wide resources
	ntoNamespace := config.OperatorNamespace()
	namespaces := []string{
		ntoNamespace,
		metav1.NamespaceNone,
	}

	restConfig := ctrl.GetConfigOrDie()
	le := util.GetLeaderElectionConfig(restConfig, enableLeaderElection)
	// Reuse restConfig rather than calling ctrl.GetConfigOrDie() a
	// second time; the original code built the identical config twice.
	mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
		NewCache:                cache.MultiNamespacedCacheBuilder(namespaces),
		Scheme:                  scheme,
		LeaderElection:          true,
		LeaderElectionID:        config.OperatorLockName,
		LeaderElectionNamespace: ntoNamespace,
		LeaseDuration:           &le.LeaseDuration.Duration,
		RetryPeriod:             &le.RetryPeriod.Duration,
		RenewDeadline:           &le.RenewDeadline.Duration,
		Namespace:               ntoNamespace,
	})
	if err != nil {
		klog.Exit(err)
	}

	controller, err := operator.NewController()
	if err != nil {
		klog.Fatalf("failed to create new controller: %v", err)
	}

	if err := mgr.Add(controller); err != nil {
		klog.Fatalf("failed to add new controller to the manager: %v", err)
	}

	if err := mgr.Add(metrics.Server{}); err != nil {
		klog.Fatalf("unable to add metrics server as runnable under the manager: %v", err)
	}
	metrics.RegisterVersion(version.Version)

	if err = (&paocontroller.PerformanceProfileReconciler{
		Client:   mgr.GetClient(),
		Scheme:   mgr.GetScheme(),
		Recorder: mgr.GetEventRecorderFor("performance-profile-controller"),
	}).SetupWithManager(mgr); err != nil {
		klog.Exitf("unable to create PerformanceProfile controller: %v", err)
	}

	// Configure webhook server.
	webHookServer := mgr.GetWebhookServer()
	webHookServer.Port = webhookPort
	webHookServer.CertDir = webhookCertDir
	webHookServer.CertName = webhookCertName
	webHookServer.KeyName = webhookKeyName

	if err = (&performancev1.PerformanceProfile{}).SetupWebhookWithManager(mgr); err != nil {
		klog.Exitf("unable to create PerformanceProfile v1 webhook: %v", err)
	}

	if err = (&performancev2.PerformanceProfile{}).SetupWebhookWithManager(mgr); err != nil {
		klog.Exitf("unable to create PerformanceProfile v2 webhook: %v", err)
	}

	// Start blocks until the manager stops (signal or fatal error).
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		klog.Exitf("manager exited with non-zero code: %v", err)
	}
}

func tunedOperandRun() {
var boolVersion bool
var enableLeaderElection bool
flag.BoolVar(&boolVersion, "version", false, "show program version and exit")
flag.BoolVar(&enableLeaderElection, "enable-leader-election", true,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")

// flag.Parse is called from within tuned.Run -> parseCmdOpts
// but the version flag variable is inherited from here..

stopCh := signals.SetupSignalHandler()
tuned.Run(stopCh, &boolVersion, version.Version)
}

func main() {
runAs := filepath.Base(os.Args[0])

switch runAs {
case operatorFilename:
klog.InitFlags(nil)
flag.Parse()

printVersion()

if boolVersion {
os.Exit(0)
}

// We have two namespaces that we need to watch:
// 1. NTO namespace - for NTO resources
// 2. None namespace - for cluster wide resources
ntoNamespace := config.OperatorNamespace()
namespaces := []string{
ntoNamespace,
metav1.NamespaceNone,
}

restConfig := ctrl.GetConfigOrDie()
le := util.GetLeaderElectionConfig(restConfig, enableLeaderElection)
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
NewCache: cache.MultiNamespacedCacheBuilder(namespaces),
Scheme: scheme,
LeaderElection: true,
LeaderElectionID: config.OperatorLockName,
LeaderElectionNamespace: ntoNamespace,
LeaseDuration: &le.LeaseDuration.Duration,
RetryPeriod: &le.RetryPeriod.Duration,
RenewDeadline: &le.RenewDeadline.Duration,
Namespace: ntoNamespace,
})

if err != nil {
klog.Exit(err)
}

controller, err := operator.NewController()
if err != nil {
klog.Fatalf("failed to create new controller: %v", err)
}

if err := mgr.Add(controller); err != nil {
klog.Fatalf("failed to add new controller to the manager: %v", err)
}

if err := mgr.Add(metrics.Server{}); err != nil {
klog.Fatalf("unable to add metrics server as runnable under the manager: %v", err)
}
metrics.RegisterVersion(version.Version)

if err = (&paocontroller.PerformanceProfileReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor("performance-profile-controller"),
}).SetupWithManager(mgr); err != nil {
klog.Exitf("unable to create PerformanceProfile controller: %v", err)
}

// Configure webhook server.
webHookServer := mgr.GetWebhookServer()
webHookServer.Port = webhookPort
webHookServer.CertDir = webhookCertDir
webHookServer.CertName = webhookCertName
webHookServer.KeyName = webhookKeyName

if err = (&performancev1.PerformanceProfile{}).SetupWebhookWithManager(mgr); err != nil {
klog.Exitf("unable to create PerformanceProfile v1 webhook: %v", err)
}

if err = (&performancev2.PerformanceProfile{}).SetupWebhookWithManager(mgr); err != nil {
klog.Exitf("unable to create PerformanceProfile v2 webhook: %v", err)
}

if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
klog.Exitf("manager exited with non-zero code: %v", err)
}
prepareCommands()
_ = rootCmd.Execute()
case operandFilename:
stopCh := signals.SetupSignalHandler()
tuned.Run(stopCh, &boolVersion, version.Version)
tunedOperandRun()
default:
klog.Fatalf("application should be run as \"%s\" or \"%s\"", operatorFilename, operandFilename)
}
Expand Down
4 changes: 2 additions & 2 deletions docs/performanceprofile/performance_addon.md
Original file line number Diff line number Diff line change
Expand Up @@ -68,12 +68,12 @@ export ASSET_OUTPUT_DIR=<output path for the rendered manifests>

Build and invoke the binary
```
build/_output/bin/performance-addon-operators render (FIXME)
_output/cluster-node-tuning-operator render
```

Or provide the variables via command line arguments
```
build/_output/bin/performance-addon-operators render --performance-profile-input-files <path> --asset-output-dir<path> (FIXME)
_output/cluster-node-tuning-operator render --performance-profile-input-files <path> --asset-output-dir <path>
```

# Troubleshooting
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ func TestRenderCmd(t *testing.T) {
rr = append(rr, &ginkgo_reporters.Polarion)
}
rr = append(rr, junit.NewJUnitReporter("render_manifests"))
RunSpecsWithDefaultAndCustomReporters(t, "Performance Operator render tests", rr)
RunSpecsWithDefaultAndCustomReporters(t, "Performance Profile render tests", rr)
}

var _ = BeforeSuite(func() {
Expand All @@ -38,8 +38,8 @@ var _ = BeforeSuite(func() {
}

testDir = filepath.Dir(file)
workspaceDir = filepath.Clean(filepath.Join(testDir, "..", ".."))
binPath = filepath.Clean(filepath.Join(workspaceDir, "build", "_output", "bin"))
workspaceDir = filepath.Clean(filepath.Join(testDir, "..", "..", "..", "..", ".."))
binPath = filepath.Clean(filepath.Join(workspaceDir, "_output"))
fmt.Fprintf(GinkgoWriter, "using binary at %q\n", binPath)
})

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ var _ = Describe("render command e2e test", func() {
It("Gets cli args and produces the expected components to output directory", func() {

cmdline := []string{
filepath.Join(binPath, "performance-addon-operators"),
filepath.Join(binPath, "cluster-node-tuning-operator"),
"render",
"--performance-profile-input-files", ppInFiles,
"--asset-input-dir", assetsInDir,
Expand All @@ -46,7 +46,7 @@ var _ = Describe("render command e2e test", func() {

It("Gets environment variables and produces the expected components to output directory", func() {
cmdline := []string{
filepath.Join(binPath, "performance-addon-operators"),
filepath.Join(binPath, "cluster-node-tuning-operator"),
"render",
}
fmt.Fprintf(GinkgoWriter, "running: %v\n", cmdline)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,8 @@ spec:
- machineConfigLabels:
machineconfiguration.openshift.io/role: worker-cnf
operand:
debug: false
tunedConfig:
reapply_sysctl: null
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

reapply_sysctl is a Boolean, why does it have null value?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You tell me :) This is the value the test is seeing, I only updated the expected value for it to pass as PAO does not care about this part.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I might tell you, but I have no idea about the test you're talking about. :) I guess it is null because tunedConfig was not specified at all? Good to know it was manually updated by you.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is the expected value of Tuned when PerfProfile is processed by the render stage. If you check couple of lines above this, we mostly care about the generated tuned profile. The rest is just what NTO structures serialize to by default.

priority: 20
profile: openshift-node-performance-manual
status: {}