-
Notifications
You must be signed in to change notification settings - Fork 254
provide a means to abandon deprovisioning #1017
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,6 +1,7 @@ | ||
| package deprovision | ||
|
|
||
| import ( | ||
| "context" | ||
| "fmt" | ||
| "io/ioutil" | ||
| "os" | ||
|
|
@@ -10,24 +11,36 @@ import ( | |
|
|
||
| log "github.com/sirupsen/logrus" | ||
| "github.com/spf13/cobra" | ||
| "k8s.io/apimachinery/pkg/runtime" | ||
| "k8s.io/apimachinery/pkg/util/wait" | ||
| "k8s.io/client-go/tools/clientcmd" | ||
| "sigs.k8s.io/controller-runtime/pkg/client" | ||
|
|
||
| "github.com/openshift/installer/pkg/destroy/aws" | ||
| "github.com/openshift/library-go/pkg/controller/fileobserver" | ||
|
|
||
| hivev1 "github.com/openshift/hive/pkg/apis/hive/v1" | ||
| "github.com/openshift/hive/pkg/constants" | ||
| ) | ||
|
|
||
| type awsTagDeprovisionOpts struct { | ||
| logLevel string | ||
| region string | ||
| filters []aws.Filter | ||
| clusterDeprovision string | ||
| logger log.FieldLogger | ||
| } | ||
|
|
||
| // NewDeprovisionAWSWithTagsCommand is the entrypoint to create the 'aws-tag-deprovision' subcommand | ||
| // TODO: Port to a sub-command of deprovision. | ||
| func NewDeprovisionAWSWithTagsCommand() *cobra.Command { | ||
| opt := &aws.ClusterUninstaller{} | ||
| var logLevel string | ||
| opt := &awsTagDeprovisionOpts{} | ||
| cmd := &cobra.Command{ | ||
| Use: "aws-tag-deprovision KEY=VALUE ...", | ||
| Short: "Deprovision AWS assets (as created by openshift-installer) with the given tag(s)", | ||
| Long: "Deprovision AWS assets (as created by openshift-installer) with the given tag(s). A resource matches the filter if any of the key/value pairs are in its tags.", | ||
| Run: func(cmd *cobra.Command, args []string) { | ||
| if err := completeAWSUninstaller(opt, logLevel, args); err != nil { | ||
| if err := opt.complete(args); err != nil { | ||
| log.WithError(err).Error("Cannot complete command") | ||
| return | ||
| } | ||
|
|
@@ -79,36 +92,36 @@ func NewDeprovisionAWSWithTagsCommand() *cobra.Command { | |
| }() | ||
| } | ||
|
|
||
| if err := opt.Run(); err != nil { | ||
| if err := opt.run(); err != nil { | ||
| log.WithError(err).Fatal("Runtime error") | ||
| } | ||
| }, | ||
| } | ||
| flags := cmd.Flags() | ||
| flags.StringVar(&logLevel, "loglevel", "info", "log level, one of: debug, info, warn, error, fatal, panic") | ||
| flags.StringVar(&opt.Region, "region", "us-east-1", "AWS region to use") | ||
| flags.StringVar(&opt.logLevel, "loglevel", "info", "log level, one of: debug, info, warn, error, fatal, panic") | ||
| flags.StringVar(&opt.region, "region", "us-east-1", "AWS region to use") | ||
| flags.StringVar(&opt.clusterDeprovision, "clusterdeprovision", "", "name of the ClusterDeprovision in which to stored blocked resources") | ||
| return cmd | ||
| } | ||
|
|
||
| func completeAWSUninstaller(o *aws.ClusterUninstaller, logLevel string, args []string) error { | ||
|
|
||
| func (o *awsTagDeprovisionOpts) complete(args []string) error { | ||
| for _, arg := range args { | ||
| filter := aws.Filter{} | ||
| err := parseFilter(filter, arg) | ||
| if err != nil { | ||
| return fmt.Errorf("cannot parse filter %s: %v", arg, err) | ||
| } | ||
| o.Filters = append(o.Filters, filter) | ||
| o.filters = append(o.filters, filter) | ||
| } | ||
|
|
||
| // Set log level | ||
| level, err := log.ParseLevel(logLevel) | ||
| level, err := log.ParseLevel(o.logLevel) | ||
| if err != nil { | ||
| log.WithError(err).Error("cannot parse log level") | ||
| return err | ||
| } | ||
|
|
||
| o.Logger = log.NewEntry(&log.Logger{ | ||
| o.logger = log.NewEntry(&log.Logger{ | ||
| Out: os.Stdout, | ||
| Formatter: &log.TextFormatter{ | ||
| FullTimestamp: true, | ||
|
|
@@ -120,6 +133,66 @@ func completeAWSUninstaller(o *aws.ClusterUninstaller, logLevel string, args []s | |
| return nil | ||
| } | ||
|
|
||
| func (o *awsTagDeprovisionOpts) run() error { | ||
| return wait.ExponentialBackoff( | ||
| // Start the backoff at 5 minutes, double it each time, to a cap of 24 hours. | ||
| wait.Backoff{ | ||
| Duration: 5 * time.Minute, | ||
| Factor: 2, | ||
| Steps: 1 << 8, // large enough to make cap the effective bound | ||
| Cap: 24 * time.Hour, | ||
| }, | ||
| func() (done bool, returnErr error) { | ||
| ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) | ||
| defer cancel() | ||
| uninstaller := &aws.ClusterUninstaller{ | ||
| Filters: o.filters, | ||
| Logger: o.logger, | ||
| Region: o.region, | ||
| } | ||
| blockedResources, err := uninstaller.RunWithContext(ctx) | ||
| if len(blockedResources) == 0 { | ||
| return true, err | ||
| } | ||
| if o.clusterDeprovision == "" { | ||
| return | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Do we not also return
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. If there are any blocked resources, then we want to keep trying to uninstall in the backoff loop. If we return the error, the backoff loop will abort. However, maybe it would be good to distinguish between an error because the context expired and an error for other reasons. In the former case, we want to keep trying. In the latter case, we want to abort. I'll look into that. |
||
| } | ||
| kubeconfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( | ||
| clientcmd.NewDefaultClientConfigLoadingRules(), | ||
| &clientcmd.ConfigOverrides{}, | ||
| ) | ||
| namespace, _, err := kubeconfig.Namespace() | ||
| if err != nil { | ||
| o.logger.WithError(err).Error("could not get the namespace") | ||
| return | ||
| } | ||
|
Comment on lines
+164
to
+168
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Does this use the context set up via the kubeconfig to get the namespace? How many users would use something like that? I personally haven't. So should we also allow for setting the namespace directly? |
||
| clientConfig, err := kubeconfig.ClientConfig() | ||
| if err != nil { | ||
| o.logger.WithError(err).Error("could not get the kube client config") | ||
| return | ||
| } | ||
| scheme := runtime.NewScheme() | ||
| hivev1.AddToScheme(scheme) | ||
| c, err := client.New(clientConfig, client.Options{Scheme: scheme}) | ||
| if err != nil { | ||
| o.logger.WithError(err).Error("could not get kube client") | ||
| return | ||
| } | ||
| clusterDeprovision := &hivev1.ClusterDeprovision{} | ||
| if err := c.Get(context.Background(), client.ObjectKey{Namespace: namespace, Name: o.clusterDeprovision}, clusterDeprovision); err != nil { | ||
| o.logger.WithError(err).Error("could not get ClusterDeprovision") | ||
| return | ||
| } | ||
| clusterDeprovision.Status.BlockedResources = blockedResources | ||
| if err := c.Status().Update(context.Background(), clusterDeprovision); err != nil { | ||
| o.logger.WithError(err).Error("could not update ClusterDeprovision") | ||
| return | ||
| } | ||
| return | ||
| }, | ||
| ) | ||
| } | ||
|
|
||
| func parseFilter(filterMap aws.Filter, str string) error { | ||
| parts := strings.SplitN(str, "=", 2) | ||
| if len(parts) != 2 { | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -25,6 +25,9 @@ type ClusterDeprovisionStatus struct { | |
| // Conditions includes more detailed status for the cluster deprovision | ||
| // +optional | ||
| Conditions []ClusterDeprovisionCondition `json:"conditions,omitempty"` | ||
|
|
||
| // BlockedResources is a list of cloud resources that the deprovision has not been able to delete | ||
| BlockedResources []string `json:"blockedResources,omitempty"` | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. How are things looking for the other cloud providers, will a flat list of strings be sufficient as far as we can see? Wondering if we should go with something that lets us store a little more data or not.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. That's a good idea. I can change the type to something that can store more data per resource. (NOTE: the tail of this comment was truncated in rendering — original wording uncertain) |
||
| } | ||
|
|
||
| // ClusterDeprovisionPlatform contains platform-specific configuration for the | ||
|
|
||
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -1296,6 +1296,20 @@ func (r *ReconcileClusterDeployment) ensureClusterDeprovisioned(cd *hivev1.Clust | |||||
| cdLog.Info("PreserveOnDelete=true but creating deprovisioning request as cluster was never successfully provisioned") | ||||||
| } | ||||||
|
|
||||||
| // Stop waiting for deprovision if the abandon-deprovision annotation is true | ||||||
| if value, ok := cd.Annotations[constants.AbandonDeprovisionAnnotation]; ok { | ||||||
| logger := cdLog.WithField(constants.AbandonDeprovisionAnnotation, value) | ||||||
| if abandon, err := strconv.ParseBool(value); abandon && err == nil { | ||||||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Suggested change
wouldn't that be more appropriate? |
||||||
| logger.Warn("adandoning deprovision") | ||||||
| err = r.removeClusterDeploymentFinalizer(cd, cdLog) | ||||||
| if err != nil { | ||||||
| cdLog.WithError(err).Log(controllerutils.LogLevel(err), "error removing finalizer") | ||||||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We will return true along with the error in that case — TODO confirm, the inline code element was lost in rendering. |
||||||
| } | ||||||
| return true, err | ||||||
| } | ||||||
| logger.Debug("ignoring abandon-deprovision annotation") | ||||||
| } | ||||||
|
|
||||||
| if cd.Spec.ClusterMetadata == nil { | ||||||
| cdLog.Warn("skipping uninstall for cluster that never had clusterID set") | ||||||
| return true, nil | ||||||
|
|
||||||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
nit: named returns are not great as they allow an empty
`return` in the function body, which makes it difficult to read what is being returned and when it was set.