Skip to content

Commit

Permalink
address GC controller review feedback
Browse files Browse the repository at this point in the history
Signed-off-by: Steve Kriss <[email protected]>
  • Loading branch information
skriss committed Dec 20, 2017
1 parent cdd8178 commit af71115
Show file tree
Hide file tree
Showing 4 changed files with 175 additions and 110 deletions.
12 changes: 12 additions & 0 deletions pkg/controller/backup_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -279,6 +279,18 @@ func (controller *backupController) processBackup(key string) error {
return nil
}

// has reports whether val is present in the items slice.
func has(items []string, val string) bool {
	for i := range items {
		if items[i] == val {
			return true
		}
	}
	return false
}

func patchBackup(original, updated *api.Backup, client arkv1client.BackupsGetter) (*api.Backup, error) {
origBytes, err := json.Marshal(original)
if err != nil {
Expand Down
65 changes: 19 additions & 46 deletions pkg/controller/gc_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"

Expand Down Expand Up @@ -113,54 +114,37 @@ func (c *gcController) handleFinalizer(_, newObj interface{}) {
}
log.Debugf("Backup has finalizers %s", backup.Finalizers)

// GC finalizer not found
if !has(backup.Finalizers, gcFinalizer) {
// only process the GC finalizer when it's the first item
// in the slice
if backup.Finalizers[0] != gcFinalizer {
return
}

log.Infof("Garbage-collecting backup")
if err := c.garbageCollect(backup, log); err != nil {
// if there were errors deleting related cloud resources, don't
// delete the backup API object because we don't want to orphan
// the cloud resources.
log.WithError(err).Error("Error deleting backup's related objects")
return
}

patchMap := map[string]interface{}{
"metadata": map[string]interface{}{
"finalizers": except(backup.Finalizers, gcFinalizer),
"finalizers": except(backup.Finalizers, gcFinalizer),
"resourceVersion": backup.ResourceVersion,
},
}

patchBytes, err := json.Marshal(patchMap)
if err != nil {
log.WithError(err).Error("Error marshaling finalizers patch")
}

upd, err := c.backupClient.Backups(api.DefaultNamespace).Patch(backup.Name, types.MergePatchType, patchBytes)
if err != nil {
log.WithError(errors.WithStack(err)).Error("Error patching backup")
return
}

// no more finalizers: we don't need to issue another delete
if len(upd.Finalizers) == 0 {
return
}

if err := c.backupClient.Backups(api.DefaultNamespace).Delete(upd.Name, &metav1.DeleteOptions{}); err != nil {
log.WithError(errors.WithStack(err)).Error("Error deleting backup")
}
}

// has returns true if the `items` slice contains the
// value `val`, or false otherwise.
func has(items []string, val string) bool {
for _, itm := range items {
if itm == val {
return true
}
if _, err = c.backupClient.Backups(backup.Namespace).Patch(backup.Name, types.MergePatchType, patchBytes); err != nil {
log.WithError(errors.WithStack(err)).Error("Error patching backup")
}

return false
}

// except returns a new string slice that contains all of the entries
Expand Down Expand Up @@ -193,7 +177,7 @@ func (c *gcController) Run(ctx context.Context, workers int) error {

func (c *gcController) run() {
now := c.clock.Now()
c.logger.Info("Garbage-collecting backups that have expired as of now")
c.logger.Info("Garbage-collecting expired backups")

// Go thru API objects and delete expired ones (finalizer will GC their
// corresponding files/snapshots/restores). Note that we're ignoring backups
Expand All @@ -208,14 +192,14 @@ func (c *gcController) run() {
for _, backup := range backups {
log := c.logger.WithField("backup", kube.NamespaceAndName(backup))
if backup.Status.Expiration.Time.After(now) {
log.Info("Backup has not expired yet, skipping")
log.Debug("Backup has not expired yet, skipping")
continue
}

// since backups have a finalizer, this will actually have the effect of setting a deletionTimestamp and calling
// an update. The update will be handled by this controller and will result in a deletion of the obj storage
// files and the API object.
if err := c.backupClient.Backups(api.DefaultNamespace).Delete(backup.Name, &metav1.DeleteOptions{}); err != nil {
if err := c.backupClient.Backups(backup.Namespace).Delete(backup.Name, &metav1.DeleteOptions{}); err != nil {
log.WithError(errors.WithStack(err)).Error("Error deleting backup")
}
}
Expand All @@ -227,28 +211,21 @@ func (c *gcController) garbageCollect(backup *api.Backup, log logrus.FieldLogger
// if the backup includes snapshots but we don't currently have a PVProvider, we don't
// want to orphan the snapshots so skip garbage-collection entirely.
if c.snapshotService == nil && len(backup.Status.VolumeBackups) > 0 {
return errors.New("cannot garbage-collect backup because backup includes snapshots and server is not configured with PersistentVolumeProvider")
return errors.New("cannot garbage-collect backup because it includes snapshots and Ark is not configured with a PersistentVolumeProvider")
}

// The GC process is primarily intended to delete expired cloud resources (file in object
// storage, snapshots). If we fail to delete any of these, we don't delete the Backup API
// object so that we don't orphan the cloud resources.
deletionFailure := false
var errs []error

for _, volumeBackup := range backup.Status.VolumeBackups {
log.WithField("snapshotID", volumeBackup.SnapshotID).Info("Removing snapshot associated with backup")
if err := c.snapshotService.DeleteSnapshot(volumeBackup.SnapshotID); err != nil {
log.WithError(err).WithField("snapshotID", volumeBackup.SnapshotID).Error("Error deleting snapshot")
deletionFailure = true
errs = append(errs, errors.Wrapf(err, "error deleting snapshot %s", volumeBackup.SnapshotID))
}
}

// delete everything in the backup dir in object storage *before* deleting the API object
// because otherwise the backup sync controller could re-sync the backup from object storage.
log.Info("Removing backup from object storage")
if err := c.backupService.DeleteBackupDir(c.bucket, backup.Name); err != nil {
log.WithError(err).Error("Error deleting backup")
deletionFailure = true
errs = append(errs, errors.Wrap(err, "error deleting backup from object storage"))
}

if restores, err := c.restoreLister.Restores(backup.Namespace).List(labels.Everything()); err != nil {
Expand All @@ -268,9 +245,5 @@ func (c *gcController) garbageCollect(backup *api.Backup, log logrus.FieldLogger
}
}

if deletionFailure {
return errors.New("backup will not be deleted due to errors deleting related object storage files(s) and/or volume snapshots")
}

return nil
return kerrors.NewAggregate(errs)
}
Loading

0 comments on commit af71115

Please sign in to comment.