Allow forced backup deletion

Add --force and --confirm to `ark backup delete` to support forced
backup deletion. This forcibly removes the Ark GC finalizer (if it's
present) from a backup and will orphan any resources associated with the
backup, such as backup tarballs in object storage, persistent volume
snapshots, and restores for the backup.

If a backup has a deletion timestamp, display `Deleting` in `ark backup
describe` and `ark backup get`.

Signed-off-by: Andy Goldstein <andy.goldstein@gmail.com>
This commit is contained in:
Andy Goldstein
2018-02-12 11:32:49 -05:00
parent 530e22939d
commit d24fb232cc
12 changed files with 277 additions and 68 deletions

View File

@@ -14,7 +14,9 @@ ark backup delete NAME [flags]
### Options ### Options
``` ```
-h, --help help for delete --confirm Confirm forceful deletion
--force Forcefully delete the backup, potentially leaving orphaned cloud resources
-h, --help help for delete
``` ```
### Options inherited from parent commands ### Options inherited from parent commands

View File

@@ -14,7 +14,9 @@ ark delete backup NAME [flags]
### Options ### Options
``` ```
-h, --help help for backup --confirm Confirm forceful deletion
--force Forcefully delete the backup, potentially leaving orphaned cloud resources
-h, --help help for backup
``` ```
### Options inherited from parent commands ### Options inherited from parent commands

View File

@@ -37,4 +37,8 @@ const (
// NamespaceScopedDir is the name of the directory containing namespace-scoped // NamespaceScopedDir is the name of the directory containing namespace-scoped
// resource within an Ark backup. // resource within an Ark backup.
NamespaceScopedDir = "namespaces" NamespaceScopedDir = "namespaces"
// GCFinalizer is the name of the finalizer Ark uses for backups to allow for the GC Controller to
// delete all resources associated with a backup.
GCFinalizer = "gc.ark.heptio.com"
) )

View File

@@ -17,49 +17,179 @@ limitations under the License.
package backup package backup
import ( import (
"bufio"
"encoding/json"
"fmt" "fmt"
"os" "os"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/controller"
clientset "github.com/heptio/ark/pkg/generated/clientset/versioned"
kubeutil "github.com/heptio/ark/pkg/util/kube"
"github.com/heptio/ark/pkg/util/stringslice"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/heptio/ark/pkg/client" "github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd" "github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/controller"
kubeutil "github.com/heptio/ark/pkg/util/kube"
) )
func NewDeleteCommand(f client.Factory, use string) *cobra.Command { func NewDeleteCommand(f client.Factory, use string) *cobra.Command {
o := &DeleteOptions{}
c := &cobra.Command{ c := &cobra.Command{
Use: fmt.Sprintf("%s NAME", use), Use: fmt.Sprintf("%s NAME", use),
Short: "Delete a backup", Short: "Delete a backup",
Run: func(c *cobra.Command, args []string) { Run: func(c *cobra.Command, args []string) {
if len(args) != 1 { cmd.CheckError(o.Validate(c, args, f))
c.Usage() cmd.CheckError(o.Complete(f, args))
os.Exit(1) cmd.CheckError(o.Run())
}
kubeClient, err := f.KubeClient()
cmd.CheckError(err)
serverVersion, err := kubeutil.ServerVersion(kubeClient.Discovery())
cmd.CheckError(err)
if !serverVersion.AtLeast(controller.MinVersionForDelete) {
cmd.CheckError(errors.Errorf("this command requires the Kubernetes server version to be at least %s", controller.MinVersionForDelete))
}
arkClient, err := f.Client()
cmd.CheckError(err)
backupName := args[0]
err = arkClient.ArkV1().Backups(f.Namespace()).Delete(backupName, nil)
cmd.CheckError(err)
fmt.Printf("Backup %q deleted\n", backupName)
}, },
} }
o.BindFlags(c.Flags())
return c return c
} }
// DeleteOptions holds the arguments, flags, and clients needed to run
// `ark backup delete`.
type DeleteOptions struct {
	// Name is the name of the backup to delete (first positional arg).
	Name string
	// Force removes the GC finalizer and deletes the backup directly,
	// potentially orphaning cloud resources.
	Force bool
	// Confirm skips the interactive confirmation prompt for --force.
	Confirm bool
	// client talks to the Ark API server; populated in Complete.
	client clientset.Interface
	// namespace is the Ark namespace to operate in; populated in Complete.
	namespace string
}
// BindFlags registers the forced-deletion flags on the given flag set.
// (pflag sorts flags for help output, so registration order is cosmetic.)
func (o *DeleteOptions) BindFlags(flags *pflag.FlagSet) {
	flags.BoolVar(&o.Confirm, "confirm", o.Confirm, "Confirm forceful deletion")
	flags.BoolVar(&o.Force, "force", o.Force, "Forcefully delete the backup, potentially leaving orphaned cloud resources")
}
// Validate checks that exactly one backup name was supplied and that the
// connected Kubernetes cluster is new enough to support backup deletion
// (finalizer handling requires controller.MinVersionForDelete).
func (o *DeleteOptions) Validate(c *cobra.Command, args []string, f client.Factory) error {
	if len(args) != 1 {
		return errors.New("you must specify only one argument, the backup's name")
	}

	kc, err := f.KubeClient()
	if err != nil {
		return err
	}

	version, err := kubeutil.ServerVersion(kc.Discovery())
	if err != nil {
		return err
	}

	if version.AtLeast(controller.MinVersionForDelete) {
		return nil
	}
	return errors.Errorf("this command requires the Kubernetes server version to be at least %s", controller.MinVersionForDelete)
}
// Complete populates the options from the positional arguments and the
// client factory. Must be called after Validate and before Run.
func (o *DeleteOptions) Complete(f client.Factory, args []string) error {
	o.Name = args[0]

	var err error
	o.client, err = f.Client()
	if err != nil {
		// Propagate the error: the original returned nil here, which
		// left o.client nil and caused a nil-pointer panic later in Run.
		return err
	}

	o.namespace = f.Namespace()

	return nil
}
// Run dispatches to the normal deletion path, or to the forced path when
// --force was given.
func (o *DeleteOptions) Run() error {
	if !o.Force {
		return o.normalDelete()
	}
	return o.forceDelete()
}
// normalDelete issues a standard delete request for the backup; the GC
// finalizer then cleans up associated data before the object disappears.
func (o *DeleteOptions) normalDelete() error {
	backups := o.client.ArkV1().Backups(o.namespace)

	err := backups.Delete(o.Name, nil)
	if err != nil {
		return err
	}

	fmt.Printf("Request to delete backup %q submitted successfully.\nThe backup will be fully deleted after all associated data (disk snapshots, backup files, restores) are removed.\n", o.Name)
	return nil
}
// forceDelete removes the Ark GC finalizer from the backup (if present)
// and then deletes the backup object directly. This orphans any resources
// associated with the backup (object-storage tarballs, volume snapshots,
// restores), so the user is prompted unless --confirm was supplied.
func (o *DeleteOptions) forceDelete() error {
	backup, err := o.client.ArkV1().Backups(o.namespace).Get(o.Name, metav1.GetOptions{})
	if err != nil {
		return errors.WithStack(err)
	}
	if !o.Confirm {
		// If the user didn't specify --confirm, we need to prompt for it
		if !getConfirmation() {
			// User declined; not an error, just do nothing.
			return nil
		}
	}
	// Step 1: patch to remove our finalizer, if it's there
	if stringslice.Has(backup.Finalizers, v1.GCFinalizer) {
		patchMap := map[string]interface{}{
			"metadata": map[string]interface{}{
				"finalizers": stringslice.Except(backup.Finalizers, v1.GCFinalizer),
				// Sending resourceVersion makes the patch fail instead of
				// clobbering a concurrent update to the backup.
				"resourceVersion": backup.ResourceVersion,
			},
		}
		patchBytes, err := json.Marshal(patchMap)
		if err != nil {
			return errors.WithStack(err)
		}
		if _, err = o.client.ArkV1().Backups(backup.Namespace).Patch(backup.Name, types.MergePatchType, patchBytes); err != nil {
			return errors.WithStack(err)
		}
	}
	// Step 2: issue the delete ONLY if it has never been issued before
	// (a non-nil DeletionTimestamp means a delete is already pending and
	// was blocked only by the finalizer removed above).
	if backup.DeletionTimestamp == nil {
		if err = o.client.ArkV1().Backups(backup.Namespace).Delete(backup.Name, nil); err != nil {
			return errors.WithStack(err)
		}
	}
	fmt.Printf("Backup %q force-deleted.\n", backup.Name)
	return nil
}
// getConfirmation interactively prompts on stdin until the user answers
// with a single "y" or "n" (case-insensitive). It returns true for "y",
// false for "n", and false if stdin cannot be read.
func getConfirmation() bool {
	stdin := bufio.NewReader(os.Stdin)

	for {
		fmt.Println("WARNING: forcing deletion of a backup may result in resources in the cloud (disk snapshots, backup files) becoming orphaned.")
		fmt.Printf("Are you sure you want to continue (Y/N)? ")

		line, err := stdin.ReadString('\n')
		if err != nil {
			fmt.Fprintf(os.Stderr, "error reading user input: %v\n", err)
			return false
		}

		switch strings.ToLower(strings.TrimSpace(line)) {
		case "y":
			return true
		case "n":
			return false
		}
		// Any other answer (including multi-character input): re-prompt.
	}
}

View File

@@ -32,7 +32,8 @@ func DescribeBackup(backup *v1.Backup) string {
DescribeBackupSpec(d, backup.Spec) DescribeBackupSpec(d, backup.Spec)
d.Println() d.Println()
DescribeBackupStatus(d, backup.Status) deleting := backup.DeletionTimestamp != nil && !backup.DeletionTimestamp.Time.IsZero()
DescribeBackupStatus(d, backup.Status, deleting)
}) })
} }
@@ -143,11 +144,14 @@ func DescribeBackupSpec(d *Describer, spec v1.BackupSpec) {
} }
func DescribeBackupStatus(d *Describer, status v1.BackupStatus) { func DescribeBackupStatus(d *Describer, status v1.BackupStatus, deleting bool) {
phase := status.Phase phase := status.Phase
if phase == "" { if phase == "" {
phase = v1.BackupPhaseNew phase = v1.BackupPhaseNew
} }
if deleting {
phase = "Deleting"
}
d.Printf("Phase:\t%s\n", phase) d.Printf("Phase:\t%s\n", phase)
d.Println() d.Println()

View File

@@ -87,6 +87,9 @@ func printBackup(backup *v1.Backup, w io.Writer, options printers.PrintOptions)
if status == "" { if status == "" {
status = v1.BackupPhaseNew status = v1.BackupPhaseNew
} }
if backup.DeletionTimestamp != nil && !backup.DeletionTimestamp.Time.IsZero() {
status = "Deleting"
}
if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s", name, status, backup.CreationTimestamp.Time, humanReadableTimeFromNow(expiration), metav1.FormatLabelSelector(backup.Spec.LabelSelector)); err != nil { if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s", name, status, backup.CreationTimestamp.Time, humanReadableTimeFromNow(expiration), metav1.FormatLabelSelector(backup.Spec.LabelSelector)); err != nil {
return err return err

View File

@@ -49,6 +49,7 @@ import (
"github.com/heptio/ark/pkg/util/collections" "github.com/heptio/ark/pkg/util/collections"
"github.com/heptio/ark/pkg/util/encode" "github.com/heptio/ark/pkg/util/encode"
kubeutil "github.com/heptio/ark/pkg/util/kube" kubeutil "github.com/heptio/ark/pkg/util/kube"
"github.com/heptio/ark/pkg/util/stringslice"
) )
const backupVersion = 1 const backupVersion = 1
@@ -237,8 +238,8 @@ func (controller *backupController) processBackup(key string) error {
backup.Status.Version = backupVersion backup.Status.Version = backupVersion
// add GC finalizer if it's not there already // add GC finalizer if it's not there already
if !has(backup.Finalizers, gcFinalizer) { if !stringslice.Has(backup.Finalizers, api.GCFinalizer) {
backup.Finalizers = append(backup.Finalizers, gcFinalizer) backup.Finalizers = append(backup.Finalizers, api.GCFinalizer)
} }
// calculate expiration // calculate expiration

View File

@@ -190,7 +190,7 @@ func TestProcessBackup(t *testing.T) {
backup.Status.Phase = v1.BackupPhaseInProgress backup.Status.Phase = v1.BackupPhaseInProgress
backup.Status.Expiration.Time = expiration backup.Status.Expiration.Time = expiration
backup.Status.Version = 1 backup.Status.Version = 1
backup.Finalizers = []string{gcFinalizer} backup.Finalizers = []string{v1.GCFinalizer}
backupper.On("Backup", backup, mock.Anything, mock.Anything, mock.Anything).Return(nil) backupper.On("Backup", backup, mock.Anything, mock.Anything, mock.Anything).Return(nil)
cloudBackups.On("UploadBackup", "bucket", backup.Name, mock.Anything, mock.Anything, mock.Anything).Return(nil) cloudBackups.On("UploadBackup", "bucket", backup.Name, mock.Anything, mock.Anything, mock.Anything).Return(nil)
@@ -226,7 +226,7 @@ func TestProcessBackup(t *testing.T) {
res.Status.Version = 1 res.Status.Version = 1
res.Status.Expiration.Time = expiration res.Status.Expiration.Time = expiration
res.Status.Phase = v1.BackupPhase(phase) res.Status.Phase = v1.BackupPhase(phase)
res.Finalizers = []string{gcFinalizer} res.Finalizers = []string{v1.GCFinalizer}
return true, res, nil return true, res, nil
}) })
@@ -274,7 +274,7 @@ func TestProcessBackup(t *testing.T) {
finalizers, err := collections.GetSlice(patch, "metadata.finalizers") finalizers, err := collections.GetSlice(patch, "metadata.finalizers")
require.NoError(t, err, "patch does not contain metadata.finalizers") require.NoError(t, err, "patch does not contain metadata.finalizers")
assert.Equal(t, 1, len(finalizers)) assert.Equal(t, 1, len(finalizers))
assert.Equal(t, gcFinalizer, finalizers[0]) assert.Equal(t, v1.GCFinalizer, finalizers[0])
res, _ = collections.GetMap(patch, "metadata") res, _ = collections.GetMap(patch, "metadata")
assert.Equal(t, 1, len(res), "patch's metadata has the wrong number of keys") assert.Equal(t, 1, len(res), "patch's metadata has the wrong number of keys")

View File

@@ -39,10 +39,9 @@ import (
informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1" informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1"
listers "github.com/heptio/ark/pkg/generated/listers/ark/v1" listers "github.com/heptio/ark/pkg/generated/listers/ark/v1"
"github.com/heptio/ark/pkg/util/kube" "github.com/heptio/ark/pkg/util/kube"
"github.com/heptio/ark/pkg/util/stringslice"
) )
const gcFinalizer = "gc.ark.heptio.com"
// MinVersionForDelete is the minimum Kubernetes server version that Ark // MinVersionForDelete is the minimum Kubernetes server version that Ark
// requires in order to be able to properly delete backups (including // requires in order to be able to properly delete backups (including
// the associated snapshots and object storage files). This is because // the associated snapshots and object storage files). This is because
@@ -122,7 +121,7 @@ func (c *gcController) handleFinalizer(_, newObj interface{}) {
} }
log.Debugf("Backup has finalizers %s", backup.Finalizers) log.Debugf("Backup has finalizers %s", backup.Finalizers)
if !has(backup.Finalizers, gcFinalizer) { if !stringslice.Has(backup.Finalizers, api.GCFinalizer) {
return return
} }
@@ -137,7 +136,7 @@ func (c *gcController) handleFinalizer(_, newObj interface{}) {
patchMap := map[string]interface{}{ patchMap := map[string]interface{}{
"metadata": map[string]interface{}{ "metadata": map[string]interface{}{
"finalizers": except(backup.Finalizers, gcFinalizer), "finalizers": stringslice.Except(backup.Finalizers, api.GCFinalizer),
"resourceVersion": backup.ResourceVersion, "resourceVersion": backup.ResourceVersion,
}, },
} }
@@ -153,32 +152,6 @@ func (c *gcController) handleFinalizer(_, newObj interface{}) {
} }
} }
// has returns true if the `items` slice contains the
// value `val`, or false otherwise.
func has(items []string, val string) bool {
for _, itm := range items {
if itm == val {
return true
}
}
return false
}
// except returns a new string slice that contains all of the entries
// from `items` except `val`.
func except(items []string, val string) []string {
var newItems []string
for _, itm := range items {
if itm != val {
newItems = append(newItems, itm)
}
}
return newItems
}
// Run is a blocking function that runs a single worker to garbage-collect backups // Run is a blocking function that runs a single worker to garbage-collect backups
// from object/block storage and the Ark API. It will return when it receives on the // from object/block storage and the Ark API. It will return when it receives on the
// ctx.Done() channel. // ctx.Done() channel.

View File

@@ -306,17 +306,17 @@ func TestHandleFinalizer(t *testing.T) {
backup: arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).Backup, backup: arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).Backup,
}, },
{ {
name: "no gcFinalizer exits early", name: "no GCFinalizer exits early",
backup: arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).WithFinalizers("foo").Backup, backup: arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).WithFinalizers("foo").Backup,
}, },
{ {
name: "error when calling garbageCollect exits without patch", name: "error when calling garbageCollect exits without patch",
backup: arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).WithFinalizers(gcFinalizer).Backup, backup: arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).WithFinalizers(api.GCFinalizer).Backup,
deleteBackupDirError: true, deleteBackupDirError: true,
}, },
{ {
name: "normal case - patch includes the appropriate fields", name: "normal case - patch includes the appropriate fields",
backup: arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).WithFinalizers(gcFinalizer, "foo").WithResourceVersion("1").Backup, backup: arktest.NewTestBackup().WithDeletionTimestamp(time.Now()).WithFinalizers(api.GCFinalizer, "foo").WithResourceVersion("1").Backup,
expectGarbageCollect: true, expectGarbageCollect: true,
expectedPatch: []byte(`{"metadata":{"finalizers":["foo"],"resourceVersion":"1"}}`), expectedPatch: []byte(`{"metadata":{"finalizers":["foo"],"resourceVersion":"1"}}`),
}, },

View File

@@ -0,0 +1,44 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package stringslice
// Has reports whether val occurs anywhere in items.
func Has(items []string, val string) bool {
	for i := range items {
		if items[i] == val {
			return true
		}
	}
	return false
}
// Except returns a new string slice containing every entry of items that
// is not equal to val, preserving order. The input slice is not modified.
func Except(items []string, val string) []string {
	// Default the capacity to len(items) in case items does not contain val.
	out := make([]string, 0, len(items))
	for _, s := range items {
		if s == val {
			continue
		}
		out = append(out, s)
	}
	return out
}

View File

@@ -0,0 +1,46 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package stringslice
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestHas verifies membership checks against empty and populated slices.
func TestHas(t *testing.T) {
	assert.False(t, Has([]string{}, "a"))

	items := []string{"a", "b", "c"}
	for _, present := range items {
		assert.True(t, Has(items, present))
	}
	assert.False(t, Has(items, "d"))
}
// TestExcept verifies that Except removes exactly the requested value and
// leaves every other entry in its original order.
func TestExcept(t *testing.T) {
	assert.Len(t, Except([]string{}, "asdf"), 0)

	in := []string{"a", "b", "c"}
	tests := []struct {
		remove string
		want   []string
	}{
		{"a", []string{"b", "c"}},
		{"b", []string{"a", "c"}},
		{"c", []string{"a", "b"}},
		{"d", []string{"a", "b", "c"}},
	}
	for _, tc := range tests {
		assert.Equal(t, tc.want, Except(in, tc.remove))
	}
}